diff --git a/.cargo/config.toml b/.cargo/config.toml index 28cde74ec..36a0b3d8c 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -7,20 +7,20 @@ time = "build --timings --all-targets" [build] rustflags = [ - "-D", - "warnings", - "-D", - "future-incompatible", - "-D", - "let-underscore", - "-D", - "nonstandard-style", - "-D", - "rust-2018-compatibility", - "-D", - "rust-2018-idioms", - "-D", - "rust-2021-compatibility", - "-D", - "unused", + "-D", + "warnings", + "-D", + "future-incompatible", + "-D", + "let-underscore", + "-D", + "nonstandard-style", + "-D", + "rust-2018-compatibility", + "-D", + "rust-2018-idioms", + "-D", + "rust-2021-compatibility", + "-D", + "unused", ] diff --git a/.githooks/pre-commit b/.githooks/pre-commit new file mode 100644 index 000000000..6e4065777 --- /dev/null +++ b/.githooks/pre-commit @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -euo pipefail + +repo_root="$(git rev-parse --show-toplevel)" + +"$repo_root/scripts/pre-commit.sh" \ No newline at end of file diff --git a/.github/agents/committer.agent.md b/.github/agents/committer.agent.md new file mode 100644 index 000000000..016ee2c0f --- /dev/null +++ b/.github/agents/committer.agent.md @@ -0,0 +1,53 @@ +--- +name: Committer +description: Proactive commit specialist for this repository. Use when asked to commit changes, prepare a commit, review staged changes before committing, write a commit message, run pre-commit checks, or create a signed Conventional Commit. +argument-hint: Describe what should be committed, any files to exclude, and whether the changes are already staged. +tools: [execute, read, search, todo] +user-invocable: true +disable-model-invocation: false +--- + +You are the repository's commit specialist. Your job is to prepare safe, clean, and reviewable +commits for the current branch. + +Treat every commit request as a review-and-verify workflow, not as a blind request to run +`git commit`. 
+ +## Repository Rules + +- Follow `AGENTS.md` for repository-wide behaviour and + `.github/skills/dev/git-workflow/commit-changes/SKILL.md` for commit-specific reference details. +- The pre-commit validation command is `./scripts/pre-commit.sh`. +- Create GPG-signed Conventional Commits (`git commit -S`). + +## Required Workflow + +1. Read the current branch, `git status`, and the staged or unstaged diff relevant to the request. +2. Summarize the intended commit scope before taking action. +3. Ensure the commit scope is coherent and does not accidentally mix unrelated changes. +4. Run `./scripts/pre-commit.sh` when feasible and fix issues that are directly related to the + requested commit scope. +5. Propose a precise Conventional Commit message. +6. Create the commit with `git commit -S` only after the scope is clear and blockers are resolved. +7. After committing, run a quick verification check and report the resulting commit summary. + +## Constraints + +- Do not write code. +- Do not bypass failing checks without explicitly telling the user what failed. +- Do not rewrite or revert unrelated user changes. +- Do not create empty, vague, or non-conventional commit messages. +- Do not commit secrets, backup junk, or accidental files. +- Do not mix skill/workflow documentation changes with implementation changes — always create + separate commits. + +## Output Format + +When handling a commit task, respond in this order: + +1. Commit scope summary +2. Blockers, anomalies, or risks +3. Checks run and results +4. Proposed commit message +5. Commit status +6. Post-commit verification diff --git a/.github/agents/complexity-auditor.agent.md b/.github/agents/complexity-auditor.agent.md new file mode 100644 index 000000000..91ae2a085 --- /dev/null +++ b/.github/agents/complexity-auditor.agent.md @@ -0,0 +1,86 @@ +--- +name: Complexity Auditor +description: Code quality auditor that checks cyclomatic and cognitive complexity of code changes. 
Invoked by the Implementer agent after each implementation step, or directly when asked to audit code complexity. Reports PASS, WARN, or FAIL for each changed function. +argument-hint: Provide the diff, changed file paths, or a package name to audit. +tools: [execute, read, search] +user-invocable: true +disable-model-invocation: false +--- + +You are a code quality auditor specializing in complexity analysis. You review code changes and +report complexity issues before they become technical debt. + +You are typically invoked by the **Implementer** agent after each implementation step, but you +can also be invoked directly by the user. + +## Audit Scope + +Focus on the diff introduced by the current task. Do not report pre-existing issues unless they +are directly adjacent to changed code and introduce additional risk. + +## Complexity Checks + +### 1. Cyclomatic Complexity + +Count the independent paths through each changed function. Each of the following adds one branch: +`if`, `else if`, `match` arm, `while`, `for`, `loop`, `?` early return, and `&&`/`||` in a +condition. A function starts at complexity 1. + +| Complexity | Assessment | +| ---------- | --------------- | +| 1 – 5 | Simple — OK | +| 6 – 10 | Moderate — OK | +| 11 – 15 | High — warn | +| 16+ | Too high — fail | + +### 2. Cognitive Complexity (via Clippy) + +Run the following to surface Clippy cognitive complexity warnings: + +```bash +cargo clippy --package <package-name> -- \ + -W clippy::cognitive_complexity \ + -D warnings +``` + +Any `cognitive_complexity` warning from Clippy is a failing issue. + +### 3. Nesting Depth + +Flag functions with more than 3 levels of nesting. Deep nesting hides intent and makes +reasoning difficult. + +### 4. Function Length + +Flag functions longer than 50 lines. Long functions are a proxy for missing decomposition. + +## Audit Workflow + +1. Identify all functions added or changed in the current diff. +2. For each function, compute cyclomatic complexity from the source. +3. 
Run `cargo clippy` with the cognitive complexity lint enabled. +4. Check nesting depth and function length. +5. Report findings using the output format below. + +## Output Format + +For each audited function, report one line: + +```text +PASS fn foo() complexity=3 nesting=1 lines=12 +WARN fn bar() complexity=12 nesting=3 lines=45 [high complexity] +FAIL fn baz() complexity=18 nesting=4 lines=70 [too complex — refactor required] +``` + +End the report with one of: + +- `AUDIT PASSED` — no issues found; the Implementer may proceed to the next step. +- `AUDIT WARNED` — non-blocking issues found; describe each concern briefly. +- `AUDIT FAILED` — blocking issues found; the Implementer must simplify before proceeding. + +## Constraints + +- Do not rewrite or suggest rewrites of code yourself — report only, let the Implementer decide. +- Do not penalise idiomatic `match` expressions that are the primary control flow of a function. +- Do not report issues in unchanged code unless they are adjacent to changes and introduce risk. +- Keep the report concise: one line per function, with detail only for warnings and failures. diff --git a/.github/agents/implementer.agent.md b/.github/agents/implementer.agent.md new file mode 100644 index 000000000..a083a507c --- /dev/null +++ b/.github/agents/implementer.agent.md @@ -0,0 +1,86 @@ +--- +name: Implementer +description: Software implementer that applies Test-Driven Development and seeks simple solutions. Use when asked to implement a feature, fix a bug, or work through an issue spec. Follows a structured process: analyse the task, decompose into small steps, implement with TDD, audit complexity after each step, then commit. +argument-hint: Describe the task or link the issue spec document. Clarify any constraints or acceptance criteria. +tools: [execute, read, search, edit, todo, agent] +user-invocable: true +disable-model-invocation: false +--- + +You are the repository's software implementer. 
Your job is to implement tasks correctly, simply, +and verifiably. + +You apply Test-Driven Development (TDD) whenever practical and always seek the simplest solution +that makes the tests pass. + +## Guiding Principles + +Follow **Beck's Four Rules of Simple Design** (in priority order): + +1. **Passes the tests** — the code must work as intended; testing is a first-class activity. +2. **Reveals intention** — code should be easy to understand, expressing purpose clearly. +3. **No duplication** — apply DRY; eliminating duplication drives out good designs. +4. **Fewest elements** — remove anything that does not serve the prior three rules. + +Reference: [Beck Design Rules](https://martinfowler.com/bliki/BeckDesignRules.html) + +## Repository Rules + +- Follow `AGENTS.md` for repository-wide conventions. +- The pre-commit validation command is `./scripts/pre-commit.sh`. +- Relevant skills to load when needed: + - `.github/skills/dev/testing/write-unit-test/SKILL.md` — test naming and Arrange/Act/Assert pattern. + - `.github/skills/dev/rust-code-quality/handle-errors-in-code/SKILL.md` — error handling. + - `.github/skills/dev/git-workflow/commit-changes/SKILL.md` — commit conventions. + +## Required Workflow + +### Step 1 — Analyse the Task + +Before writing any code: + +1. Read `AGENTS.md` and any relevant skill files for the area being changed. +2. Read the issue spec or task description in full. +3. Identify the scope: what must change and what must not change. +4. Ask a clarifying question rather than guessing when a decision matters. + +### Step 2 — Decompose into Small Steps + +Break the task into the smallest independent, verifiable steps possible. Use the todo list to +track progress. Each step should: + +- Have a single, clear intent. +- Be verifiable by a test or observable behaviour. +- Be committable independently when complete. + +### Step 3 — Implement Each Step (TDD Preferred) + +For each step: + +1. 
**Write a failing test first** (red) — express the expected behaviour in a test. +2. **Write minimal production code** to make the test pass (green). +3. **Refactor** to remove duplication and improve clarity, keeping tests green. +4. Verify with `cargo test -p <package>` before moving on. + +When TDD is not practical (e.g. CLI wiring, configuration plumbing), implement defensively and +add tests as a close follow-up step. + +### Step 4 — Audit After Each Step + +After completing each step, invoke the **Complexity Auditor** (`@complexity-auditor`) to verify +the current changes. Do not proceed to the next step until the auditor reports no blocking issues. + +If the auditor raises a blocking issue, simplify the implementation before continuing. + +### Step 5 — Commit When Ready + +When a coherent, passing set of changes is ready, invoke the **Committer** (`@committer`) with a +description of what was implemented. Do not commit directly — always delegate to the Committer. + +## Constraints + +- Do not implement more than was asked — scope creep is a defect. +- Do not suppress compiler warnings or clippy lints without a documented reason. +- Do not add dependencies without running `cargo machete` afterward. +- Do not commit code that fails `./scripts/pre-commit.sh`. +- Do not skip the audit step, even for small changes. diff --git a/.github/skills/add-new-skill/SKILL.md b/.github/skills/add-new-skill/SKILL.md new file mode 100644 index 000000000..d99b4e3c9 --- /dev/null +++ b/.github/skills/add-new-skill/SKILL.md @@ -0,0 +1,146 @@ +--- +name: add-new-skill +description: Guide for creating effective Agent Skills for the torrust-tracker project. Use when you need to create a new skill (or update an existing skill) that extends AI agent capabilities with specialized knowledge, workflows, or tool integrations. Triggers on "create skill", "add new skill", "how to add skill", or "skill creation". 
+metadata: + author: torrust + version: "1.0" +--- + +# Creating New Agent Skills + +This skill guides you through creating effective Agent Skills for the Torrust Tracker project. + +## About Skills + +**What are Agent Skills?** + +Agent Skills are specialized instruction sets that extend AI agent capabilities with domain-specific +knowledge, workflows, and tool integrations. They follow the [agentskills.io](https://agentskills.io) +open format and work with multiple AI coding agents (Claude Code, VS Code Copilot, Cursor, Windsurf). + +### Progressive Disclosure + +Skills use a three-level loading strategy to minimize context window usage: + +1. **Metadata** (~100 tokens): `name` and `description` loaded at startup for all skills +2. **SKILL.md Body** (<5000 tokens): Loaded when a task matches the skill's description +3. **Bundled Resources**: Loaded on-demand only when referenced (scripts, references, assets) + +### When to Create a Skill vs Updating AGENTS.md + +| Use AGENTS.md for... | Use Skills for... | +| ------------------------------- | ------------------------------- | +| Always-on rules and constraints | On-demand workflows | +| "Always do X, never do Y" | Multi-step repeatable processes | +| Baseline conventions | Specialist domain knowledge | +| Rarely changes | Can be added/refined frequently | + +**Example**: "Use lowercase for skill filenames" → AGENTS.md rule. +"How to run pre-commit checks" → Skill. + +## Core Principles + +### 1. Concise is Key + +**Context window is shared** between system prompt, conversation history, other skills, +and your actual request. Only add context the agent doesn't already have. + +### 2. 
Set Appropriate Degrees of Freedom + +Match specificity to task fragility: + +- **High freedom** (text-based instructions): multiple approaches valid, context-dependent +- **Medium freedom** (pseudocode): preferred pattern exists, some variation acceptable +- **Low freedom** (specific scripts): operations are fragile, sequence must be followed + +### 3. Anatomy of a Skill + +A skill consists of: + +- **SKILL.md**: Frontmatter (metadata) + body (instructions) +- **Optional bundled resources**: `scripts/`, `references/`, `assets/` + +Keep SKILL.md concise (<500 lines). Move detailed content to reference files. + +### 4. Progressive Disclosure + +Split detailed content into reference files loaded on-demand: + +```markdown +## Advanced Features + +See [specification.md](references/specification.md) for Agent Skills spec. +See [patterns.md](references/patterns.md) for workflow patterns. +``` + +### 5. Content Strategy + +- **Include in SKILL.md**: essential commands and step-by-step workflows +- **Put in `references/`**: detailed descriptions, config options, troubleshooting +- **Link to official docs**: architecture docs, ADRs, contributing guides + +## Skill Creation Process + +### Step 1: Plan the Skill + +Answer: + +- What specific queries should trigger this skill? +- What tasks does it help accomplish? +- Does a similar skill already exist? 
+ +### Step 2: Choose the Location + +Follow the directory layout: + +```text +.github/skills/ + add-new-skill/ + dev/ + git-workflow/ + maintenance/ + planning/ + rust-code-quality/ + testing/ +``` + +### Step 3: Write the SKILL.md + +Frontmatter rules: + +- `name`: lowercase letters, numbers, hyphens only; max 64 chars; no consecutive hyphens +- `description`: max 1024 chars; include trigger phrases; describe WHAT and WHEN +- `metadata.author`: `torrust` +- `metadata.version`: `"1.0"` + +### Step 4: Validate and Commit + +```bash +# Check spelling and markdown +linter cspell +linter markdown + +# Run all linters +linter all + +# Commit +git add .github/skills/ +git commit -S -m "docs(skills): add {skill-name} skill" +``` + +## Directory Layout + +```text +.github/skills/ + / + SKILL.md ← Required + references/ ← Optional: detailed docs + scripts/ ← Optional: executable scripts + assets/ ← Optional: templates, data +``` + +## References + +- Agent Skills specification: [references/specification.md](references/specification.md) +- Skill patterns: [references/patterns.md](references/patterns.md) +- Real examples: [references/examples.md](references/examples.md) diff --git a/.github/skills/add-new-skill/references/specification.md b/.github/skills/add-new-skill/references/specification.md new file mode 100644 index 000000000..90e73b8a6 --- /dev/null +++ b/.github/skills/add-new-skill/references/specification.md @@ -0,0 +1,65 @@ +# Agent Skills Specification Reference + +This document provides a reference to the Agent Skills specification from [agentskills.io](https://agentskills.io). + +## What is Agent Skills? + +Agent Skills is an open format for extending AI agent capabilities with specialized knowledge and +workflows. It's vendor-neutral and works with Claude Code, VS Code Copilot, Cursor, and Windsurf. 
+ +## Core Concepts + +### Progressive Disclosure + +```text +Level 1: Metadata (name + description) - ~100 tokens - Loaded at startup for ALL skills +Level 2: SKILL.md body - <5000 tokens - Loaded when skill matches task +Level 3: Bundled resources - On-demand - Loaded only when referenced +``` + +### Directory Structure + +```text +.github/ +└── skills/ + └── skill-name/ + ├── SKILL.md # Required: frontmatter + instructions + ├── README.md # Optional: human-readable documentation + ├── scripts/ # Optional: executable code + ├── references/ # Optional: detailed docs loaded on-demand + └── assets/ # Optional: templates, images, data +``` + +## SKILL.md Format + +### Frontmatter (YAML) + +```yaml +--- +name: skill-name +description: | + What the skill does and when to use it. Include trigger phrases. +metadata: + author: torrust + version: "1.0" +--- +``` + +### Frontmatter Validation Rules + +**name**: + +- Required; max 64 characters +- Lowercase letters, numbers, hyphens only +- Cannot contain consecutive hyphens or XML tags + +**description**: + +- Required; max 1024 characters +- Should describe WHAT the skill does AND WHEN to use it +- Include trigger phrases/keywords + +## References + +- Official spec: +- GitHub Copilot skills docs: diff --git a/.github/skills/dev/git-workflow/commit-changes/SKILL.md b/.github/skills/dev/git-workflow/commit-changes/SKILL.md new file mode 100644 index 000000000..415ee2895 --- /dev/null +++ b/.github/skills/dev/git-workflow/commit-changes/SKILL.md @@ -0,0 +1,155 @@ +--- +name: commit-changes +description: Guide for committing changes in the torrust-tracker project. Covers conventional commit format, pre-commit verification checklist, GPG signing, and commit quality guidelines. Use when committing code, running pre-commit checks, or following project commit standards. Triggers on "commit", "commit changes", "how to commit", "pre-commit", "commit message", "commit format", or "conventional commits". 
metadata: + author: torrust + version: "1.0" +--- + +# Committing Changes + +This skill guides you through the complete commit process for the Torrust Tracker project. + +## Quick Reference + +```bash +# One-time setup: install the pre-commit Git hook +./scripts/install-git-hooks.sh + +# Stage changes +git add <files> + +# Commit with conventional format and GPG signature (MANDATORY) +# The pre-commit hook runs ./scripts/pre-commit.sh automatically +git commit -S -m "<type>[(<scope>)]: <description>" +``` + +## Conventional Commit Format + +We follow the [Conventional Commits](https://www.conventionalcommits.org/) specification. + +### Commit Message Structure + +```text +<type>[optional scope]: <description> + +[optional body] + +[optional footer(s)] +``` + +Scope should reflect the affected package or area (e.g., `tracker-core`, `udp-protocol`, `ci`, `docs`). + +### Commit Types + +| Type | Description | Example | +| ---------- | ------------------------------------- | ------------------------------------------------------------ | +| `feat` | New feature or enhancement | `feat(tracker-core): add peer expiry grace period` | +| `fix` | Bug fix | `fix(udp-protocol): resolve endianness in announce response` | +| `docs` | Documentation changes | `docs(agents): add root AGENTS.md` | +| `style` | Code style changes (formatting, etc.) 
| `style: apply rustfmt to all source files` | +| `refactor` | Code refactoring | `refactor(tracker-core): extract peer list to own module` | +| `test` | Adding or updating tests | `test(http-tracker-core): add announce response tests` | +| `chore` | Maintenance tasks | `chore: update dependencies` | +| `ci` | CI/CD related changes | `ci: add workflow for container publishing` | +| `perf` | Performance improvements | `perf(torrent-repository): switch to dashmap` | + +## GPG Commit Signing (MANDATORY) + +**All commits must be GPG signed.** Use the `-S` flag: + +```bash +git commit -S -m "your commit message" +``` + +## Pre-commit Verification (MANDATORY) + +### Git Hook + +The repository ships a `pre-commit` Git hook that runs `./scripts/pre-commit.sh` +automatically on every `git commit`. Install it once after cloning: + +```bash +./scripts/install-git-hooks.sh +``` + +Once installed, the hook fires on every commit and you do not need to run the script manually. + +### Automated Checks + +If the hook is not installed, run the script explicitly before committing. +**It must exit with code `0`.** + +> **⏱️ Expected runtime: ~3 minutes** on a modern developer machine. AI agents must set a +> command timeout of **at least 5 minutes** before invoking this script. + +```bash +./scripts/pre-commit.sh +``` + +The script runs: + +1. `cargo machete` — unused dependency check +2. `linter all` — all linters (markdown, YAML, TOML, clippy, rustfmt, shellcheck, cspell) +3. `cargo test --doc --workspace` — documentation tests +4. 
`cargo test --tests --benches --examples --workspace --all-targets --all-features` — all tests + +### Manual Checks (Cannot Be Automated) + +Verify these by hand before committing: + +- **Self-review the diff**: read through `git diff --staged` and check for obvious mistakes, + debug artifacts, or unintended changes +- **Documentation updated**: if public API or behaviour changed, doc comments and any relevant + `docs/` pages reflect the change +- **`AGENTS.md` updated**: if architecture, package structure, or key workflows changed, the + relevant `AGENTS.md` file is updated +- **New technical terms added to `project-words.txt`**: any new jargon or identifiers that + cspell does not know about are added alphabetically + +### Debugging a Failing Run + +```bash +linter markdown # Markdown +linter yaml # YAML +linter toml # TOML +linter clippy # Rust code analysis +linter rustfmt # Rust formatting +linter shellcheck # Shell scripts +linter cspell # Spell checking +``` + +Fix Rust formatting automatically: + +```bash +cargo fmt +``` + +## Hashtag Usage Warning + +**Only use `#` when intentionally referencing a GitHub issue.** + +GitHub auto-links `#NUMBER` to issues. Avoid accidental references: + +- ✅ `feat(tracker-core): add feature (see #42)` — intentional reference +- ❌ `fix: make feature #1 priority` — accidentally links to issue #1 + +Use ordered Markdown lists or plain numbers instead of `#N` step labels. 
+ +## Commit Quality Guidelines + +### Good Commits (✅) + +- **Atomic**: Each commit represents one logical change +- **Descriptive**: Clear, concise description of what changed +- **Tested**: All tests pass +- **Linted**: All linters pass +- **Conventional**: Follows conventional commit format +- **Signed**: GPG signature present + +### Commits to Avoid (❌) + +- Too large: multiple unrelated changes in one commit +- Vague messages like "fix stuff" or "WIP" +- Missing scope when a package is clearly affected +- Unsigned commits diff --git a/.github/skills/dev/git-workflow/create-feature-branch/SKILL.md b/.github/skills/dev/git-workflow/create-feature-branch/SKILL.md new file mode 100644 index 000000000..bb2c82a55 --- /dev/null +++ b/.github/skills/dev/git-workflow/create-feature-branch/SKILL.md @@ -0,0 +1,113 @@ +--- +name: create-feature-branch +description: Guide for creating feature branches following the torrust-tracker branching conventions. Covers branch naming format, lifecycle, and common patterns. Use when creating branches for issues, starting work on tasks, or setting up development branches. Triggers on "create branch", "new branch", "checkout branch", "branch for issue", or "start working on issue". +metadata: + author: torrust + version: "1.0" +--- + +# Creating Feature Branches + +This skill guides you through creating feature branches following the Torrust Tracker branching +conventions. 
+ +## Branch Naming Convention + +**Format**: `{issue-number}-{short-description}` (preferred) + +Alternative formats (no tracked issue): + +- `feat/{short-description}` +- `fix/{short-description}` +- `chore/{short-description}` + +**Rules**: + +- Always start with the GitHub issue number when one exists +- Use lowercase letters only +- Separate words with hyphens (not underscores) +- Keep description concise but descriptive + +## Creating a Branch + +### Standard Workflow + +```bash +# Ensure you're on latest develop +git checkout develop +git pull --ff-only + +# Create and checkout branch for issue #42 +git checkout -b 42-add-peer-expiry-grace-period +``` + +### With MCP GitHub Tools + +1. Get the issue number and title +2. Format the branch name: `{number}-{kebab-case-description}` +3. Create the branch from `develop` +4. Checkout locally: `git fetch && git checkout {branch-name}` + +## Branch Naming Examples + +✅ **Good branch names**: + +- `42-add-peer-expiry-grace-period` +- `156-refactor-udp-server-socket-binding` +- `203-add-e2e-mysql-tests` +- `1697-ai-agent-configuration` + +❌ **Avoid**: + +- `my-feature` — no issue number +- `FEATURE-123` — all caps +- `fix_bug` — underscores instead of hyphens +- `42_add_support` — underscores + +## Complete Branch Lifecycle + +### 1. Create Branch from `develop` + +```bash +git checkout develop +git pull --ff-only +git checkout -b 42-add-peer-expiry-grace-period +``` + +### 2. Develop + +Make commits following [commit conventions](../commit-changes/SKILL.md). + +### 3. Pre-commit Checks + +```bash +cargo machete +linter all +cargo test --doc --workspace +cargo test --tests --benches --examples --workspace --all-targets --all-features +``` + +### 4. Push to Your Fork + +```bash +git push {your-fork-remote} 42-add-peer-expiry-grace-period +``` + +### 5. Create Pull Request + +Target branch: `torrust/torrust-tracker:develop` + +### 6. 
Cleanup After Merge + +```bash +git checkout develop +git pull --ff-only +git branch -d 42-add-peer-expiry-grace-period +``` + +## Converting Issue Title to Branch Name + +1. Get issue number (e.g., #42) +2. Take issue title (e.g., "Add Peer Expiry Grace Period") +3. Convert to lowercase kebab-case: `add-peer-expiry-grace-period` +4. Prefix with issue number: `42-add-peer-expiry-grace-period` diff --git a/.github/skills/dev/git-workflow/open-pull-request/SKILL.md b/.github/skills/dev/git-workflow/open-pull-request/SKILL.md new file mode 100644 index 000000000..eca0fae3b --- /dev/null +++ b/.github/skills/dev/git-workflow/open-pull-request/SKILL.md @@ -0,0 +1,73 @@ +--- +name: open-pull-request +description: Open a pull request from a feature branch using GitHub CLI (preferred) or GitHub MCP tools. Covers pre-flight checks, correct base/head configuration for fork workflows, title/body conventions, and post-creation validation. Use when asked to "open PR", "create pull request", or "submit branch for review". +metadata: + author: torrust + version: "1.0" +--- + +# Open a Pull Request + +## CLI vs MCP Decision Rule + +- **Inner loop (fast local branch work):** prefer GitHub CLI (`gh pr create`). +- **Outer loop (cross-system coordination):** use MCP tools for structured/authenticated access. + +## Pre-flight Checks + +Before opening a PR: + +- [ ] Working tree is clean (`git status`) +- [ ] Branch is pushed to your fork remote +- [ ] Commits are GPG signed (`git log --show-signature -n 1`) +- [ ] All pre-commit checks passed (`linter all`, `cargo machete`, tests) + +## Title and Description Convention + +PR title: use Conventional Commit style, include issue reference. 
Examples: + +- `feat(tracker-core): [#42] add peer expiry grace period` +- `docs(agents): set up basic AI agent configuration (#1697)` + +PR body must include: + +- Summary of changes +- Files/packages touched +- Validation performed +- Issue link (`Closes #<number>`) + +## Option A (Preferred): GitHub CLI + +```bash +gh pr create \ + --repo torrust/torrust-tracker \ + --base develop \ + --head <fork-owner>:<branch-name> \ + --title "<title>" \ + --body "<body>" +``` + +If successful, `gh` prints the PR URL. + +## Option B: GitHub MCP Tools + +When MCP pull request management tools are available, create the PR with: + +- `base`: `develop` +- `head`: `<fork-owner>:<branch-name>` +- Capture and share the resulting PR URL. + +## Post-creation Validation + +- [ ] PR targets `torrust/torrust-tracker:develop` +- [ ] Head branch is correct +- [ ] CI workflows started +- [ ] Issue linked in description + +## Troubleshooting + +- `fatal: ... does not appear to be a git repository`: push to correct remote (`git remote -v`) +- `A pull request already exists`: open the existing PR URL instead of creating a new one +- Permission errors on upstream: use `owner:branch` fork syntax diff --git a/.github/skills/dev/git-workflow/release-new-version/SKILL.md b/.github/skills/dev/git-workflow/release-new-version/SKILL.md new file mode 100644 index 000000000..f30898511 --- /dev/null +++ b/.github/skills/dev/git-workflow/release-new-version/SKILL.md @@ -0,0 +1,147 @@ +--- +name: release-new-version +description: Guide for releasing a new version of the Torrust Tracker using the standard staging branch, tag, and crate publication workflow. Covers version bump, release commit, staging branch promotion, PR to main, release branch/tag creation, crate publication, and merge-back to develop. Use when asked to "release", "cut a version", "publish a new version", or "create release vX.Y.Z". 
+metadata: + author: torrust + version: "1.0" +--- + +# Release New Version + +Primary reference: [`docs/release_process.md`](../../../../../docs/release_process.md) + +## Release Steps (Mandatory Order) + +1. Stage `develop` → `staging/main` +2. Create release commit (bump version) +3. PR `staging/main` → `main` +4. Push `main` → `releases/vX.Y.Z` +5. Create signed tag `vX.Y.Z` on that branch +6. Verify deployment workflow + crate publication +7. Create GitHub release +8. Stage `main` → `staging/develop` (merge-back) +9. Bump next dev version, PR `staging/develop` → `develop` + +Do not reorder these steps. + +## Version Naming Rules + +- Version in code: `X.Y.Z` (release) or `X.Y.Z-develop` (development) +- Git tag: `vX.Y.Z` +- Release branch: `releases/vX.Y.Z` +- Staging branches: `staging/main`, `staging/develop` + +## Pre-Flight Checklist + +Before starting: + +- [ ] Clean working tree (`git status`) +- [ ] `develop` branch is up to date with `torrust/develop` +- [ ] All CI checks pass on `develop` +- [ ] Working version in manifests is `X.Y.Z-develop` + +## Commands + +### 1) Stage develop → staging/main + +```bash +git fetch --all +git push --force torrust develop:staging/main +``` + +### 2) Create Release Commit + +```bash +git stash +git switch staging/main +git reset --hard torrust/staging/main +# Edit version in all Cargo.toml files: +# change X.Y.Z-develop → X.Y.Z +git add -A +git commit -S -m "release: version X.Y.Z" +git push torrust +``` + +Edit `version` in: + +- `Cargo.toml` (workspace) +- All packages under `packages/` that publish crates +- `console/tracker-client/Cargo.toml` +- `contrib/bencode/Cargo.toml` + +Also update any internal path dependency `version` constraints. + +### 3) PR staging/main → main + +Create PR: "Release Version X.Y.Z" (title format) +Base: `torrust/torrust-tracker:main` +Head: `staging/main` +Merge after CI passes. 
+ +### 4) Push releases/vX.Y.Z branch + +```bash +git fetch --all +git push torrust main:releases/vX.Y.Z +``` + +### 5) Create Signed Tag + +```bash +git switch releases/vX.Y.Z +git reset --hard torrust/releases/vX.Y.Z +git tag --sign vX.Y.Z +git push --tags torrust +``` + +### 6) Verify Deployment Workflow + +Check the +[deployment workflow](https://github.com/torrust/torrust-tracker/actions/workflows/deployment.yaml) +ran successfully and the following crates were published: + +- `torrust-tracker-contrib-bencode` +- `torrust-tracker-located-error` +- `torrust-tracker-primitives` +- `torrust-tracker-clock` +- `torrust-tracker-configuration` +- `torrust-tracker-torrent-repository` +- `torrust-tracker-test-helpers` +- `torrust-tracker` + +Crates must be published in dependency order. Each must be indexed on crates.io before the next +publishes. + +### 7) Create GitHub Release + +Create a release from tag `vX.Y.Z` after the deployment workflow passes. + +### 8) Merge-back: Stage main → staging/develop + +```bash +git fetch --all +git push --force torrust main:staging/develop +``` + +### 9) Bump Next Dev Version + +```bash +git stash +git switch staging/develop +git reset --hard torrust/staging/develop +# Edit version in all Cargo.toml files: +# change X.Y.Z → (next)X.Y.Z-develop (e.g. 
3.0.0 → 3.0.1-develop) +git add -A +git commit -S -m "develop: bump to version (next)X.Y.Z-develop" +git push torrust +``` + +Create PR: "Version X.Y.Z was Released" +Base: `torrust/torrust-tracker:develop` +Head: `staging/develop` + +## Failure Handling + +- **Deployment workflow failed**: fix and rerun on same release branch +- **Crate already published**: do not republish; cut a patch release +- **Partial state (tag exists but branch doesn't)**: investigate before proceeding diff --git a/.github/skills/dev/git-workflow/review-pr/SKILL.md b/.github/skills/dev/git-workflow/review-pr/SKILL.md new file mode 100644 index 000000000..da4be9ca3 --- /dev/null +++ b/.github/skills/dev/git-workflow/review-pr/SKILL.md @@ -0,0 +1,66 @@ +--- +name: review-pr +description: Review a pull request for the torrust-tracker project. Covers checklist-based PR quality verification, code style standards, test requirements, documentation, and how to submit review feedback. Use when asked to review a PR, check a pull request, or provide feedback on code changes. Triggers on "review PR", "review pull request", "check PR quality", or "code review". +metadata: + author: torrust + version: "1.0" +--- + +# Reviewing a Pull Request + +## Quick Overview Approach + +1. Read the PR title and description for context +2. Check the diff for scope of change +3. Identify the affected packages and components +4. 
Apply the checklist below + +## PR Review Checklist + +### PR Metadata + +- [ ] Title follows Conventional Commits format +- [ ] Description clearly explains what changes were made and why +- [ ] Issue is linked (`Closes #<number>` or `Refs #<number>`) +- [ ] Target branch is `develop` (not `main`) + +### Code Quality + +- [ ] Code follows existing patterns in affected packages +- [ ] No unused imports, variables, or functions +- [ ] No `#[allow(...)]` suppressions unless clearly justified with a comment +- [ ] Errors handled properly (use `thiserror` for structured errors, avoid `.unwrap()`) +- [ ] No security vulnerabilities (OWASP Top 10 awareness) + +### Tests + +- [ ] New functionality has unit tests +- [ ] Integration tests added if applicable +- [ ] All existing tests still pass +- [ ] Test code is clean, readable, and maintainable + +### Documentation + +- [ ] Public API items have doc comments +- [ ] `AGENTS.md` updated if architecture changed +- [ ] Markdown docs updated if user-facing behavior changed +- [ ] Spell check: new technical terms added to `project-words.txt` + +### Rust-Specific + +- [ ] Imports grouped: std → external → internal +- [ ] Line length within `max_width = 130` +- [ ] GPG-signed commits + +## Providing Feedback + +Categorize comments to help the author prioritize: + +- **Blocker** — must fix before merge (correctness, security, breaking changes) +- **Suggestion** — improvement recommended but not blocking +- **Nit** — minor style/readability point + +## Standards Reference + +All code quality standards are defined in the root `AGENTS.md`. When pointing to a +standard, reference the relevant section of `AGENTS.md`. 
diff --git a/.github/skills/dev/git-workflow/run-linters/SKILL.md b/.github/skills/dev/git-workflow/run-linters/SKILL.md new file mode 100644 index 000000000..c779b413f --- /dev/null +++ b/.github/skills/dev/git-workflow/run-linters/SKILL.md @@ -0,0 +1,121 @@ +--- +name: run-linters +description: Run code quality checks and linters for the torrust-tracker project. Includes Rust clippy, rustfmt, markdown, YAML, TOML, spell checking, and shellcheck. Use when asked to lint code, check formatting, fix code quality issues, or prepare for commit. Triggers on "lint", "run linters", "check code quality", "fix formatting", "run clippy", "run rustfmt", or "pre-commit checks". +metadata: + author: torrust + version: "1.0" +--- + +# Run Linters + +## Quick Reference + +### Run All Linters + +```bash +linter all +``` + +**Always run `linter all` before every commit. It must exit with code `0`.** + +### Run a Single Linter + +```bash +linter markdown # Markdown (markdownlint) +linter yaml # YAML (yamllint) +linter toml # TOML (taplo) +linter cspell # Spell checker (cspell) +linter clippy # Rust code analysis (clippy) +linter rustfmt # Rust formatting (rustfmt) +linter shellcheck # Shell scripts (shellcheck) +``` + +## Common Workflows + +### Before Any Commit + +```bash +linter all # Must pass with exit code 0 +``` + +### Debug a Failing Full Run + +```bash +# Identify which linter is failing +linter markdown +linter yaml +linter toml +linter cspell +linter clippy +linter rustfmt +linter shellcheck +``` + +### During Development (Rust only) + +```bash +linter clippy # Check logic and code quality +linter rustfmt # Check formatting +``` + +## Fixing Common Issues + +### Rust Formatting Errors (rustfmt) + +```bash +cargo fmt # Auto-fix all Rust source files +``` + +Formatting rules from `rustfmt.toml`: + +- `max_width = 130` +- `group_imports = "StdExternalCrate"` +- `imports_granularity = "Module"` + +### Rust Clippy Errors + +Warnings are **errors** (configured as `-D warnings` 
in `.cargo/config.toml`). +Fix the underlying issue — do not `#[allow(...)]` unless truly unavoidable. + +Example: unused variable → use `_var` prefix or actually use the value. + +### Markdown Errors (markdownlint) + +Common issues: + +- Trailing whitespace +- Missing blank line before headings +- Incorrect heading levels +- Lines exceeding 120 characters + +Configuration in `.markdownlint.json`. + +### YAML Errors (yamllint) + +Common issues: + +- Trailing spaces +- Inconsistent indentation (2 spaces expected) +- Missing newline at end of file + +Configuration in `.yamllint-ci.yml`. + +### TOML Errors (taplo) + +```bash +taplo fmt **/*.toml # Auto-fix TOML formatting +``` + +### Spell Check Errors (cspell) + +For legitimate technical terms not in dictionaries, add them to `project-words.txt` +(alphabetical order, one per line). + +### Shell Script Errors (shellcheck) + +Fix the reported issue in the shell script. Common: use `[[ ]]` instead of `[ ]`, +quote variables, avoid `eval`. + +## Linter Details + +See [references/linters.md](references/linters.md) for detailed documentation on each linter. diff --git a/.github/skills/dev/git-workflow/run-linters/references/linters.md b/.github/skills/dev/git-workflow/run-linters/references/linters.md new file mode 100644 index 000000000..40b3ee5fb --- /dev/null +++ b/.github/skills/dev/git-workflow/run-linters/references/linters.md @@ -0,0 +1,85 @@ +# Linter Documentation + +This document provides detailed documentation for each linter used in the Torrust Tracker project. + +## Overview + +The project uses the `linter` binary from +[torrust/torrust-linting](https://github.com/torrust/torrust-linting) as a unified wrapper around +all linters. + +Install: `cargo install --locked --git https://github.com/torrust/torrust-linting --bin linter` + +## Rust Linters + +### clippy + +**Tool**: Rust's official linter. 
+**Config**: `.cargo/config.toml` (global `rustflags`) +**Run**: `linter clippy` + +Warnings are treated as errors via `-D warnings` in `.cargo/config.toml`. +Do not suppress warnings with `#[allow(...)]` unless absolutely necessary. + +**Critical flags** (from `.cargo/config.toml`): + +- `-D warnings` — all warnings are errors +- `-D unused` — unused items are errors +- `-D rust-2018-idioms` — enforces Rust 2018 idioms +- `-D future-incompatible` + +### rustfmt + +**Tool**: Rust code formatter. +**Config**: `rustfmt.toml` +**Run**: `linter rustfmt` +**Auto-fix**: `cargo fmt` + +Key formatting settings: + +- `max_width = 130` +- `group_imports = "StdExternalCrate"` +- `imports_granularity = "Module"` + +## Documentation Linters + +### markdownlint + +**Tool**: markdownlint +**Config**: `.markdownlint.json` +**Run**: `linter markdown` + +### cspell (Spell Checker) + +**Tool**: cspell +**Config**: `cspell.json` +**Dictionary**: `project-words.txt` +**Run**: `linter cspell` + +Add technical terms to `project-words.txt` (alphabetical order, one per line). + +## Configuration Linters + +### yamllint + +**Tool**: yamllint +**Config**: `.yamllint-ci.yml` +**Run**: `linter yaml` + +Expected: 2-space indentation, no trailing whitespace, newline at EOF. + +### taplo + +**Tool**: taplo +**Config**: `.taplo.toml` +**Run**: `linter toml` +**Auto-fix**: `taplo fmt **/*.toml` + +## Script Linters + +### shellcheck + +**Tool**: shellcheck +**Run**: `linter shellcheck` + +Checks all shell scripts. Use `[[ ]]` over `[ ]`, quote variables (`"$var"`), and avoid `eval`. diff --git a/.github/skills/dev/git-workflow/run-pre-commit-checks/SKILL.md b/.github/skills/dev/git-workflow/run-pre-commit-checks/SKILL.md new file mode 100644 index 000000000..b0eb24e4d --- /dev/null +++ b/.github/skills/dev/git-workflow/run-pre-commit-checks/SKILL.md @@ -0,0 +1,88 @@ +--- +name: run-pre-commit-checks +description: Run all mandatory pre-commit verification steps for the torrust-tracker project. 
Covers the pre-commit script (automated checks), manual review steps, and individual linter commands for debugging. Use before any commit or PR to ensure all quality gates pass. Triggers on "pre-commit checks", "run all checks", "verify before commit", or "check everything". +metadata: + author: torrust + version: "1.0" +--- + +# Run Pre-commit Checks + +## Git Hook (Recommended Setup) + +The repository ships a `pre-commit` Git hook that runs `./scripts/pre-commit.sh` +automatically on every `git commit`. Install it once after cloning: + +```bash +./scripts/install-git-hooks.sh +``` + +After installation the hook fires automatically; you do not need to invoke the script +manually before each commit. + +## Automated Checks + +> **⏱️ Expected runtime: ~3 minutes** on a modern developer machine. AI agents must set a +> command timeout of **at least 5 minutes** before invoking `./scripts/pre-commit.sh`. Agents +> with a default per-command timeout below 5 minutes will likely time out and report a false +> failure. + +Run the pre-commit script. **It must exit with code `0` before every commit.** + +```bash +./scripts/pre-commit.sh +``` + +The script runs these steps in order: + +1. `cargo machete` — unused dependency check +2. `linter all` — all linters (markdown, YAML, TOML, clippy, rustfmt, shellcheck, cspell) +3. `cargo test --doc --workspace` — documentation tests +4. `cargo test --tests --benches --examples --workspace --all-targets --all-features` — all tests + +> **MySQL tests**: MySQL-specific tests require a running instance and a feature flag: +> +> ```bash +> TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST=true cargo test --package bittorrent-tracker-core +> ``` +> +> These are not run by the pre-commit script. 
+ +## Manual Checks (Cannot Be Automated) + +Verify these by hand before committing: + +- **Self-review the diff**: read through `git diff --staged` for debug artifacts or unintended changes +- **Documentation updated**: if public API or behaviour changed, doc comments and `docs/` pages reflect it +- **`AGENTS.md` updated**: if architecture or key workflows changed, the relevant `AGENTS.md` is updated +- **New technical terms in `project-words.txt`**: new jargon added alphabetically + +## Before Opening a PR (Recommended) + +```bash +cargo +nightly doc --no-deps --bins --examples --workspace --all-features +``` + +## Debugging Individual Linters + +Run individual linters to isolate a failure: + +```bash +linter markdown # Markdown +linter yaml # YAML +linter toml # TOML +linter clippy # Rust code analysis +linter rustfmt # Rust formatting +linter shellcheck # Shell scripts +linter cspell # Spell checking +``` + +| Failure | Fix | +| ------------------- | --------------------------------------- | +| Unused dependency | Remove from `Cargo.toml` | +| Clippy warning | Fix the underlying issue | +| rustfmt error | Run `cargo fmt` | +| Markdown lint error | Fix formatting per `.markdownlint.json` | +| Spell check error | Add term to `project-words.txt` | +| Test failure | Fix the failing test or code | +| Doc build error | Fix Rust doc comment | diff --git a/.github/skills/dev/maintenance/install-linter/SKILL.md b/.github/skills/dev/maintenance/install-linter/SKILL.md new file mode 100644 index 000000000..9112acd31 --- /dev/null +++ b/.github/skills/dev/maintenance/install-linter/SKILL.md @@ -0,0 +1,62 @@ +--- +name: install-linter +description: Install the torrust-linting `linter` binary and its external tool dependencies. Use when setting up a new development environment, after a fresh clone, or when the `linter` binary is missing. 
Triggers on "install linter", "setup linter", "linter not found", "install torrust-linting", "missing linter binary", or "set up development environment". +metadata: + author: torrust + version: "1.0" +--- + +# Install the Linter + +The project uses a unified `linter` binary from +[torrust/torrust-linting](https://github.com/torrust/torrust-linting) to run all quality checks. + +## Install the `linter` Binary + +```bash +cargo install --locked --git https://github.com/torrust/torrust-linting --bin linter +``` + +Verify the installation: + +```bash +linter --version +``` + +## Install External Tool Dependencies + +The `linter` binary delegates to external tools. Install them if they are not already present: + +| Linter | Tool | Install command | +| ----------- | ---------------- | ------------------------------------- | +| Markdown | markdownlint-cli | `npm install -g markdownlint-cli` | +| YAML | yamllint | `pip3 install yamllint` | +| TOML | taplo | `cargo install taplo-cli --locked` | +| Spell check | cspell | `npm install -g cspell` | +| Shell | shellcheck | `apt install shellcheck` | +| Rust | clippy / rustfmt | bundled with `rustup` (no extra step) | + +> The `linter` binary will attempt to install missing npm-based tools automatically on first run. +> System-packaged tools (`yamllint`, `shellcheck`) must be installed manually. + +## Configuration Files + +The linters read configuration from files in the project root. These are already present in the +repository — no manual setup is needed: + +| File | Used by | +| -------------------- | ------------ | +| `.markdownlint.json` | markdownlint | +| `.yamllint-ci.yml` | yamllint | +| `.taplo.toml` | taplo | +| `cspell.json` | cspell | + +## Verify Full Setup + +After installing the binary and its dependencies, run all linters to confirm everything works: + +```bash +linter all +``` + +It must exit with code `0`. See the `run-linters` skill for day-to-day usage. 
diff --git a/.github/skills/dev/maintenance/setup-dev-environment/SKILL.md b/.github/skills/dev/maintenance/setup-dev-environment/SKILL.md new file mode 100644 index 000000000..1228611b5 --- /dev/null +++ b/.github/skills/dev/maintenance/setup-dev-environment/SKILL.md @@ -0,0 +1,123 @@ +--- +name: setup-dev-environment +description: Set up a local development environment for torrust-tracker from scratch. Covers system dependencies, Rust toolchain, storage directories, linter binary, git hooks, and smoke tests. Use when onboarding to the project, setting up a new machine, or after a fresh clone. Triggers on "setup dev environment", "fresh clone", "onboarding", "install dependencies", "set up environment", or "getting started". +metadata: + author: torrust + version: "1.0" +--- + +# Set Up the Development Environment + +Full setup guide for a fresh clone of `torrust-tracker`. Follow the steps in order. + +Reference: [How to Set Up the Development Environment](https://torrust.com/blog/how-to-setup-the-development-environment) + +## Step 1: System Dependencies + +Install the required system packages (Debian/Ubuntu): + +```bash +sudo apt-get install libsqlite3-dev pkg-config libssl-dev make +``` + +> For other distributions, install the equivalent packages for SQLite3 development headers, OpenSSL +> development headers, `pkg-config`, and `make`. + +## Step 2: Rust Toolchain + +```bash +rustup show # Confirm toolchain is active +rustup update # Update to latest stable +rustup toolchain install nightly # Required for docs generation +``` + +The project MSRV is **1.72**. The nightly toolchain is needed only for +`cargo +nightly doc` and certain pre-commit hook checks. + +## Step 3: Build + +```bash +cargo build +``` + +This compiles all workspace crates and verifies that all dependencies resolve correctly. + +## Step 4: Create Storage Directories + +The tracker writes runtime data (databases, logs, TLS certs, config) to `storage/`, which is +git-ignored. 
Create the required folders once: + +```bash +mkdir -p ./storage/tracker/lib/database +mkdir -p ./storage/tracker/lib/tls +mkdir -p ./storage/tracker/etc +``` + +## Step 5: Install the Linter Binary + +```bash +cargo install --locked --git https://github.com/torrust/torrust-linting --bin linter +``` + +See the `install-linter` skill for external tool dependencies (markdownlint, yamllint, etc.). + +## Step 6: Install Additional Cargo Tools + +```bash +cargo install cargo-machete # Unused dependency checker +``` + +## Step 7: Install Git Hooks + +Install the project pre-commit hook (one-time, re-run after hook changes): + +```bash +./scripts/install-git-hooks.sh +``` + +The hook runs `./scripts/pre-commit.sh` automatically on every `git commit`. + +## Step 8: Smoke Test + +Run the tracker with the default development configuration to confirm the build works: + +```bash +cargo run +``` + +Expected output includes lines like: + +```text +Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` +[UDP TRACKER] Starting on: udp://0.0.0.0:6969 +[HTTP TRACKER] Started on: http://0.0.0.0:7070 +[API] Started on http://127.0.0.1:1212 +[HEALTH CHECK API] Started on: http://127.0.0.1:1313 +``` + +Press `Ctrl-C` to stop. + +## Step 9: Verify Full Test Suite + +```bash +cargo test --doc --workspace +cargo test --tests --benches --examples --workspace --all-targets --all-features +``` + +Both commands must exit `0` before any commit. 
+ +## Custom Configuration (Optional) + +To run with a custom config instead of the default template: + +```bash +cp share/default/config/tracker.development.sqlite3.toml storage/tracker/etc/tracker.toml +# Edit storage/tracker/etc/tracker.toml as needed +TORRUST_TRACKER_CONFIG_TOML_PATH="./storage/tracker/etc/tracker.toml" cargo run +``` + +## Useful Development Tools + +- **DB Browser for SQLite** — inspect and edit SQLite databases: <https://sqlitebrowser.org/> +- **qBittorrent** — BitTorrent client for manual testing: <https://www.qbittorrent.org/> +- **imdl** — torrent file editor (`cargo install imdl`): <https://github.com/casey/intermodal> diff --git a/.github/skills/dev/maintenance/update-dependencies/SKILL.md b/.github/skills/dev/maintenance/update-dependencies/SKILL.md new file mode 100644 index 000000000..c0aa1c867 --- /dev/null +++ b/.github/skills/dev/maintenance/update-dependencies/SKILL.md @@ -0,0 +1,120 @@ +--- +name: update-dependencies +description: Guide for updating project dependencies in the torrust-tracker project. Covers the manual cargo update workflow including branch creation, running checks, committing, and pushing. Distinguishes trivial updates (Cargo.lock only) from breaking-change updates (code rework needed). Use when updating dependencies, running cargo update, or bumping deps. Triggers on "update dependencies", "cargo update", "update deps", or "bump dependencies". +metadata: + author: torrust + version: "1.0" +--- + +# Updating Dependencies + +This skill guides you through updating project dependencies for the Torrust Tracker project. 
+ +## Update Categories + +Before starting, decide which category the update falls into: + +| Category | Description | Branch / Issue | +| ------------ | -------------------------------------------- | -------------------------------------------------------------- | +| **Trivial** | `cargo update` only — no code changes needed | Timestamped branch, no issue required | +| **Breaking** | Dependency change requires code rework | If small: same branch. If large: open a separate issue per dep | + +Use `cargo update --dry-run` or read the dependency changelog to classify before starting. + +## Quick Reference + +```bash +# Get a timestamp (YYYYMMDD) +TIMESTAMP=$(date +%Y%m%d) + +# Create branch +git checkout develop && git pull --ff-only +git checkout -b "${TIMESTAMP}-update-dependencies" + +# Update dependencies +cargo update 2>&1 | tee /tmp/cargo-update.txt + +# If Cargo.lock has no changes, nothing to do — stop here. + +# Verify +./scripts/pre-commit.sh + +# Commit and push +git add Cargo.lock +git commit -S -m "chore: update dependencies" -m "$(cat /tmp/cargo-update.txt)" +git push {your-fork-remote} "${TIMESTAMP}-update-dependencies" +``` + +## Complete Workflow + +### Step 1: Create a Branch + +Generate a timestamp prefix to avoid branch name conflicts across repeated runs: + +```bash +TIMESTAMP=$(date +%Y%m%d) +git checkout develop +git pull --ff-only +git checkout -b "${TIMESTAMP}-update-dependencies" +``` + +For breaking-change updates that require a tracked issue: + +```bash +git checkout -b {issue-number}-update-dependencies +``` + +### Step 2: Run Cargo Update + +```bash +cargo update 2>&1 | tee /tmp/cargo-update.txt +``` + +If `Cargo.lock` has no changes, there is nothing to update — exit early. + +Review `/tmp/cargo-update.txt` to identify any major version bumps that may be breaking. 
+ +### Step 3: Handle Breaking Changes + +If any updated dependency introduced a breaking API change: + +- **Small rework** (a few lines, no design decisions): fix it in this branch and continue. +- **Large rework** (architectural impact or significant effort): revert that specific dependency + in `Cargo.toml`, keep the other trivial updates, and open a new issue for the breaking + dependency separately. + +```bash +# Revert a single crate to its current locked version to defer it +cargo update --precise {old-version} {crate-name} +``` + +### Step 4: Verify + +```bash +cargo machete +./scripts/pre-commit.sh +``` + +Fix any failures before proceeding. + +### Step 5: Commit and Push + +```bash +git add Cargo.lock +git commit -S -m "chore: update dependencies" -m "$(cat /tmp/cargo-update.txt)" +git push {your-fork-remote} "${TIMESTAMP}-update-dependencies" +``` + +### Step 6: Open PR + +Target: `torrust/torrust-tracker:develop` +Title: `chore: update dependencies` + +## Decision Guide + +| Scenario | Action | +| ---------------------------------------------- | ---------------------------------------------------------- | +| `cargo update` with no code changes | Trivial — timestamped branch, no issue | +| Breaking change, small rework (< 1 hour) | Fix in the same branch, note in PR description | +| Breaking change, large rework (> 1 hour) | Defer: revert that dep, open a separate issue, separate PR | +| Multiple breaking deps, independent migrations | One issue + PR per dependency to keep diffs reviewable | diff --git a/.github/skills/dev/planning/cleanup-completed-issues/SKILL.md b/.github/skills/dev/planning/cleanup-completed-issues/SKILL.md new file mode 100644 index 000000000..a4c7b3966 --- /dev/null +++ b/.github/skills/dev/planning/cleanup-completed-issues/SKILL.md @@ -0,0 +1,88 @@ +--- +name: cleanup-completed-issues +description: Guide for cleaning up completed and closed issues in the torrust-tracker project. 
Covers removing issue documentation files from docs/issues/ and committing the cleanup. Supports single issue cleanup or batch cleanup. Use when cleaning up closed issues, removing issue docs, or maintaining the docs/issues/ folder. Triggers on "cleanup issue", "remove issue", "clean completed issues", "delete closed issue", or "maintain issue docs". +metadata: + author: torrust + version: "1.0" +--- + +# Cleaning Up Completed Issues + +## When to Clean Up + +- **After PR merge**: Remove the issue file when its PR is merged +- **Batch cleanup**: Periodically clean up multiple closed issues during maintenance +- **Before releases**: Tidy documentation before major releases + +## Cleanup Approaches + +### Option 1: Single Issue Cleanup (Recommended) + +1. Verify the issue is closed on GitHub +2. Remove the issue file from `docs/issues/` +3. Commit and push changes + +### Option 2: Batch Cleanup + +1. List all issue files in `docs/issues/` +2. Check status of each issue on GitHub +3. Remove all closed issue files +4. 
Commit and push with a descriptive message
+
+## Step-by-Step Process
+
+### Step 1: Verify Issue is Closed on GitHub
+
+**Single issue:**
+
+```bash
+gh issue view {issue-number} --json state --jq .state
+```
+
+Expected: `CLOSED`
+
+**Batch:**
+
+```bash
+for issue in 21 22 23 24; do
+  state=$(gh issue view "$issue" --json state --jq .state 2>/dev/null || echo "NOT_FOUND")
+  echo "$issue:$state"
+done
+```
+
+### Step 2: Remove Issue Documentation File
+
+```bash
+# Single issue
+git rm docs/issues/42-add-peer-expiry-grace-period.md
+
+# Batch
+git rm docs/issues/21-some-old-issue.md \
+  docs/issues/22-another-old-issue.md
+```
+
+### Step 3: Commit and Push
+
+```bash
+# Single issue
+git commit -S -m "chore(issues): remove closed issue #42 documentation"
+
+# Batch
+git commit -S -m "chore(issues): remove documentation for closed issues #21, #22, #23"
+
+git push {your-fork-remote} {branch}
+```
+
+## Determining If an Issue File Should Stay
+
+Keep issue files when:
+
+- The issue is still open
+- The PR is open (still being worked on)
+- The specification is referenced from other active docs
+
+Remove issue files when:
+
+- The issue is **closed**
+- The implementing PR is **merged**
+- The file is no longer referenced by active work
diff --git a/.github/skills/dev/planning/create-adr/SKILL.md b/.github/skills/dev/planning/create-adr/SKILL.md
new file mode 100644
index 000000000..930a4bfc9
--- /dev/null
+++ b/.github/skills/dev/planning/create-adr/SKILL.md
@@ -0,0 +1,112 @@
+---
+name: create-adr
+description: Guide for creating Architectural Decision Records (ADRs) in the torrust-tracker project. Covers the timestamp-based file naming convention, free-form structure, index registration in the docs/adrs/index.md index table, and commit workflow. Use when documenting architectural decisions, recording design choices, or adding decision records. 
Triggers on "create ADR", "add ADR", "new decision record", "architectural decision", "document decision", or "add decision".
+metadata:
+  author: torrust
+  version: "1.0"
+---
+
+# Creating Architectural Decision Records
+
+## Quick Reference
+
+```bash
+# 1. Generate the filename prefix
+date -u +"%Y%m%d%H%M%S"
+# e.g. 20241115093012
+
+# 2. Create the ADR file
+# Format: YYYYMMDDHHMMSS_snake_case_title.md
+touch docs/adrs/20241115093012_your_decision_title.md
+
+# 3. Update the index
+# Add entry to docs/adrs/index.md
+
+# 4. Validate and commit
+linter markdown
+linter cspell
+git commit -S -m "docs(adrs): add ADR for {short description}"
+```
+
+## When to Create an ADR
+
+Create an ADR when making a decision that:
+
+- Affects the project's architecture or design patterns
+- Chooses one approach over alternatives that were considered
+- Has consequences worth documenting for future contributors
+- Answers "why was this done this way?"
+
+Do **not** create an ADR for trivial implementation choices or style preferences covered by linting.
+
+## File Naming Convention
+
+**Format**: `YYYYMMDDHHMMSS_snake_case_title.md`
+
+Generate the timestamp prefix:
+
+```bash
+date -u +"%Y%m%d%H%M%S"
+```
+
+**Examples**:
+
+- `20240227164834_use_plural_for_modules_containing_collections.md`
+- `20241115093012_adopt_axum_for_http_server.md`
+
+Location: `docs/adrs/`
+
+## ADR Structure
+
+There is no rigid template — derive structure from context. Use
+[docs/templates/ADR.md](../../../../../docs/templates/ADR.md) as a starting point. 
+
+Optional sections to add when relevant:
+
+- **Alternatives Considered**: other options explored and why they were rejected
+- **Consequences**: positive and negative effects of the decision
+
+## Step-by-Step Process
+
+### Step 1: Generate Filename
+
+```bash
+PREFIX=$(date -u +"%Y%m%d%H%M%S")
+TITLE="your_decision_title" # snake_case
+echo "docs/adrs/${PREFIX}_${TITLE}.md"
+```
+
+### Step 2: Write the ADR
+
+- **Description**: Explain the problem thoroughly — enough context for future contributors
+- **Agreement**: State clearly what was decided and why
+- **Date**: Today's date (`date -u +"%Y-%m-%d"`)
+- **References**: Issues, PRs, external docs
+
+### Step 3: Update the Index
+
+Add a row to the index table in `docs/adrs/index.md`:
+
+```markdown
+| [YYYYMMDDHHMMSS](YYYYMMDDHHMMSS_your_title.md) | YYYY-MM-DD | Short Title | One-sentence description. |
+```
+
+- The first column links to the ADR file using the timestamp as display text.
+- The short description should allow a reader to understand the decision without opening the file.
+
+### Step 4: Validate and Commit
+
+```bash
+linter markdown
+linter cspell
+linter all # full check
+
+git add docs/adrs/
+git commit -S -m "docs(adrs): add ADR for {short description}"
+git push {your-fork-remote} {branch}
+```
+
+## Example ADR
+
+For a real example, see
+[20240227164834_use_plural_for_modules_containing_collections.md](../../../../../docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md).
diff --git a/.github/skills/dev/planning/create-issue/SKILL.md b/.github/skills/dev/planning/create-issue/SKILL.md
new file mode 100644
index 000000000..ed38c9933
--- /dev/null
+++ b/.github/skills/dev/planning/create-issue/SKILL.md
@@ -0,0 +1,101 @@
+---
+name: create-issue
+description: Guide for creating GitHub issues in the torrust-tracker project. Covers the full workflow from specification drafting, user review, to GitHub issue creation with proper documentation and file naming. 
Supports task, bug, feature, and epic issue types. Use when creating issues, opening tickets, filing bugs, proposing tasks, or adding features. Triggers on "create issue", "open issue", "new issue", "file bug", "add task", "create epic", or "open ticket".
+metadata:
+  author: torrust
+  version: "1.0"
+---
+
+# Creating Issues
+
+## Issue Types
+
+| Type        | Label     | When to Use                                  |
+| ----------- | --------- | -------------------------------------------- |
+| **Task**    | `task`    | Single implementable unit of work            |
+| **Bug**     | `bug`     | Something broken that needs fixing           |
+| **Feature** | `feature` | New capability or enhancement                |
+| **Epic**    | `epic`    | Major feature area containing multiple tasks |
+
+## Workflow Overview
+
+The process is **spec-first**: write and review a specification before creating the GitHub issue.
+
+1. **Draft specification** document in `docs/issues/` (start from `docs/templates/ISSUE.md`)
+2. **User reviews** the draft specification
+3. **Create GitHub issue**
+4. **Rename spec file** to include the issue number
+5. **Pre-commit checks** and commit the spec
+
+**Never create the GitHub issue before the user reviews and approves the specification.**
+
+## Step-by-Step Process
+
+### Step 1: Draft Issue Specification
+
+Create a specification file with a **temporary name** (no issue number yet):
+
+```bash
+touch docs/issues/{short-description}.md
+```
+
+Use [docs/templates/ISSUE.md](../../../../../docs/templates/ISSUE.md) as the starting structure.
+Use **placeholders** for the issue number until after creation (e.g., `[To be assigned]`).
+
+After drafting, run linters:
+
+```bash
+linter markdown
+linter cspell
+```
+
+### Step 2: User Reviews the Draft
+
+**STOP HERE** — present the draft to the user. Iterate until approved.
+
+### Step 3: Create the GitHub Issue
+
+After user approval, create the GitHub issue. 
Options: + +**GitHub CLI:** + +```bash +gh issue create \ + --repo torrust/torrust-tracker \ + --title "{title}" \ + --body "{body}" \ + --label "{label}" +``` + +**MCP GitHub tools** (if available): use `mcp_github_github_issue_write` with `title`, `body`, and `labels`. + +### Step 4: Rename the Spec File + +Rename using the assigned issue number: + +```bash +git mv docs/issues/{short-description}.md \ + docs/issues/{number}-{short-description}.md +``` + +Update any issue number placeholders inside the file. + +### Step 5: Commit and Push + +```bash +linter all # Must pass + +git add docs/issues/ +git commit -S -m "docs(issues): add issue specification for #{number}" +git push {your-fork-remote} {branch} +``` + +## Naming Convention + +File name format: `{number}-{short-description}.md` + +Examples: + +- `1697-ai-agent-configuration.md` +- `42-add-peer-expiry-grace-period.md` +- `523-internal-linting-tool.md` diff --git a/.github/skills/dev/planning/write-markdown-docs/SKILL.md b/.github/skills/dev/planning/write-markdown-docs/SKILL.md new file mode 100644 index 000000000..a2c166efa --- /dev/null +++ b/.github/skills/dev/planning/write-markdown-docs/SKILL.md @@ -0,0 +1,70 @@ +--- +name: write-markdown-docs +description: Guide for writing Markdown documentation in this project. Covers GitHub Flavored Markdown pitfalls, especially the critical #NUMBER pattern that auto-links to GitHub issues and PRs (NEVER use #1, #2, #3 as step/list numbers). Use ordered lists or plain numbers instead. Covers intentional vs accidental autolinks for issues, @mentions, and commit SHAs. Use when writing .md files, documentation, issue descriptions, PR descriptions, or README updates. Triggers on "markdown", "write docs", "documentation", "#number", "github markdown", "autolink", "markdown pitfall", or "GFM". 
+metadata: + author: torrust + version: "1.0" +--- + +# Writing Markdown Documentation + +## Critical: #NUMBER Auto-links to GitHub Issues + +**GitHub automatically converts `#NUMBER` → link to issue/PR/discussion.** + +```markdown +❌ Bad: accidentally links to issues + +- Task #1: Set up infrastructure ← links to GitHub issue #1 +- Task #2: Configure database ← links to GitHub issue #2 + +Step #1: Install dependencies ← links to GitHub issue #1 +``` + +The links pollute the referenced issues with unrelated backlinks and confuse readers. + +### Fix: Use Ordered Lists or Plain Numbers + +```markdown +✅ Solution 1: Ordered list (automatic numbering) + +1. Set up infrastructure +2. Configure database +3. Deploy application + +✅ Solution 2: Plain numbers (no hash) + +- Task 1: Set up infrastructure +- Task 2: Configure database + +✅ Solution 3: Alternative formats + +- Task (1): Set up infrastructure +- Task [1]: Set up infrastructure +``` + +## When #NUMBER IS Intentional + +Use `#NUMBER` only when you explicitly want to link to that GitHub issue/PR: + +```markdown +✅ Intentional: referencing issue +This implements the behavior described in #42. +Closes #1697. 
+``` + +## Other GFM Auto-links to Know + +```markdown +@username → links to GitHub user profile (use intentionally for mentions) +abc1234 (SHA) → links to commit (useful for references) +owner/repo#42 → cross-repo issue link +``` + +## Checklist Before Committing Docs + +- [ ] No `#NUMBER` patterns used for enumeration or step numbering +- [ ] Ordered lists use Markdown syntax (`1.` `2.` `3.`) +- [ ] Any `#NUMBER` present is an intentional issue/PR reference +- [ ] Tables are consistently formatted +- [ ] `linter markdown` and `linter cspell` pass diff --git a/.github/skills/dev/rust-code-quality/handle-errors-in-code/SKILL.md b/.github/skills/dev/rust-code-quality/handle-errors-in-code/SKILL.md new file mode 100644 index 000000000..7b326ce60 --- /dev/null +++ b/.github/skills/dev/rust-code-quality/handle-errors-in-code/SKILL.md @@ -0,0 +1,114 @@ +--- +name: handle-errors-in-code +description: Guide for error handling in this Rust project. Covers the four principles (clarity, context, actionability, explicit enums over anyhow), the thiserror pattern for structured errors, including what/where/when/why context, writing actionable help text, and avoiding vague errors. Also covers the located-error package for errors with source location. Use when writing error types, handling Results, adding error variants, or reviewing error messages. Triggers on "error handling", "error type", "Result", "thiserror", "anyhow", "error enum", "error message", "handle error", "add error variant", or "located-error". +metadata: + author: torrust + version: "1.0" +--- + +# Handling Errors in Code + +## Core Principles + +1. **Clarity** — Users immediately understand what went wrong +2. **Context** — Include what/where/when/why +3. **Actionability** — Tell users how to fix it +4. 
**Explicit enums over `anyhow`** — Prefer structured errors for pattern matching + +## Prefer Explicit Enum Errors + +```rust +// ✅ Correct: explicit, matchable, clear +#[derive(Debug, thiserror::Error)] +pub enum TrackerError { + #[error("Torrent '{info_hash}' not found in whitelist")] + TorrentNotWhitelisted { info_hash: InfoHash }, + + #[error("Peer limit exceeded for torrent '{info_hash}': max {limit}")] + PeerLimitExceeded { info_hash: InfoHash, limit: usize }, +} + +// ❌ Wrong: opaque, hard to match +return Err(anyhow::anyhow!("Something went wrong")); +return Err("Invalid input".into()); +``` + +## Include Actionable Fix Instructions in Display + +When the error is user-facing, add instructions: + +```rust +#[error( + "Configuration file not found at '{path}'.\n\ + Copy the default: cp share/default/config/tracker.toml {path}" +)] +ConfigNotFound { path: PathBuf }, +``` + +## Context Requirements + +Each error should answer: + +- **What**: What operation was being performed? +- **Where**: Which component, file, or resource? +- **When**: Under what conditions? +- **Why**: What caused the failure? + +```rust +// ✅ Good: full context +#[error("UDP socket bind failed for '{addr}': {source}. 
Is port {port} already in use?")] +SocketBindFailed { addr: SocketAddr, port: u16, source: std::io::Error }, + +// ❌ Bad: no context +return Err("bind failed".into()); +``` + +## The `located-error` Package + +For errors that benefit from source location tracking, use the `located-error` package: + +```toml +[dependencies] +torrust-tracker-located-error = { workspace = true } +``` + +```rust +use torrust_tracker_located_error::Located; + +// Wraps any error with file and line information +let err = Located(my_error).into(); +``` + +## Unwrap and Expect Policy + +| Context | `.unwrap()` | `.expect("msg")` | `?` / `Result` | +| ---------------------- | ----------- | ----------------------------------------- | -------------- | +| Production code | Never | Only when failure is logically impossible | Default | +| Tests and doc examples | Acceptable | Preferred when message adds clarity | — | + +```rust +// ✅ Production: propagate errors with ? +fn load_config(path: &Path) -> Result<Config, ConfigError> { + let content = std::fs::read_to_string(path) + .map_err(|e| ConfigError::FileAccess { path: path.to_path_buf(), source: e })?; + toml::from_str(&content) + .map_err(|e| ConfigError::InvalidToml { path: path.to_path_buf(), source: e }) +} + +// ✅ Tests: unwrap() is fine +#[test] +fn it_should_parse_valid_config() { + let config = Config::parse(VALID_TOML).unwrap(); + assert_eq!(config.http_api.bind_address, "127.0.0.1:1212"); +} +``` + +## Quick Checklist + +- [ ] Error type uses `thiserror::Error` derive +- [ ] Error message includes specific context (names, paths, addresses, values) +- [ ] Error message includes fix instructions where possible +- [ ] Prefer `enum` over `Box<dyn Error>` or `anyhow` in library code +- [ ] No vague messages like "invalid input" or "error occurred" +- [ ] No `.unwrap()` in production code (tests and doc examples are fine) +- [ ] Consider `located-error` for diagnostics-rich errors diff --git 
a/.github/skills/dev/rust-code-quality/handle-secrets/SKILL.md b/.github/skills/dev/rust-code-quality/handle-secrets/SKILL.md new file mode 100644 index 000000000..b3e6e5d43 --- /dev/null +++ b/.github/skills/dev/rust-code-quality/handle-secrets/SKILL.md @@ -0,0 +1,87 @@ +--- +name: handle-secrets +description: Guide for handling sensitive data (secrets) in this Rust project. NEVER use plain String for API tokens, passwords, or other credentials. Use the secrecy crate's Secret<T> wrapper to prevent accidental exposure through Debug output, logs, and error messages. Call .expose_secret() only when the actual value is needed. Use when working with credentials, API keys, tokens, passwords, or any sensitive configuration. Triggers on "secret", "API token", "password", "credential", "sensitive data", "secrecy", or "expose secret". +metadata: + author: torrust + version: "1.0" +--- + +# Handling Sensitive Data (Secrets) + +## Core Rule + +**NEVER use plain `String` for sensitive data.** Wrap secrets in `secrecy::Secret<String>` +(or similar) to prevent accidental exposure. + +```rust +// ❌ WRONG: secret leaked in Debug output +pub struct ApiConfig { + pub token: String, +} +println!("{config:?}"); // → ApiConfig { token: "secret_abc123" } — LEAKED! 
+``` + +```rust +// ✅ CORRECT: secret redacted in Debug +use secrecy::Secret; +pub struct ApiConfig { + pub token: Secret<String>, +} +println!("{config:?}"); // → ApiConfig { token: Secret([REDACTED]) } +``` + +## Using the `secrecy` Crate + +Add the dependency: + +```toml +[dependencies] +secrecy = { workspace = true } +``` + +Basic usage: + +```rust +use secrecy::{Secret, ExposeSecret}; + +// Wrap the secret +let token = Secret::new(String::from("my-api-token")); + +// Access the value only when truly needed (e.g., making the actual API call) +let token_str: &str = token.expose_secret(); +``` + +## What to Protect + +Wrap with `Secret<T>` when the value is: + +- API tokens (REST API admin token, external service tokens) +- Passwords (database credentials, service accounts) +- Private keys or certificates + +## Rules for `.expose_secret()` + +- Call **as late as possible** — only at the point where the value is required +- **Never** call in `log!`, `debug!`, `info!`, `warn!`, `error!` macros +- **Never** call in `Display` or `Debug` implementations +- **Never** include in error messages that may be logged or shown to users + +```rust +// ✅ Correct: called at last moment for HTTP header +let response = client + .get(url) + .header("Authorization", format!("Bearer {}", token.expose_secret())) + .send() + .await?; + +// ❌ Wrong: exposed in log +tracing::debug!("Using token: {}", token.expose_secret()); +``` + +## Checklist + +- [ ] No plain `String` fields for tokens, passwords, or private keys +- [ ] `Secret<String>` (or equivalent) used for all sensitive values +- [ ] `.expose_secret()` called only at the last moment +- [ ] No `.expose_secret()` in log statements or error messages +- [ ] No sensitive values in `Display` or `Debug` output diff --git a/.github/skills/dev/testing/write-unit-test/SKILL.md b/.github/skills/dev/testing/write-unit-test/SKILL.md new file mode 100644 index 000000000..5ba1a8381 --- /dev/null +++ 
b/.github/skills/dev/testing/write-unit-test/SKILL.md @@ -0,0 +1,221 @@ +--- +name: write-unit-test +description: Guide for writing unit tests following project conventions including behavior-driven naming (it*should*\*), AAA pattern, MockClock for deterministic time testing, and parameterized tests with rstest. Use when adding tests for domain entities, value objects, utilities, or tracker logic. Triggers on "write unit test", "add test", "test coverage", "unit testing", or "add unit tests". +metadata: + author: torrust + version: "1.0" +--- + +# Writing Unit Tests + +## Core Principles + +Unit tests in this project are written against the **Test Desiderata** — the 12 properties that +make tests valuable, defined by Kent Beck. Not every property applies equally to every test, but +treat them as the standard to reason about and optimize for. + +| Property | What it means | +| ------------------------- | ----------------------------------------------------------------------------------- | +| **Isolated** | Tests return the same result regardless of run order. No shared mutable state. | +| **Composable** | Different dimensions of variability can be tested separately and results combined. | +| **Deterministic** | Same inputs always produce the same result. No randomness, no wall-clock time. | +| **Fast** | Tests run in milliseconds. Unit tests must never block on I/O or sleep. | +| **Writable** | Writing the test should cost much less than writing the code it covers. | +| **Readable** | A reader can understand what behaviour is being tested and why, without context. | +| **Behavioral** | Tests are sensitive to changes in observable behaviour, not internal structure. | +| **Structure-insensitive** | Refactoring the implementation should not break tests that test the same behaviour. | +| **Automated** | Tests run without human intervention (`cargo test`). | +| **Specific** | When a test fails, the cause is immediately obvious from the failure message. 
| +| **Predictive** | Passing tests give genuine confidence the code is ready for production. | +| **Inspiring** | Passing the full suite inspires confidence to ship. | + +Some properties support each other (automation makes tests faster). Some trade off against each +other (more predictive tests tend to be slower). Use composability to resolve apparent conflicts. + +Reference: <https://testdesiderata.com/> and Kent Beck's original papers on +[Test Desiderata](https://medium.com/@kentbeck_7670/test-desiderata-94150638a4b3) and +[Programmer Test Principles](https://medium.com/@kentbeck_7670/programmer-test-principles-d01c064d7934). + +### Project-specific conventions + +- **Behavior-driven naming** — test names document what the code does +- **AAA Pattern** — Arrange → Act → Assert (clear structure) +- **Deterministic** — use `MockClock` instead of real time (see Phase 2) +- **Isolated** — no shared mutable state between tests +- **Fast** — unit tests run in milliseconds + +## Phase 1: Basic Unit Test + +### Naming Convention + +**Format**: `it_should_{expected_behavior}_when_{condition}` + +- Always use the `it_should_` prefix +- Never use the `test_` prefix +- Use `when_` or `given_` for conditions +- Be specific and descriptive + +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_should_return_error_when_info_hash_is_invalid() { + // Arrange + let invalid_hash = "not-a-valid-hash"; + + // Act + let result = InfoHash::from_str(invalid_hash); + + // Assert + assert!(result.is_err()); + } + + #[test] + fn it_should_parse_valid_info_hash() { + // Arrange + let valid_hex = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; + + // Act + let result = InfoHash::from_str(valid_hex); + + // Assert + assert!(result.is_ok()); + } +} +``` + +### Running Tests + +```bash +# Run all tests in a package +cargo test -p bittorrent-tracker-core + +# Run specific test by name +cargo test it_should_return_error_when_info_hash_is_invalid + +# Run tests in a module +cargo 
test info_hash::tests + +# Run with output +cargo test -- --nocapture +``` + +## Phase 2: Deterministic Time with `clock::Stopped` + +The `clock` workspace package provides `clock::Stopped` for deterministic time testing. +Never call `std::time::SystemTime::now()` or `chrono::Utc::now()` directly in production code +that needs testing. Instead, use the type-level clock abstraction. + +### Use the Type-Level Clock Alias + +Copy the following boilerplate into each crate that needs a clock. The `CurrentClock` alias +automatically selects `Working` in production and `Stopped` in tests: + +```rust +/// Working version, for production. +#[cfg(not(test))] +pub(crate) type CurrentClock = torrust_tracker_clock::clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +pub(crate) type CurrentClock = torrust_tracker_clock::clock::Stopped; +``` + +In production code, obtain the current time via the `Time` trait: + +```rust +use torrust_tracker_clock::clock::Time as _; + +pub fn is_peer_expired(last_seen: std::time::Duration, ttl: u32) -> bool { + let now = CurrentClock::now(); // returns DurationSinceUnixEpoch (= std::time::Duration) + now.saturating_sub(last_seen) > std::time::Duration::from_secs(u64::from(ttl)) +} +``` + +### Control Time in Tests + +Use `clock::Stopped::local_set` to pin the clock to a specific instant. The stopped clock is +thread-local, so tests are isolated from each other by default. 
+ +```rust +#[cfg(test)] +mod tests { + use std::time::Duration; + + use torrust_tracker_clock::clock::{stopped::Stopped as _, Time as _}; + use torrust_tracker_clock::clock::Stopped; + + use super::*; + + #[test] + fn it_should_mark_peer_as_expired_when_ttl_has_elapsed() { + // Arrange — pin the clock to a known instant + let fixed_time = Duration::from_secs(1_700_000_100); + Stopped::local_set(&fixed_time); + + let last_seen = Duration::from_secs(1_700_000_000); + let ttl = 60u32; + + // Act + let expired = is_peer_expired(last_seen, ttl); + + // Assert + assert!(expired); + + // Clean up — reset to zero so other tests start from a clean state + Stopped::local_reset(); + } +} +``` + +> **Key points** +> +> - `Stopped::now()` defaults to `Duration::ZERO` at the start of each test thread. +> - `Stopped::local_set(&duration)` sets the current time for the calling thread only. +> - `Stopped::local_reset()` resets back to `Duration::ZERO`. +> - `Stopped::local_add(&duration)` advances the clock by the given amount. +> - Import the `Stopped` trait (`use …::stopped::Stopped as _`) to bring its methods into scope. + +## Phase 3: Parameterized Tests with rstest + +Use `rstest` for multiple input/output combinations to avoid repetition. + +```toml +[dev-dependencies] +rstest = { workspace = true } +``` + +```rust +use rstest::rstest; + +#[rstest] +#[case("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", true)] +#[case("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", true)] +#[case("not-a-hash", false)] +#[case("", false)] +fn it_should_validate_info_hash(#[case] input: &str, #[case] is_valid: bool) { + let result = InfoHash::from_str(input); + assert_eq!(result.is_ok(), is_valid, "input: {input}"); +} +``` + +## Phase 4: Test Helpers + +The `test-helpers` workspace package provides shared test utilities. + +```toml +[dev-dependencies] +torrust-tracker-test-helpers = { workspace = true } +``` + +Check the package for available mock servers, fixture generators, and utility types. 
+ +## Quick Checklist + +- [ ] Test name uses `it_should_` prefix +- [ ] Test follows AAA pattern with comments (`// Arrange`, `// Act`, `// Assert`) +- [ ] No `std::time::SystemTime::now()` in production code — use the `CurrentClock` type alias instead +- [ ] No shared mutable state between tests +- [ ] `cargo test -p <package>` passes diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 9f51f3124..7e8ffa442 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -26,11 +26,11 @@ jobs: steps: - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - id: build name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: file: ./Containerfile push: false @@ -46,7 +46,7 @@ jobs: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: compose name: Compose @@ -80,9 +80,15 @@ jobs: echo "continue=true" >> $GITHUB_OUTPUT echo "On \`develop\` Branch, Type: \`development\`" - elif [[ $(echo "${{ github.ref }}" | grep -P '^(refs\/heads\/releases\/)(v)(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$') ]]; then + elif [[ "${{ github.ref }}" =~ ^refs/heads/releases/ ]]; then + semver_regex='^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-((0|[1-9][0-9]*|[0-9]*[A-Za-z-][0-9A-Za-z-]*)(\.(0|[1-9][0-9]*|[0-9]*[A-Za-z-][0-9A-Za-z-]*))*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$' + version=$(echo "${{ github.ref }}" | sed -n -E 's#^refs/heads/releases/##p') + + if [[ ! "$version" =~ $semver_regex ]]; then + echo "Not a valid release branch semver. 
Will Not Continue" + exit 0 + fi - version=$(echo "${{ github.ref }}" | sed -n -E 's/^(refs\/heads\/releases\/)//p') echo "version=$version" >> $GITHUB_OUTPUT echo "type=release" >> $GITHUB_OUTPUT echo "continue=true" >> $GITHUB_OUTPUT @@ -108,7 +114,7 @@ jobs: steps: - id: meta name: Docker Meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@v6 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" @@ -117,17 +123,17 @@ jobs: - id: login name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: file: ./Containerfile push: true @@ -146,7 +152,7 @@ jobs: steps: - id: meta name: Docker Meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@v6 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" @@ -158,17 +164,17 @@ jobs: - id: login name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: file: ./Containerfile push: true diff --git a/.github/workflows/contract.yaml b/.github/workflows/contract.yaml deleted file mode 100644 index 2777417e3..000000000 --- a/.github/workflows/contract.yaml +++ /dev/null @@ -1,58 +0,0 @@ -name: Contract - -on: - push: - pull_request: - -env: - CARGO_TERM_COLOR: always - -jobs: - contract: - name: Contract - runs-on: ubuntu-latest - - strategy: - matrix: - 
toolchain: [nightly, stable] - - steps: - - id: checkout - name: Checkout Repository - uses: actions/checkout@v4 - - - id: setup - name: Setup Toolchain - uses: dtolnay/rust-toolchain@stable - with: - toolchain: ${{ matrix.toolchain }} - components: llvm-tools-preview - - - id: cache - name: Enable Job Cache - uses: Swatinem/rust-cache@v2 - - - id: tools - name: Install Tools - uses: taiki-e/install-action@v2 - with: - tool: cargo-llvm-cov, cargo-nextest - - - id: pretty-test - name: Install pretty-test - run: cargo install cargo-pretty-test - - - id: contract - name: Run contract - run: | - cargo test --lib --bins - cargo pretty-test --lib --bins - - - id: summary - name: Generate contract Summary - run: | - echo "### Tracker Living Contract! :rocket:" >> $GITHUB_STEP_SUMMARY - cargo pretty-test --lib --bins --color=never >> $GITHUB_STEP_SUMMARY - echo '```console' >> $GITHUB_STEP_SUMMARY - echo "$OUTPUT" >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml new file mode 100644 index 000000000..2017038b9 --- /dev/null +++ b/.github/workflows/copilot-setup-steps.yml @@ -0,0 +1,53 @@ +name: "Copilot Setup Steps" + +# Automatically run the setup steps when they are changed to allow for easy +# validation, and allow manual testing through the repository's "Actions" tab. +on: + workflow_dispatch: + push: + paths: + - .github/workflows/copilot-setup-steps.yml + - scripts/install-git-hooks.sh + - scripts/pre-commit.sh + pull_request: + paths: + - .github/workflows/copilot-setup-steps.yml + - scripts/install-git-hooks.sh + - scripts/pre-commit.sh + +jobs: + # The job MUST be called `copilot-setup-steps` or it will not be picked up + # by Copilot. + copilot-setup-steps: + runs-on: ubuntu-latest + timeout-minutes: 30 + + # Set the permissions to the lowest permissions possible needed for your + # steps. Copilot will be given its own token for its operations. 
+ permissions: + contents: read + + steps: + - name: Checkout code + uses: actions/checkout@v6 + + - name: Set up Rust toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Enable Rust cache + uses: Swatinem/rust-cache@v2 + + - name: Build workspace + run: cargo build --workspace + + - name: Install linter + run: cargo install --locked --git https://github.com/torrust/torrust-linting --bin linter + + - name: Install cargo-machete + run: cargo install cargo-machete + + - name: Install Git pre-commit hooks + run: ./scripts/install-git-hooks.sh + + - name: Smoke-check — run all linters + run: linter all diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index e10c5ac66..ada96f77f 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Install LLVM tools run: sudo apt-get update && sudo apt-get install -y llvm @@ -44,14 +44,14 @@ jobs: - id: coverage name: Generate Coverage Report run: | - cargo clean + cargo clean cargo llvm-cov --all-features --workspace --codecov --output-path ./codecov.json - id: upload name: Upload Coverage Report - uses: codecov/codecov-action@v5 + uses: codecov/codecov-action@v6 with: verbose: true token: ${{ secrets.CODECOV_TOKEN }} files: ${{ github.workspace }}/codecov.json - fail_ci_if_error: true \ No newline at end of file + fail_ci_if_error: true diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 1422ec394..b544d1da2 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -17,7 +17,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -42,7 +42,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain 
@@ -73,8 +73,11 @@ jobs: cargo publish -p torrust-tracker-clock cargo publish -p torrust-tracker-configuration cargo publish -p torrust-tracker-contrib-bencode + cargo publish -p torrust-tracker-events cargo publish -p torrust-tracker-located-error + cargo publish -p torrust-tracker-metrics cargo publish -p torrust-tracker-primitives + cargo publish -p torrust-tracker-swarm-coordination-registry cargo publish -p torrust-tracker-test-helpers - cargo publish -p torrust-tracker-torrent-repository + cargo publish -p torrust-tracker-torrent-benchmarking cargo publish -p torrust-udp-tracker-server diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index d1b241b9d..e07a5a755 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Install LLVM tools run: sudo apt-get update && sudo apt-get install -y llvm @@ -44,7 +44,7 @@ jobs: - id: coverage name: Generate Coverage Report run: | - cargo clean + cargo clean cargo llvm-cov --all-features --workspace --codecov --output-path ./codecov.json - name: Store PR number and commit SHA @@ -59,13 +59,13 @@ jobs: # Triggered sub-workflow is not able to detect the original commit/PR which is available # in this workflow. - name: Store PR number - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v7 with: name: pr_number path: pr_number.txt - name: Store commit SHA - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v7 with: name: commit_sha path: commit_sha.txt @@ -74,7 +74,7 @@ jobs: # is executed by a different workflow `upload_coverage.yml`. The reason for this # split is because `on.pull_request` workflows don't have access to secrets. 
- name: Store coverage report in artifacts - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v7 with: name: codecov_report path: ./codecov.json diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml index bb8283f30..a312c335f 100644 --- a/.github/workflows/labels.yaml +++ b/.github/workflows/labels.yaml @@ -25,7 +25,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: sync name: Apply Labels from File diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 671864fc9..173613ec3 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -15,7 +15,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -33,9 +33,10 @@ jobs: run: cargo fmt --check check: - name: Static Analysis + name: Linting runs-on: ubuntu-latest needs: format + timeout-minutes: 15 strategy: matrix: @@ -44,46 +45,32 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain uses: dtolnay/rust-toolchain@stable with: toolchain: ${{ matrix.toolchain }} - components: clippy + components: clippy, rustfmt + + - id: node + name: Setup Node.js + uses: actions/setup-node@v6 + with: + node-version: "20" - id: cache name: Enable Workflow Cache uses: Swatinem/rust-cache@v2 - id: tools - name: Install Tools - uses: taiki-e/install-action@v2 - with: - tool: cargo-machete - - - id: check - name: Run Build Checks - run: cargo check --tests --benches --examples --workspace --all-targets --all-features + name: Install Internal Linter + run: cargo install --locked --git https://github.com/torrust/torrust-linting --bin linter - id: lint - name: Run Lint Checks - run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features - - - id: docs - name: Lint 
Documentation - env: - RUSTDOCFLAGS: "-D warnings" - run: cargo doc --no-deps --bins --examples --workspace --all-features - - - id: clean - name: Clean Build Directory - run: cargo clean - - - id: deps - name: Check Unused Dependencies - run: cargo machete + name: Run All Linters + run: linter all build: name: Build on ${{ matrix.os }} (${{ matrix.toolchain }}) @@ -96,7 +83,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -119,7 +106,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -173,7 +160,7 @@ jobs: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: test name: Run E2E Tests diff --git a/.github/workflows/upload_coverage_pr.yaml b/.github/workflows/upload_coverage_pr.yaml index 1ed2f7bcc..442afe31b 100644 --- a/.github/workflows/upload_coverage_pr.yaml +++ b/.github/workflows/upload_coverage_pr.yaml @@ -1,7 +1,7 @@ name: Upload Coverage Report (PR) on: - # This workflow is triggered after every successfull execution + # This workflow is triggered after every successful execution # of `Generate Coverage Report` workflow. 
workflow_run: workflows: ["Generate Coverage Report (PR)"] @@ -22,7 +22,7 @@ jobs: steps: - name: "Download existing coverage report" id: prepare_report - uses: actions/github-script@v7 + uses: actions/github-script@v9 with: script: | var fs = require('fs'); @@ -96,13 +96,13 @@ jobs: echo "override_commit=$(<commit_sha.txt)" >> "$GITHUB_OUTPUT" - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: ref: ${{ steps.parse_previous_artifacts.outputs.override_commit || '' }} path: repo_root - name: Upload coverage to Codecov - uses: codecov/codecov-action@v5 + uses: codecov/codecov-action@v6 with: verbose: true token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.gitignore b/.gitignore index 8bfa717b7..4b811d59f 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,8 @@ /tracker.toml callgrind.out codecov.json +integration_tests_sqlite3.db lcov.info perf.data* +repomix-output.xml rustc-ice-*.txt diff --git a/.markdownlint.json b/.markdownlint.json new file mode 100644 index 000000000..19ec47c2e --- /dev/null +++ b/.markdownlint.json @@ -0,0 +1,18 @@ +{ + "default": true, + "MD013": false, + "MD031": true, + "MD032": true, + "MD040": true, + "MD022": true, + "MD009": true, + "MD007": { + "indent": 2 + }, + "MD026": false, + "MD041": false, + "MD034": false, + "MD024": false, + "MD033": false, + "MD060": false +} diff --git a/.taplo.toml b/.taplo.toml new file mode 100644 index 000000000..0168711e8 --- /dev/null +++ b/.taplo.toml @@ -0,0 +1,27 @@ +# Taplo configuration file for TOML formatting +# Used by the "Even Better TOML" VS Code extension + +# Exclude generated and runtime folders from linting +exclude = [ ".coverage/**", "storage/**", "target/**" ] + +[formatting] +# Preserve blank lines that exist +allowed_blank_lines = 1 +# Don't reorder keys to maintain structure +reorder_keys = false +# Array formatting +array_auto_collapse = false +array_auto_expand = false +array_trailing_comma = true +# Inline table formatting +compact_arrays 
= false +compact_inline_tables = false +inline_table_expand = false +# Alignment +align_comments = true +align_entries = false +# Indentation +indent_entries = false +indent_tables = false +# Other +trailing_newline = true diff --git a/.vscode/mcp.json b/.vscode/mcp.json new file mode 100644 index 000000000..506a52259 --- /dev/null +++ b/.vscode/mcp.json @@ -0,0 +1,26 @@ +{ + "inputs": [ + { + "type": "promptString", + "id": "github_token", + "description": "GitHub Personal Access Token", + "password": true + } + ], + "servers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:github_token}" + } + } + } +} \ No newline at end of file diff --git a/.yamllint-ci.yml b/.yamllint-ci.yml new file mode 100644 index 000000000..9380b592a --- /dev/null +++ b/.yamllint-ci.yml @@ -0,0 +1,16 @@ +extends: default + +rules: + line-length: + max: 200 # More reasonable for infrastructure code + comments: + min-spaces-from-content: 1 # Allow single space before comments + document-start: disable # Most project YAML files don't require --- + truthy: + allowed-values: ["true", "false", "yes", "no", "on", "off"] # Allow common GitHub Actions values + +# Ignore generated/runtime directories +ignore: | + target/** + storage/** + .coverage/** diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 000000000..801bf8eef --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,389 @@ +# Torrust Tracker — AI Assistant Instructions + +**Repository**: [torrust/torrust-tracker](https://github.com/torrust/torrust-tracker) + +## 📋 Project Overview + +**Torrust Tracker** is a high-quality, production-grade BitTorrent tracker written in Rust. It +matchmakes peers and collects statistics, supporting the UDP, HTTP, and TLS socket types with +native IPv4/IPv6 support, private/whitelisted mode, and a management REST API. 
+ +- **Language**: Rust (edition 2021, MSRV 1.72) +- **License**: AGPL-3.0-only +- **Version**: 3.0.0-develop +- **Web framework**: [Axum](https://github.com/tokio-rs/axum) +- **Async runtime**: Tokio +- **Protocols**: BitTorrent UDP (BEP 15), HTTP (BEP 3/23), REST management API +- **Databases**: SQLite3, MySQL +- **Workspace type**: Cargo workspace (multi-crate monorepo) + +## 🏗️ Tech Stack + +- **Languages**: Rust, YAML, TOML, Markdown, Shell scripts +- **Web framework**: Axum (HTTP server + REST API) +- **Async runtime**: Tokio (multi-thread) +- **Testing**: testcontainers (E2E) +- **Databases**: SQLite3, MySQL +- **Containerization**: Docker / Podman (`Containerfile`) +- **CI**: GitHub Actions +- **Linting tools**: markdownlint, yamllint, taplo, cspell, shellcheck, clippy, rustfmt (unified + under the `linter` binary from [torrust/torrust-linting](https://github.com/torrust/torrust-linting)) + +## 📁 Key Directories + +- `src/` — Main binary and library entry points (`main.rs`, `lib.rs`, `app.rs`, `container.rs`) +- `src/bin/` — Additional binary targets (`e2e_tests_runner`, `http_health_check`, `profiling`) +- `src/bootstrap/` — Application bootstrap logic +- `src/console/` — Console entry points +- `packages/` — Cargo workspace packages (all domain logic lives here; see package catalog below) +- `console/` — Console tools (e.g., `tracker-client`) +- `contrib/` — Community-contributed utilities (`bencode`) and developer tooling +- `contrib/dev-tools/` — Developer tools: git hooks (`pre-commit.sh`, `pre-push.sh`), + container scripts, and init scripts +- `tests/` — Integration tests (`integration.rs`, `servers/`) +- `docs/` — Project documentation, ADRs, issue specs, and benchmarking guides +- `docs/adrs/` — Architectural Decision Records +- `docs/issues/` — Issue specs / implementation plans +- `share/default/` — Default configuration files and fixtures +- `storage/` — Runtime data (git-ignored); databases, logs, config +- `.github/workflows/` — CI/CD 
workflows (testing, coverage, container, deployment) +- `.github/skills/` — Agent Skills for specialized workflows and task-specific guidance +- `.github/agents/` — Custom Copilot agents and their repository-specific definitions + +## 📦 Package Catalog + +All packages live under `packages/`. The workspace version is `3.0.0-develop`. + +| Package | Prefix / Layer | Description | +| --------------------------------- | -------------- | ------------------------------------------------ | +| `axum-server` | `axum-*` | Base Axum HTTP server infrastructure | +| `axum-http-tracker-server` | `axum-*` | BitTorrent HTTP tracker server (BEP 3/23) | +| `axum-rest-tracker-api-server` | `axum-*` | Management REST API server | +| `axum-health-check-api-server` | `axum-*` | Health monitoring endpoint | +| `http-tracker-core` | `*-core` | HTTP-specific tracker domain logic | +| `udp-tracker-core` | `*-core` | UDP-specific tracker domain logic | +| `tracker-core` | `*-core` | Central tracker peer-management logic | +| `http-protocol` | `*-protocol` | HTTP tracker protocol (BEP 3/23) parsing | +| `udp-protocol` | `*-protocol` | UDP tracker protocol (BEP 15) framing/parsing | +| `swarm-coordination-registry` | domain | Torrent/peer coordination registry | +| `configuration` | domain | Config file parsing, environment variables | +| `primitives` | domain | Core domain types (InfoHash, PeerId, …) | +| `clock` | utilities | Mockable time source for deterministic testing | +| `located-error` | utilities | Diagnostic errors with source locations | +| `test-helpers` | utilities | Mock servers, test data generation | +| `server-lib` | shared | Shared server library utilities | +| `tracker-client` | client tools | CLI tracker interaction/testing client | +| `rest-tracker-api-client` | client tools | REST API client library | +| `rest-tracker-api-core` | client tools | REST API core logic | +| `udp-tracker-server` | server | UDP tracker server implementation | +| `torrent-repository` | domain | 
Torrent metadata storage and InfoHash management | +| `events` | domain | Domain event definitions | +| `metrics` | domain | Prometheus metrics integration | +| `torrent-repository-benchmarking` | benchmarking | Torrent storage benchmarks | + +**Console tools** (under `console/`): + +| Tool | Description | +| ---------------- | ------------------------------------ | +| `tracker-client` | Client for interacting with trackers | + +**Community contributions** (under `contrib/`): + +| Crate | Description | +| --------- | ------------------------------- | +| `bencode` | Bencode encode/decode utilities | + +## 🏷️ Package Naming Conventions + +| Prefix | Responsibility | Dependencies | +| ------------ | -------------------------------------- | ------------------------ | +| `axum-*` | HTTP server components using Axum | Axum framework | +| `*-server` | Server implementations | Corresponding `*-core` | +| `*-core` | Domain logic and business rules | Protocol implementations | +| `*-protocol` | BitTorrent protocol implementations | BEP specifications | +| `udp-*` | UDP protocol-specific implementations | Tracker core | +| `http-*` | HTTP protocol-specific implementations | Tracker core | + +## 📄 Key Configuration Files + +| File | Used by | +| -------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | +| `.markdownlint.json` | markdownlint | +| `.yamllint-ci.yml` | yamllint | +| `.taplo.toml` | taplo (TOML formatting) | +| `cspell.json` | cspell (spell checker) configuration | +| `project-words.txt` | cspell project-specific dictionary | +| `rustfmt.toml` | rustfmt (`group_imports = "StdExternalCrate"`, `max_width = 130`) | +| `.cargo/config.toml` | Cargo aliases (`cov`, `cov-lcov`, `cov-html`, `time`) and global `rustflags` (`-D warnings`, `-D unused`, `-D rust-2018-idioms`, …) | +| `Cargo.toml` | Cargo workspace root | +| `compose.yaml` | Docker Compose for local dev and demo 
| +| `Containerfile` | Container image definition | +| `codecov.yaml` | Code coverage configuration | + +## 🧪 Build & Test + +### Setup + +```sh +rustup show # Check active toolchain +rustup update # Update toolchain +rustup toolchain install nightly # Optional: only needed for manual cargo +nightly doc; the repo hook runs ./scripts/pre-commit.sh +``` + +### Build + +```sh +cargo build # Build all workspace crates +cargo build --release # Release build +cargo build --package <pkg> # Build a specific package +``` + +### Test + +```sh +cargo test --doc --workspace # Documentation tests +cargo test --tests --benches --examples --workspace \ + --all-targets --all-features # All tests +cargo test -p <package-name> # Single package + +# MySQL-specific tests (requires a running MySQL instance) +TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST=true \ + cargo test --package bittorrent-tracker-core + +# Integration tests (root) +cargo test --test integration # tests/integration.rs +``` + +### E2E Tests + +```sh +cargo run --bin e2e_tests_runner -- \ + --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" +``` + +### Documentation + +```sh +cargo +nightly doc --no-deps --bins --examples --workspace --all-features +``` + +### Benchmarks + +```sh +cargo bench --package torrent-repository-benchmarking +``` + +See [docs/benchmarking.md](docs/benchmarking.md) and [docs/profiling.md](docs/profiling.md). + +## 🔍 Lint Commands + +The project uses the `linter` binary from +[torrust/torrust-linting](https://github.com/torrust/torrust-linting). 
+ +```sh +# Install the linter binary +cargo install --locked --git https://github.com/torrust/torrust-linting --bin linter + +# Run all linters (MANDATORY before every commit and PR) +linter all + +# Run individual linters +linter markdown # markdownlint +linter yaml # yamllint +linter toml # taplo +linter cspell # spell checker +linter clippy # Rust linter +linter rustfmt # Rust formatter check +linter shellcheck # shell scripts +``` + +**`linter all` must exit with code `0` before every commit. PRs that fail CI linting are +rejected without review.** + +## 🔗 Dependencies Check + +```sh +cargo machete # Check for unused dependencies (mandatory before commits) +``` + +Install via: `cargo install cargo-machete` + +## 🎨 Code Style + +- **rustfmt**: Format with `cargo fmt` before committing. Config: `rustfmt.toml` + (`group_imports = "StdExternalCrate"`, `imports_granularity = "Module"`, `max_width = 130`). +- **Compile flags**: `.cargo/config.toml` enables strict global `rustflags` (`-D warnings`, + `-D unused`, `-D rust-2018-idioms`, `-D future-incompatible`, and others). All code must + compile cleanly with these flags — no suppressions unless absolutely necessary. +- **clippy**: No warnings allowed (`cargo clippy -- -D warnings`). +- **Imports**: All imports at the top of the file, grouped (std → external crates → internal + crate). Prefer short imported names over fully-qualified paths + (e.g., `Arc<MyType>` not `std::sync::Arc<crate::my::MyType>`). Use full paths only to + disambiguate naming conflicts. +- **TOML**: Must pass `taplo fmt --check **/*.toml`. Auto-fix with `taplo fmt **/*.toml`. +- **Markdown**: Must pass markdownlint. +- **YAML**: Must pass `yamllint -c .yamllint-ci.yml`. +- **Spell checking**: Add new technical terms to `project-words.txt` (one word per line, + alphabetical order). + +## 🤝 Collaboration Principles + +These rules apply repository-wide to every assistant, including custom agents. 
+ +When acting as an assistant in this repository: + +- Do not flatter the user or agree with weak ideas by default. +- Push back when a request, diff, or proposed commit looks wrong. +- Flag unclear but important points before they become problems. +- Ask a clarifying question instead of making a random choice when the decision matters. +- Call out likely misses: naming inconsistencies, accidental generated files, + staged-versus-unstaged mismatches, missing docs updates, or suspicious commit scope. + +When raising a likely mistake or blocker, say so clearly and early instead of burying it after +routine status updates. + +## 🔧 Essential Rules + +1. **Linting gate**: `linter all` must exit `0` before every commit. No exceptions. +2. **GPG commit signing**: All commits **must** be signed with GPG (`git commit -S`). +3. **Never commit `storage/` or `target/`**: These directories contain runtime data and build + artifacts. They are git-ignored; never force-add them. +4. **Unused dependencies**: Run `cargo machete` before committing. Remove any unused + dependencies immediately. +5. **Rust imports**: All imports at the top of the file, grouped (std → external crates → + internal crate). Prefer short imported names over fully-qualified paths. +6. **Continuous self-review**: Review your own work against project quality standards. Apply + self-review at three levels: + - **Mandatory** — before opening a pull request + - **Strongly recommended** — before each commit + - **Recommended** — after completing each small, independent, deployable change +7. **Security**: Do not report security vulnerabilities through public GitHub issues. Send an + email to `info@nautilus-cyberneering.de` instead. See [SECURITY.md](SECURITY.md). + +## 🌿 Git Workflow + +**Branch naming**: + +```text +<issue-number>-<short-description> # e.g. 
1697-ai-agent-configuration (preferred) +feat/<short-description> # for features without a tracked issue +fix/<short-description> # for bug fixes +chore/<short-description> # for maintenance tasks +``` + +**Commit messages** follow [Conventional Commits](https://www.conventionalcommits.org/): + +```text +feat(<scope>): add X +fix(<scope>): resolve Y +chore(<scope>): update Z +docs(<scope>): document W +refactor(<scope>): restructure V +ci(<scope>): adjust pipeline U +test(<scope>): add tests for T +``` + +Scope should reflect the affected package or area (e.g., `tracker-core`, `udp-protocol`, `ci`, `docs`). + +**Branch strategy**: + +- Feature branches are cut from `develop` +- PRs target `develop` +- `develop` → `staging/main` → `main` (release pipeline) +- PRs must pass all CI status checks before merge + +See [docs/release_process.md](docs/release_process.md) for the full release workflow. + +## 🧭 Development Principles + +For detailed information see [`docs/`](docs/). + +**Core Principles:** + +- **Observability**: If it happens, we can see it — even after it happens (deep traceability) +- **Testability**: Every component must be testable in isolation and as part of the whole +- **Modularity**: Clear package boundaries; servers contain only network I/O logic +- **Extensibility**: Core logic is framework-agnostic for easy protocol additions + +**Code Quality Standards** — both production and test code must be: + +- **Clean**: Well-structured with clear naming and minimal complexity +- **Maintainable**: Easy to modify and extend without breaking existing functionality +- **Readable**: Clear intent that can be understood by other developers +- **Testable**: Designed to support comprehensive testing at all levels + +**Beck's Four Rules of Simple Design** (in priority order): + +1. **Passes the tests**: The code must work as intended — testing is a first-class activity +2. **Reveals intention**: Code should be easy to understand, expressing purpose clearly +3. 
**No duplication**: Apply DRY — eliminating duplication drives out good designs +4. **Fewest elements**: Remove anything that doesn't serve the prior three rules + +Reference: [Beck Design Rules](https://martinfowler.com/bliki/BeckDesignRules.html) + +## 🐳 Container / Docker + +```sh +# Run the latest image +docker run -it torrust/tracker:latest +# or with Podman +podman run -it docker.io/torrust/tracker:latest + +# Build and run via Docker Compose +docker compose up -d # Start all services (detached) +docker compose logs -f tracker # Follow tracker logs +docker compose down # Stop and remove containers +``` + +**Volume mappings** (local `storage/` → container paths): + +```text +./storage/tracker/lib → /var/lib/torrust/tracker +./storage/tracker/log → /var/log/torrust/tracker +./storage/tracker/etc → /etc/torrust/tracker +``` + +**Ports**: UDP tracker: `6969`, HTTP tracker: `7070`, REST API: `1212` + +See [docs/containers.md](docs/containers.md) for detailed container documentation. + +## 🎯 Auto-Invoke Skills + +Agent Skills live under [`.github/skills/`](.github/skills/). Each skill is a `SKILL.md` file +with YAML frontmatter and Markdown instructions covering a repeatable workflow. + +> Skills supplement (not replace) the rules in this file. Rules apply always; skills activate +> when their workflows are needed. + +**For VS Code**: Enable `chat.useAgentSkills` in settings to activate skill discovery. + +**Learn more**: See [Agent Skills Specification (agentskills.io)](https://agentskills.io/specification). 
+ +## 📚 Documentation + +- [Documentation Index](docs/index.md) +- [Package Architecture](docs/packages.md) +- [Benchmarking](docs/benchmarking.md) +- [Profiling](docs/profiling.md) +- [Containers](docs/containers.md) +- [Release Process](docs/release_process.md) +- [ADRs](docs/adrs/README.md) +- [Issues / Implementation Plans](docs/issues/) +- [API docs (docs.rs)](https://docs.rs/torrust-tracker/) +- [Report a security vulnerability](SECURITY.md) + +### Quick Navigation + +| Task | Start Here | +| ------------------------------------ | ---------------------------------------------------- | +| Understand the architecture | [`docs/packages.md`](docs/packages.md) | +| Run the tracker in a container | [`docs/containers.md`](docs/containers.md) | +| Read all docs | [`docs/index.md`](docs/index.md) | +| Understand an architectural decision | [`docs/adrs/README.md`](docs/adrs/README.md) | +| Read or write an issue spec | [`docs/issues/`](docs/issues/) | +| Run benchmarks | [`docs/benchmarking.md`](docs/benchmarking.md) | +| Run profiling | [`docs/profiling.md`](docs/profiling.md) | +| Understand the release process | [`docs/release_process.md`](docs/release_process.md) | +| Report a security vulnerability | [`SECURITY.md`](SECURITY.md) | +| Agent skills reference | [`.github/skills/`](.github/skills/) | +| Custom agents reference | [`.github/agents/`](.github/agents/) | diff --git a/Cargo.lock b/Cargo.lock index 1a6a09244..bb8a972b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,18 +4,18 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" dependencies = [ "gimli", ] [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "ahash" @@ -23,16 +23,16 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.17", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] @@ -53,16 +53,19 @@ dependencies = [ ] [[package]] -name = "allocator-api2" -version = "0.2.21" +name = "alloca" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4" +dependencies = [ + "cc", +] [[package]] -name = "android-tzdata" -version = "0.1.1" +name = "allocator-api2" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "android_system_properties" @@ -81,9 +84,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.18" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d" dependencies = [ "anstyle", "anstyle-parse", @@ -96,44 +99,53 @@ dependencies = [ 
[[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.7" +version = "3.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", - "once_cell", - "windows-sys 0.59.0", + "once_cell_polyfill", + "windows-sys 0.61.2", ] [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "approx" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" +dependencies = [ + "num-traits", +] [[package]] name = "aquatic_peer_id" @@ -163,9 +175,12 @@ dependencies = [ 
[[package]] name = "arc-swap" -version = "1.7.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +checksum = "6a3a1fd6f75306b68087b831f025c712524bcb19aad54e557b1129cfa0a2b207" +dependencies = [ + "rustversion", +] [[package]] name = "arrayvec" @@ -173,6 +188,22 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +[[package]] +name = "astral-tokio-tar" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c23f3af104b40a3430ccb90ed5f7bd877a8dc5c26fc92fde51a22b40890dcf9" +dependencies = [ + "filetime", + "futures-core", + "libc", + "portable-atomic", + "rustc-hash", + "tokio", + "tokio-stream", + "xattr", +] + [[package]] name = "async-attributes" version = "1.1.2" @@ -196,9 +227,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -208,30 +239,27 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.20" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310c9bcae737a48ef5cdee3174184e6d548b292739ede61a1f955ef76a738861" +checksum = "d0f9ee0f6e02ffd7ad5816e9464499fba7b3effd01123b515c41d1697c43dad1" dependencies = [ - "brotli", - "flate2", - "futures-core", - "memchr", + "compression-codecs", + "compression-core", "pin-project-lite", "tokio", - "zstd", - "zstd-safe", ] [[package]] name = "async-executor" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" +checksum = "c96bf972d85afc50bf5ab8fe2d54d1586b4e0b46c97c50a0c9e71e2f7bcd812a" dependencies = [ "async-task", "concurrent-queue", "fastrand", "futures-lite", + "pin-project-lite", "slab", ] @@ -241,7 +269,7 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.5.0", "async-executor", "async-io", "async-lock", @@ -253,11 +281,11 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" dependencies = [ - "async-lock", + "autocfg", "cfg-if", "concurrent-queue", "futures-io", @@ -266,26 +294,25 @@ dependencies = [ "polling", "rustix", "slab", - "tracing", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "async-lock" -version = "3.4.0" +version = "3.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +checksum = "290f7f2596bd5b78a9fec8088ccd89180d7f9f55b94b0576823bbbdc72ee8311" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-std" -version = "1.13.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" +checksum = "2c8e079a4ab67ae52b7403632e4618815d6db36d2a010cfe41b02c1b1578f93b" dependencies = [ "async-attributes", "async-channel 1.9.0", @@ -308,6 +335,28 @@ dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "async-stream" +version = "0.3.6" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + [[package]] name = "async-task" version = "4.7.1" @@ -316,20 +365,20 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.87" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "atomic" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" dependencies = [ "bytemuck", ] @@ -342,15 +391,37 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "aws-lc-rs" +version = "1.16.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ec6fb3fe69024a75fa7e1bfb48aa6cf59706a101658ea01bfd33b2b248a038f" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.40.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "f50037ee5e1e41e7b8f9d161680a725bd1626cb6f8c7e901f91f942850852fe7" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] [[package]] name = "axum" -version = "0.8.1" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d6fd624c75e18b3b4c6b9caf42b1afe24437daaee904069137d8bab077be8b8" +checksum = "31b698c5f9a010f6573133b09e0de5408834d0c82f8d7475a89fc1867a71cd90" dependencies = [ "axum-core", "axum-macros", @@ -368,14 +439,13 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustversion", - "serde", + "serde_core", "serde_json", "serde_path_to_error", "serde_urlencoded", "sync_wrapper", "tokio", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", "tracing", @@ -394,18 +464,17 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.0" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1362f362fd16024ae199c1970ce98f9661bf5ef94b9808fee734bc3698b733" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ "bytes", - "futures-util", + "futures-core", "http", "http-body", "http-body-util", "mime", "pin-project-lite", - "rustversion", "sync_wrapper", "tower-layer", "tower-service", @@ -414,68 +483,67 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.10.0" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "460fc6f625a1f7705c6cf62d0d070794e94668988b1c38111baeec177c715f7b" +checksum = "be44683b41ccb9ab2d23a5230015c9c3c55be97a25e4428366de8873103f7970" dependencies = [ "axum", "axum-core", "bytes", "form_urlencoded", + "futures-core", "futures-util", "http", "http-body", "http-body-util", "mime", "pin-project-lite", - "serde", + "serde_core", "serde_html_form", "serde_path_to_error", - "tower 
0.5.2", "tower-layer", "tower-service", + "tracing", ] [[package]] name = "axum-macros" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" +checksum = "7aa268c23bfbbd2c4363b9cd302a4f504fb2a9dfe7e3451d66f35dd392e20aca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "axum-server" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" +checksum = "b1df331683d982a0b9492b38127151e6453639cd34926eb9c07d4cd8c6d22bfc" dependencies = [ "arc-swap", "bytes", - "futures-util", + "either", + "fs-err", "http", "http-body", - "http-body-util", "hyper", "hyper-util", "pin-project-lite", "rustls", - "rustls-pemfile", "rustls-pki-types", "tokio", "tokio-rustls", - "tower 0.4.13", "tower-service", ] [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" dependencies = [ "addr2line", "cfg-if", @@ -483,7 +551,16 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", + "windows-link", +] + +[[package]] +name = "backtrace-ext" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "537beee3be4a18fb023b570f80e3ae28003db9167a751266b259926e25539d50" +dependencies = [ + "backtrace", ] [[package]] @@ -500,9 +577,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.4.7" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7f31f3af01c5c65a07985c804d3366560e6fa7883d640a122819b14ec327482c" +checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695" dependencies = [ "autocfg", "libm", @@ -519,11 +596,11 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.71.1" +version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.9.0", + "bitflags", "cexpr", "clang-sys", "itertools 0.13.0", @@ -532,7 +609,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] @@ -543,15 +620,9 @@ checksum = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f" [[package]] name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.9.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "c4512299f36f043ab09a583e57bceb5a5aab7a73db1805848e8fef3c9e8c78b3" [[package]] name = "bittorrent-http-tracker-core" @@ -561,12 +632,21 @@ dependencies = [ "bittorrent-http-tracker-protocol", "bittorrent-primitives", "bittorrent-tracker-core", + "criterion 0.5.1", + "formatjson", "futures", "mockall", - "thiserror 2.0.12", + "serde", + "serde_json", + "thiserror 2.0.18", "tokio", + "tokio-util", + "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", + "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", "tracing", ] @@ -583,7 +663,7 @@ dependencies = [ 
"percent-encoding", "serde", "serde_bencode", - "thiserror 2.0.12", + "thiserror 2.0.18", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", @@ -619,7 +699,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror 2.0.12", + "thiserror 2.0.18", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -641,19 +721,22 @@ dependencies = [ "r2d2", "r2d2_mysql", "r2d2_sqlite", - "rand 0.9.0", + "rand 0.10.1", "serde", "serde_json", "testcontainers", - "thiserror 2.0.12", + "thiserror 2.0.18", "tokio", + "tokio-util", "torrust-rest-tracker-api-client", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", "torrust-tracker-located-error", + "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tracing", "url", ] @@ -669,14 +752,21 @@ dependencies = [ "bloom", "blowfish", "cipher", + "criterion 0.5.1", "futures", "lazy_static", "mockall", - "rand 0.9.0", - "thiserror 2.0.12", + "rand 0.10.1", + "serde", + "thiserror 2.0.18", "tokio", + "tokio-util", + "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", + "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", "tracing", "zerocopy 0.7.35", @@ -714,11 +804,11 @@ dependencies = [ [[package]] name = "blocking" -version = "1.6.1" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" +checksum = "e83f8d02be6967315521be875afa792a316e28d57b5a2d401897e2a7921b7f21" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.5.0", "async-task", "futures-io", "futures-lite", @@ -736,9 +826,9 @@ dependencies = [ [[package]] name = "blowfish" -version = "0.9.1" +version = 
"0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e412e2cd0f2b2d93e02543ceae7917b3c70331573df19ee046bcbc35e45e87d7" +checksum = "62ce3946557b35e71d1bbe07ec385073ce9eda05043f95de134eb578fcf1a298" dependencies = [ "byteorder", "cipher", @@ -746,11 +836,14 @@ dependencies = [ [[package]] name = "bollard" -version = "0.18.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ccca1260af6a459d75994ad5acc1651bcabcbdbc41467cc9786519ab854c30" +checksum = "ee04c4c84f1f811b017f2fbb7dd8815c976e7ca98593de9c1e2afad0f636bff4" dependencies = [ + "async-stream", "base64 0.22.1", + "bitflags", + "bollard-buildkit-proto", "bollard-stubs", "bytes", "futures-core", @@ -765,63 +858,85 @@ dependencies = [ "hyper-util", "hyperlocal", "log", + "num", "pin-project-lite", + "rand 0.9.4", "rustls", "rustls-native-certs", - "rustls-pemfile", "rustls-pki-types", "serde", "serde_derive", "serde_json", - "serde_repr", "serde_urlencoded", - "thiserror 2.0.12", + "thiserror 2.0.18", + "time", "tokio", + "tokio-stream", "tokio-util", + "tonic", "tower-service", "url", "winapi", ] +[[package]] +name = "bollard-buildkit-proto" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85a885520bf6249ab931a764ffdb87b0ceef48e6e7d807cfdb21b751e086e1ad" +dependencies = [ + "prost", + "prost-types", + "tonic", + "tonic-prost", + "ureq", +] + [[package]] name = "bollard-stubs" -version = "1.47.1-rc.27.3.1" +version = "1.52.1-rc.29.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f179cfbddb6e77a5472703d4b30436bff32929c0aa8a9008ecf23d1d3cdd0da" +checksum = "0f0a8ca8799131c1837d1282c3f81f31e76ceb0ce426e04a7fe1ccee3287c066" dependencies = [ + "base64 0.22.1", + "bollard-buildkit-proto", + "bytes", + "prost", "serde", + "serde_json", "serde_repr", - "serde_with", + "time", ] [[package]] name = "borsh" -version = "1.5.5" +version = "1.6.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5430e3be710b68d984d1391c854eb431a9d548640711faa54eecb1df93db91cc" +checksum = "cfd1e3f8955a5d7de9fab72fc8373fade9fb8a703968cb200ae3dc6cf08e185a" dependencies = [ "borsh-derive", + "bytes", "cfg_aliases", ] [[package]] name = "borsh-derive" -version = "1.5.5" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8b668d39970baad5356d7c83a86fee3a539e6f93bf6764c97368243e17a0487" +checksum = "bfcfdc083699101d5a7965e49925975f2f55060f94f9a05e7187be95d530ca59" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "brotli" -version = "7.0.0" +version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd" +checksum = "4bd8b9603c7aa97359dbd97ecf258968c95f3adddd6db2f7e7a5bef101c84560" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -830,9 +945,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.2" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" +checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -855,9 +970,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" [[package]] name = "bytecheck" @@ -883,9 +998,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.22.0" +version = "1.25.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" +checksum = "c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec" [[package]] name = "byteorder" @@ -895,15 +1010,15 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.1" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "camino" -version = "1.1.9" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +checksum = "dd0b03af37dad7a14518b7691d81acb0f8222604ad3d1b02f6b4bed5188c0cd5" dependencies = [ "serde", ] @@ -916,24 +1031,31 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "castaway" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" +checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a" dependencies = [ "rustversion", ] [[package]] name = "cc" -version = "1.2.16" +version = "1.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "43c5703da9466b66a946814e1adf53ea2c90f10063b86290cc9eb67ce3478a20" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -945,9 +1067,9 @@ 
dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cfg_aliases" @@ -955,13 +1077,23 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chacha20" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f8d983286843e49675a4b7a2d174efe136dc93a18d69130dd18198a6c167601" +dependencies = [ + "cfg-if", + "cpufeatures 0.3.0", + "rand_core 0.10.1", +] + [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" dependencies = [ - "android-tzdata", "iana-time-zone", "num-traits", "serde", @@ -997,11 +1129,11 @@ dependencies = [ [[package]] name = "cipher" -version = "0.4.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +checksum = "e34d8227fe1ba289043aeb13792056ff80fd6de1a9f49137a5f499de8e8c78ea" dependencies = [ - "crypto-common", + "crypto-common 0.2.1", "inout", ] @@ -1018,9 +1150,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.31" +version = "4.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" +checksum = "1ddb117e43bbf7dacf0a4190fef4d345b9bad68dfc649cb349e7d17d28428e51" dependencies = [ "clap_builder", "clap_derive", @@ -1028,9 +1160,9 @@ dependencies = [ [[package]] 
name = "clap_builder" -version = "4.5.31" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" +checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f" dependencies = [ "anstream", "anstyle", @@ -1040,36 +1172,46 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.28" +version = "4.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" +checksum = "f2ce8604710f6733aa641a2b3731eaa1e8b3d9973d5e3565da11800813f997a9" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" [[package]] name = "cmake" -version = "0.1.54" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678" dependencies = [ "cc", ] [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" + +[[package]] +name = "combine" +version = "4.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] [[package]] name = "compact_str" @@ -1084,6 +1226,26 @@ dependencies = [ "static_assertions", ] +[[package]] +name 
= "compression-codecs" +version = "0.4.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7b51a7d9c967fc26773061ba86150f19c50c0d65c887cb1fbe295fd16619b7" +dependencies = [ + "brotli", + "compression-core", + "flate2", + "memchr", + "zstd", + "zstd-safe", +] + +[[package]] +name = "compression-core" +version = "0.4.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -1093,6 +1255,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "convert_case" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1105,9 +1276,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -1128,11 +1299,20 @@ dependencies = [ "libc", ] +[[package]] +name = "cpufeatures" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201" +dependencies = [ + "libc", +] + [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -1147,7 +1327,7 @@ dependencies = [ "cast", "ciborium", "clap", - 
"criterion-plot", + "criterion-plot 0.5.0", "futures", "is-terminal", "itertools 0.10.5", @@ -1165,6 +1345,32 @@ dependencies = [ "walkdir", ] +[[package]] +name = "criterion" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3" +dependencies = [ + "alloca", + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot 0.8.2", + "itertools 0.13.0", + "num-traits", + "oorandom", + "page_size", + "plotters", + "rayon", + "regex", + "serde", + "serde_json", + "tinytemplate", + "tokio", + "walkdir", +] + [[package]] name = "criterion-plot" version = "0.5.0" @@ -1175,6 +1381,16 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "criterion-plot" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea" +dependencies = [ + "cast", + "itertools 0.13.0", +] + [[package]] name = "crossbeam" version = "0.8.4" @@ -1190,9 +1406,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] @@ -1243,53 +1459,96 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", "typenum", ] [[package]] -name = "darling" -version = "0.20.10" +name = "crypto-common" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "77727bb15fa921304124b128af125e7e3b968275d1b108b379190264f4423710" dependencies = [ - "darling_core", - "darling_macro", + "hybrid-array", ] [[package]] -name = "darling_core" -version = "0.20.10" +name = "darling" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim", - "syn 2.0.99", + "darling_core 0.20.11", + "darling_macro 0.20.11", ] [[package]] -name = "darling_macro" -version = "0.20.10" +name = "darling" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" +dependencies = [ + "darling_core 0.23.0", + "darling_macro 0.23.0", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.117", +] + +[[package]] +name = "darling_core" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" +dependencies = [ + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.117", +] + +[[package]] +name = 
"darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core 0.20.11", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "darling_macro" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ - "darling_core", + "darling_core 0.23.0", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] @@ -1308,46 +1567,85 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" dependencies = [ "powerfmt", - "serde", + "serde_core", +] + +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.117", ] [[package]] name = "derive_more" -version = "2.0.1" +version = "2.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" dependencies = [ + "convert_case", "proc-macro2", "quote", - "syn 2.0.99", + "rustc_version", + "syn 2.0.117", "unicode-xid", ] [[package]] name = "derive_utils" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" +checksum = "362f47930db19fe7735f527e6595e4900316b893ebf6d48ad3d31be928d57dd6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.10.7" @@ -1355,7 +1653,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", - "crypto-common", + "crypto-common 0.1.7", ] [[package]] @@ -1366,14 +1664,14 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "docker_credential" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31951f49556e34d90ed28342e1df7e1cb7a229c4cab0aecc627b5d91edd41d07" +checksum = 
"1d89dfcba45b4afad7450a99b39e751590463e45c04728cf555d36bb66940de8" dependencies = [ "base64 0.21.7", "serde", @@ -1386,11 +1684,23 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + [[package]] name = "either" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "encoding_rs" @@ -1402,15 +1712,25 @@ dependencies = [ ] [[package]] -name = "env_logger" -version = "0.8.4" +name = "env_filter" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "32e90c2accc4b07a8456ea0debdc2e7587bdd890680d71173a15d4ae604f6eef" dependencies = [ "log", "regex", ] +[[package]] +name = "env_logger" +version = "0.11.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0621c04f2196ac3f488dd583365b9c09be011a4ab8b9f37248ffcc8f6198b56a" +dependencies = [ + "env_filter", + "log", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -1419,23 +1739,22 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.10" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "etcetera" -version = "0.8.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +checksum = "de48cc4d1c1d97a20fd819def54b890cadde72ed3ad0c614822a0a433361be96" dependencies = [ "cfg-if", - "home", - "windows-sys 0.48.0", + "windows-sys 0.61.2", ] [[package]] @@ -1446,9 +1765,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ "concurrent-queue", "parking", @@ -1457,11 +1776,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "pin-project-lite", ] @@ -1479,9 +1798,20 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.3.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +checksum = "9f1f227452a390804cdb637b74a86990f2a7d7ba4b7d5693aac9b4dd6defd8d6" + +[[package]] +name = "ferroid" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee93edf3c501f0035bbeffeccfed0b79e14c311f12195ec0e661e114a0f60da4" +dependencies = [ + "portable-atomic", + "rand 0.10.1", + "web-time", +] [[package]] name = "figment" @@ -1494,28 +1824,33 @@ dependencies = [ "pear", "serde", "tempfile", - "toml", + "toml 0.8.23", "uncased", "version_check", ] [[package]] name = "filetime" -version = "0.2.25" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" dependencies = [ "cfg-if", "libc", "libredox", - "windows-sys 0.59.0", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + [[package]] name = "flate2" -version = "1.1.0" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" dependencies = [ "crc32fast", "libz-sys", @@ -1530,9 +1865,15 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.4" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foldhash" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" [[package]] name = "foreign-types" @@ -1551,13 +1892,23 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" 
[[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] +[[package]] +name = "formatjson" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3ba17cfe2aff8969f35b2bffec13b34756c51ea53eadcc5d5446f71370e2ed" +dependencies = [ + "miette", + "thiserror 1.0.69", +] + [[package]] name = "forwarded-header-value" version = "0.1.1" @@ -1570,15 +1921,18 @@ dependencies = [ [[package]] name = "fragile" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" +checksum = "8878864ba14bb86e818a412bfd6f18f9eabd4ec0f008a28e8f7eb61db532fcf9" +dependencies = [ + "futures-core", +] [[package]] name = "frunk" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "874b6a17738fc273ec753618bac60ddaeac48cb1d7684c3e7bd472e57a28b817" +checksum = "28aef0f9aa070bce60767c12ba9cb41efeaf1a2bc6427f87b7d83f11239a16d7" dependencies = [ "frunk_core", "frunk_derives", @@ -1588,48 +1942,64 @@ dependencies = [ [[package]] name = "frunk_core" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3529a07095650187788833d585c219761114005d5976185760cf794d265b6a5c" +checksum = "476eeaa382e3462b84da5d6ba3da97b5786823c2d0d3a0d04ef088d073da225c" dependencies = [ "serde", ] [[package]] name = "frunk_derives" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" +checksum = 
"a0b4095fc99e1d858e5b8c7125d2638372ec85aa0fe6c807105cf10b0265ca6c" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "frunk_proc_macro_helpers" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a956ef36c377977e512e227dcad20f68c2786ac7a54dacece3746046fea5ce" +checksum = "1952b802269f2db12ab7c0bd328d0ae8feaabf19f352a7b0af7bb0c5693abfce" dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "frunk_proc_macros" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e86c2c9183662713fea27ea527aad20fb15fee635a71081ff91bf93df4dc51" +checksum = "3462f590fa236005bd7ca4847f81438bd6fe0febd4d04e11968d4c2e96437e78" dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.99", + "syn 2.0.117", +] + +[[package]] +name = "fs-err" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73fde052dbfc920003cfd2c8e2c6e6d4cc7c1091538c3a24226cec0665ab08c0" +dependencies = [ + "autocfg", + "tokio", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "funty" version = "2.0.0" @@ -1638,9 +2008,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -1653,9 +2023,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.32" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -1663,15 +2033,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", @@ -1680,15 +2050,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-lite" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" dependencies = [ "fastrand", "futures-core", @@ -1699,26 +2069,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-timer" @@ -1728,9 +2098,9 @@ checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", "futures-core", @@ -1740,7 +2110,6 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] @@ -1756,38 +2125,68 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", + "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" 
dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi 5.3.0", + "wasip2", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "rand_core 0.10.1", + "wasip2", + "wasip3", +] + +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.117", ] [[package]] name = "gimli" -version = "0.31.1" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "gloo-timers" @@ -1803,9 +2202,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.8" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -1813,7 +2212,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.1", + "indexmap 2.14.0", "slab", "tokio", "tokio-util", @@ -1822,12 +2221,13 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.7.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", + "zerocopy 0.8.48", ] [[package]] @@ -1847,22 +2247,37 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.1.5", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +dependencies = [ + "foldhash 0.2.0", ] +[[package]] +name = "hashbrown" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f467dd6dccf739c208452f8014c75c18bb8301b050ad1cfb27153803edb0f51" + [[package]] name = "hashlink" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +checksum = "ea0b22561a9c04a7cb1a302c013e0259cd3b4bb619f145b32f72b8b4bcbed230" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.16.1", ] [[package]] @@ -1873,9 +2288,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ 
-1885,27 +2300,26 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcaaec4551594c969335c98c903c1397853d4198408ea609190f420500f6be71" +checksum = "e712f64ec3850b98572bffac52e2c6f282b29fe6c5fa6d42334b30be438d95c1" [[package]] name = "home" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "http" -version = "1.2.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] @@ -1921,12 +2335,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", + "futures-core", "http", "http-body", "pin-project-lite", @@ -1944,15 +2358,25 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "hybrid-array" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3944cf8cf766b40e2a1a333ee5e9b563f854d5fa49d6a8ca2764e97c6eddb214" +dependencies = [ + "typenum", +] + [[package]] name = "hyper" -version = "1.6.0" +version 
= "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", + "futures-core", "h2", "http", "http-body", @@ -1982,54 +2406,55 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.5" +version = "0.27.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "33ca68d021ef39cf6463ab54c1d0f5daf03377b70561305bb89a8f83aab66e0f" dependencies = [ - "futures-util", "http", "hyper", "hyper-util", "rustls", - "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", ] [[package]] -name = "hyper-tls" -version = "0.6.0" +name = "hyper-timeout" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "bytes", - "http-body-util", "hyper", "hyper-util", - "native-tls", + "pin-project-lite", "tokio", - "tokio-native-tls", "tower-service", ] [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", "futures-util", "http", "http-body", "hyper", + "ipnet", + "libc", + "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.3", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -2049,14 +2474,15 @@ dependencies = [ [[package]] name = "iana-time-zone" -version 
= "0.1.61" +version = "0.1.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", + "log", "wasm-bindgen", "windows-core", ] @@ -2072,21 +2498,23 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c" dependencies = [ "displaydoc", + "potential_utf", + "utf8_iter", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29" dependencies = [ "displaydoc", "litemap", @@ -2096,97 +2524,65 @@ dependencies = [ ] [[package]] -name = "icu_locid_transform" -version = "1.5.0" +name = "icu_normalizer" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4" dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", + "icu_collections", + "icu_normalizer_data", + "icu_properties", "icu_provider", - "tinystr", + "smallvec", "zerovec", ] [[package]] -name = "icu_locid_transform_data" -version = "1.5.0" +name = "icu_normalizer_data" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" +checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38" [[package]] -name = "icu_normalizer" -version = "1.5.0" +name = "icu_properties" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de" dependencies = [ - "displaydoc", "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" - -[[package]] -name = "icu_properties" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", + "icu_locale_core", "writeable", "yoke", 
"zerofrom", + "zerotrie", "zerovec", ] [[package]] -name = "icu_provider_macros" -version = "1.5.0" +name = "id-arena" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.99", -] +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" [[package]] name = "ident_case" @@ -2196,9 +2592,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -2207,9 +2603,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -2228,13 +2624,14 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "d466e9454f08e4a911e14806c24e16fba1b4c121d1ea474396f396069cf949d9" dependencies = [ "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.17.0", "serde", + "serde_core", ] [[package]] @@ -2245,44 +2642,60 @@ checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" [[package]] name = "inout" -version = "0.1.4" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +checksum = "4250ce6452e92010fdf7268ccc5d14faa80bb12fc741938534c58f16804e03c7" dependencies = [ - "generic-array", + "hybrid-array", ] [[package]] name = "io-enum" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d197db2f7ebf90507296df3aebaf65d69f5dce8559d8dbd82776a6cadab61bbf" +checksum = "7de9008599afe8527a8c9d70423437363b321649161e98473f433de802d76107" dependencies = [ "derive_utils", ] [[package]] name = "ipnet" -version = "2.11.0" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" + +[[package]] +name = "iri-string" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20" +dependencies = [ + "memchr", + "serde", +] [[package]] name = "is-terminal" -version = "0.4.15" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] +[[package]] +name = "is_ci" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" + [[package]] name = "is_terminal_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" @@ -2302,27 +2715,83 
@@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys 0.3.1", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41a652e1f9b6e0275df1f15b32661cf0d4b78d4d87ddec5e0c3c20f097433258" +dependencies = [ + "jni-sys 0.4.1", +] + +[[package]] +name = "jni-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6377a88cb3910bee9b0fa88d4f42e1d2da8e79915598f65fb0c7ee14c878af2" +dependencies = [ + "jni-sys-macros", +] + +[[package]] +name = "jni-sys-macros" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "38c0b942f458fe50cdac086d2f946512305e5631e720728f2a61aabcd47a6264" +dependencies = [ + "quote", + "syn 2.0.117", +] [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version 
= "0.3.77" +version = "0.3.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "2964e92d1d9dc3364cae4d718d93f227e3abb088e747d92e0395bfdedf1c12ca" dependencies = [ + "cfg-if", + "futures-util", "once_cell", "wasm-bindgen", ] @@ -2342,44 +2811,51 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + [[package]] name = "libc" -version = "0.2.170" +version = "0.2.185" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" +checksum = "52ff2c0fe9bc6cb6b14a0592c2ff4fa9ceb83eea9db979b0487cd054946a2b8f" [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-link", ] [[package]] name = "libm" -version = "0.2.11" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "libredox" -version = "0.1.3" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "e02f3bb43d335493c96bf3fd3a321600bf6bd07ed34bc64118e9293bdffea46c" dependencies = [ - "bitflags 2.9.0", + 
"bitflags", "libc", - "redox_syscall 0.5.10", + "plain", + "redox_syscall 0.7.4", ] [[package]] name = "libsqlite3-sys" -version = "0.31.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8935b44e7c13394a179a438e0cebba0fe08fe01b54f152e29a93b5cf993fd4" +checksum = "b1f111c8c41e7c61a49cd34e44c7619462967221a6443b0ec299e0ac30cfb9b1" dependencies = [ "cc", "pkg-config", @@ -2388,9 +2864,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "fc3a226e576f50782b3305c5ccf458698f92798987f551c6a02efe8276721e22" dependencies = [ "cc", "pkg-config", @@ -2399,43 +2875,41 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.15" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0" [[package]] name = "local-ip-address" -version = "0.6.3" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" +checksum = "d4a59a0cb1c7f84471ad5cd38d768c2a29390d17f1ff2827cdf49bc53e8ac70b" dependencies = [ "libc", "neli", - "thiserror 1.0.69", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.26" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" dependencies = [ "value-bag", ] @@ -2446,9 +2920,15 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.5", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "matchit" version = "0.8.4" @@ -2457,9 +2937,39 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "memchr" -version = "2.7.4" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "miette" +version = "7.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "5f98efec8807c63c752b5bd61f862c165c115b0a35685bdcfd9238c7aeb592b7" +dependencies = [ + "backtrace", + "backtrace-ext", + "cfg-if", + "miette-derive", + "owo-colors", + "supports-color", + "supports-hyperlinks", + "supports-unicode", + "terminal_size", + "textwrap", + "unicode-width 0.1.14", +] + +[[package]] +name = "miette-derive" +version = "7.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] [[package]] name = "mime" @@ -2475,29 +2985,30 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.5" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "simd-adler32", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi", + "windows-sys 0.61.2", ] [[package]] name = "mockall" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" +checksum = "f58d964098a5f9c6b63d0798e5372fd04708193510a7af313c22e9f29b7b620b" dependencies = [ "cfg-if", "downcast", @@ -2509,21 +3020,21 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" +checksum = "ca41ce716dda6a9be188b385aa78ee5260fc25cd3802cb2a8afdc6afbe6b6dbf" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "multimap" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" +checksum = 
"1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" dependencies = [ "serde", ] @@ -2548,7 +3059,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2", + "socket2 0.5.10", "twox-hash", "url", ] @@ -2559,14 +3070,14 @@ version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63c3512cf11487168e0e9db7157801bf5273be13055a9cc95356dc9e0035e49c" dependencies = [ - "darling", + "darling 0.20.11", "heck", "num-bigint", "proc-macro-crate", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", "termcolor", "thiserror 1.0.69", ] @@ -2580,7 +3091,7 @@ dependencies = [ "base64 0.21.7", "bigdecimal", "bindgen", - "bitflags 2.9.0", + "bitflags", "bitvec", "btoi", "byteorder", @@ -2594,7 +3105,7 @@ dependencies = [ "mysql-common-derive", "num-bigint", "num-traits", - "rand 0.8.5", + "rand 0.8.6", "regex", "rust_decimal", "saturating", @@ -2621,9 +3132,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.14" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +checksum = "465500e14ea162429d264d44189adc38b199b62b1c21eea9f69e4b73cb03bbf2" dependencies = [ "libc", "log", @@ -2631,34 +3142,38 @@ dependencies = [ "openssl-probe", "openssl-sys", "schannel", - "security-framework 2.11.1", + "security-framework", "security-framework-sys", "tempfile", ] [[package]] name = "neli" -version = "0.6.5" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +checksum = "22f9786d56d972959e1408b6a93be6af13b9c1392036c5c1fafa08a1b0c6ee87" dependencies = [ + "bitflags", "byteorder", + "derive_builder", + "getset", "libc", "log", "neli-proc-macros", + "parking_lot", ] [[package]] name = "neli-proc-macros" -version = "0.1.4" +version = "0.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +checksum = "05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609" dependencies = [ "either", "proc-macro2", "quote", "serde", - "syn 1.0.109", + "syn 2.0.117", ] [[package]] @@ -2679,12 +3194,25 @@ checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" [[package]] name = "nu-ansi-term" -version = "0.46.0" +version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "overload", - "winapi", + "windows-sys 0.61.2", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", ] [[package]] @@ -2697,11 +3225,20 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967" [[package]] name = "num-integer" @@ -2712,6 +3249,28 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies 
= [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -2723,32 +3282,38 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.20.3" +version = "1.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" [[package]] name = "oorandom" -version = "11.1.4" +version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "openssl" -version = "0.10.71" +version = "0.10.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e14130c6a98cd258fdcb0fb6d744152343ff729cbfcb28c656a9d12b999fbcd" +checksum = "f38c4372413cdaaf3cc79dd92d29d7d9f5ab09b51b10dded508fb90bb70b9222" dependencies = [ - "bitflags 2.9.0", + "bitflags", "cfg-if", "foreign-types", "libc", @@ -2765,20 +3330,20 @@ checksum = 
"a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "openssl-probe" -version = "0.1.6" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" [[package]] name = "openssl-sys" -version = "0.9.106" +version = "0.9.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" +checksum = "13ce1245cd07fcc4cfdb438f7507b0c7e4f3849a69fd84d52374c66d83741bb6" dependencies = [ "cc", "libc", @@ -2787,10 +3352,20 @@ dependencies = [ ] [[package]] -name = "overload" -version = "0.1.1" +name = "owo-colors" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d211803b9b6b570f68772237e415a029d5a50c65d382910b879fb19d3271f94d" + +[[package]] +name = "page_size" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" +dependencies = [ + "libc", + "winapi", +] [[package]] name = "parking" @@ -2800,9 +3375,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", "parking_lot_core", @@ -2810,15 +3385,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.10", + "redox_syscall 0.5.18", "smallvec", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -2843,7 +3418,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] @@ -2866,24 +3441,24 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "pem" -version = "3.0.5" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ "base64 0.22.1", - "serde", + "serde_core", ] [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "phf" @@ -2911,7 +3486,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", - "rand 0.8.5", + "rand 0.8.6", ] [[package]] @@ -2925,29 +3500,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.10" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +checksum = "f1749c7ed4bcaf4c3d0a3efc28538844fb29bcdd7d2b67b2be7e20ba861ff517" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.10" +version = 
"1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" [[package]] name = "pin-utils" @@ -2957,9 +3532,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +checksum = "c835479a4443ded371d6c535cbfd8d31ad92c5d23ae9770a61bc155e4992a3c1" dependencies = [ "atomic-waker", "fastrand", @@ -2968,9 +3543,15 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.32" +version = "0.3.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19f132c84eca552bf34cab8ec81f1c1dcc229b811638f9d283dceabe58c5569e" + +[[package]] +name = "plain" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" [[package]] name = "plotters" @@ -3002,24 +3583,41 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.4" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" dependencies = [ "cfg-if", 
"concurrent-queue", "hermit-abi", "pin-project-lite", "rustix", - "tracing", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" + +[[package]] +name = "portable-atomic-util" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a106d1259c23fac8e543272398ae0e3c0b8d33c88ed73d0cc71b0f1d902618" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "potential_utf" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564" +dependencies = [ + "zerovec", +] [[package]] name = "powerfmt" @@ -3029,18 +3627,18 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.7.35", + "zerocopy 0.8.48", ] [[package]] name = "predicates" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +checksum = "ada8f2932f28a27ee7b70dd6c1c39ea0675c55a36879ab92f3a715eaa1e63cfe" dependencies = [ "anstyle", "predicates-core", @@ -3048,27 +3646,47 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" +checksum = "cad38746f3166b4031b1a0d39ad9f954dd291e7854fcc0eed52ee41a0b50d144" [[package]] name = "predicates-tree" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +checksum = "d0de1b847b39c8131db0467e9df1ff60e6d0562ab8e9a16e568ad0fdb372e2f2" dependencies = [ "predicates-core", "termtree", ] +[[package]] +name = "pretty_assertions" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" +dependencies = [ + "diff", + "yansi", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.117", +] + [[package]] name = "proc-macro-crate" -version = "3.2.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f" dependencies = [ - "toml_edit", + "toml_edit 0.25.11+spec-1.1.0", ] [[package]] @@ -3090,14 +3708,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -3110,11 +3728,43 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", 
+ "syn 2.0.117", "version_check", "yansi", ] +[[package]] +name = "prost" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "prost-types" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" +dependencies = [ + "prost", +] + [[package]] name = "ptr_meta" version = "0.1.4" @@ -3137,23 +3787,91 @@ dependencies = [ [[package]] name = "quickcheck" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +checksum = "95c589f335db0f6aaa168a7cd27b1fc6920f5e1470c804f814d9cd6e62a0f70b" dependencies = [ "env_logger", "log", - "rand 0.8.5", + "rand 0.10.1", ] [[package]] -name = "quote" -version = "1.0.39" +name = "quinn" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ - "proc-macro2", -] + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.6.3", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" +dependencies = [ + "aws-lc-rs", + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.4", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.3", + "tracing", + "windows-sys 0.60.2", +] + +[[package]] +name = "quote" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" [[package]] name = "r2d2" @@ -3178,9 +3896,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.26.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee025287c0188d75ae2563bcb91c9b0d1843cfc56e4bd3ab867597971b5cc256" +checksum = "5576df16239e4e422c4835c8ed00be806d4491855c7847dba60b7aa8408b469b" dependencies = [ "r2d2", "rusqlite", @@ -3195,9 +3913,9 @@ checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" [[package]] name = "rand" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = 
"5ca0ecfa931c29007047d1bc58e623ab12e5590e8c7cc53200d5202b69266d8a" dependencies = [ "libc", "rand_chacha 0.3.1", @@ -3206,13 +3924,23 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.0" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +checksum = "44c5af06bb1b7d3216d91932aed5265164bf384dc89cd6ba05cf59a35f5f76ea" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.3", - "zerocopy 0.8.21", + "rand_core 0.9.5", +] + +[[package]] +name = "rand" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2e8e8bcc7961af1fdac401278c6a831614941f6164ee3bf4ce61b7edb162207" +dependencies = [ + "chacha20", + "getrandom 0.4.2", + "rand_core 0.10.1", ] [[package]] @@ -3232,7 +3960,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -3241,23 +3969,29 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.17", ] [[package]] name = "rand_core" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.4", ] +[[package]] +name = "rand_core" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63b8176103e19a2643978565ca18b50549f6101881c443590420e4dc998a3c69" + [[package]] name = "rayon" -version = "1.10.0" +version = "1.12.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "fb39b166781f92d482534ef4b4b1b2568f42613b53e5b6c160e24cfbfa30926d" dependencies = [ "either", "rayon-core", @@ -3265,9 +3999,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -3275,27 +4009,47 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 1.3.2", + "bitflags", ] [[package]] name = "redox_syscall" -version = "0.5.10" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" +checksum = "f450ad9c3b1da563fb6948a8e0fb0fb9269711c9c73d9ea1de5058c79c8d643a" dependencies = [ - "bitflags 2.9.0", + "bitflags", +] + +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", ] [[package]] name = "regex" -version = "1.11.1" +version = "1.12.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -3305,9 +4059,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -3316,9 +4070,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" [[package]] name = "relative-path" @@ -3337,57 +4091,54 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.12" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", "futures-core", - "futures-util", "h2", "http", "http-body", "http-body-util", "hyper", "hyper-rustls", - "hyper-tls", "hyper-util", - "ipnet", "js-sys", "log", "mime", - "native-tls", - "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile", + "quinn", + "rustls", + "rustls-pki-types", + "rustls-platform-verifier", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", - "system-configuration", "tokio", - "tokio-native-tls", - "tower 0.5.2", + "tokio-rustls", + "tower", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - 
"windows-registry", ] [[package]] name = "ring" -version = "0.17.11" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.17", "libc", "untrusted", "windows-sys 0.52.0", @@ -3395,19 +4146,20 @@ dependencies = [ [[package]] name = "ringbuf" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726bb493fe9cac765e8f96a144c3a8396bdf766dedad22e504b70b908dcbceb4" +checksum = "fe47b720588c8702e34b5979cb3271a8b1842c7cb6f57408efa70c779363488c" dependencies = [ "crossbeam-utils", "portable-atomic", + "portable-atomic-util", ] [[package]] name = "rkyv" -version = "0.7.45" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" +checksum = "2297bf9c81a3f0dc96bc9521370b88f054168c29826a75e89c55ff196e7ed6a1" dependencies = [ "bitvec", "bytecheck", @@ -3423,15 +4175,25 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.45" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" +checksum = "84d7b42d4b8d06048d3ac8db0eb31bcb942cbeb709f0b5f2b2ebde398d3038f5" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] +[[package]] +name = "rsqlite-vfs" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" +dependencies = [ + "hashbrown 0.16.1", + "thiserror 2.0.18", +] + [[package]] name = "rstest" version = "0.25.0" @@ -3440,10 +4202,21 @@ checksum = "6fc39292f8613e913f7df8fa892b8944ceb47c247b78e1b1ae2f09e019be789d" dependencies 
= [ "futures-timer", "futures-util", - "rstest_macros", + "rstest_macros 0.25.0", "rustc_version", ] +[[package]] +name = "rstest" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5a3193c063baaa2a95a33f03035c8a72b83d97a54916055ba22d35ed3839d49" +dependencies = [ + "futures-timer", + "futures-util", + "rstest_macros 0.26.1", +] + [[package]] name = "rstest_macros" version = "0.25.0" @@ -3458,51 +4231,71 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.99", + "syn 2.0.117", + "unicode-ident", +] + +[[package]] +name = "rstest_macros" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c845311f0ff7951c5506121a9ad75aec44d083c31583b2ea5a30bcb0b0abba0" +dependencies = [ + "cfg-if", + "glob", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version", + "syn 2.0.117", "unicode-ident", ] [[package]] name = "rusqlite" -version = "0.33.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c6d5e5acb6f6129fe3f7ba0a7fc77bca1942cb568535e18e7bc40262baf3110" +checksum = "a0d2b0146dd9661bf67bb107c0bb2a55064d556eeb3fc314151b957f313bcd4e" dependencies = [ - "bitflags 2.9.0", + "bitflags", "fallible-iterator", "fallible-streaming-iterator", "hashlink", "libsqlite3-sys", "smallvec", + "sqlite-wasm-rs", ] [[package]] name = "rust_decimal" -version = "1.36.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555" +checksum = "2ce901f9a19d251159075a4c37af514c3b8ef99c22e02dd8c19161cf397ee94a" dependencies = [ "arrayvec", "borsh", "bytes", "num-traits", - "rand 0.8.5", + "rand 0.8.6", "rkyv", "serde", "serde_json", + "wasm-bindgen", ] [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "rustc-hash" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" +checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe" [[package]] name = "rustc_version" @@ -3515,23 +4308,25 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.44" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ - "bitflags 2.9.0", + "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = "69f9466fb2c14ea04357e91413efb882e2a6d4a406e625449bc0a5d360d53a21" dependencies = [ + "aws-lc-rs", + "log", "once_cell", "ring", "rustls-pki-types", @@ -3542,37 +4337,60 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.2.0", + "security-framework", ] [[package]] -name = "rustls-pemfile" -version = "2.2.0" +name = "rustls-pki-types" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ - "rustls-pki-types", + "web-time", + "zeroize", ] [[package]] -name = "rustls-pki-types" -version = "1.11.0" +name = "rustls-platform-verifier" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.102.8" +version = "0.103.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "8279bb85272c9f10811ae6a6c547ff594d6a7f3c6c6b02ee9726d1d0dcfcdd06" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -3580,15 +4398,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" 
+checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -3607,11 +4425,11 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "91c1b7e4904c873ef0710c1f407dde2e6287de2bebc1bbbf7d430bb7cbffd939" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3623,6 +4441,30 @@ dependencies = [ "parking_lot", ] +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -3637,25 +4479,12 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags 2.9.0", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.2.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = 
[ - "bitflags 2.9.0", - "core-foundation 0.10.0", + "bitflags", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -3663,9 +4492,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" dependencies = [ "core-foundation-sys", "libc", @@ -3673,16 +4502,17 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "8a7852d02fc848982e0c167ef163aaff9cd91dc640ba85e263cb1ce46fae51cd" [[package]] name = "serde" -version = "1.0.218" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] @@ -3698,58 +4528,70 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.16" +version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "364fec0df39c49a083c9a8a18a23a6bcfd9af130fe9fe321d18520a0d113e09e" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" dependencies = [ "serde", + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.218" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "serde_html_form" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" +checksum = "b2f2d7ff8a2140333718bb329f5c40fc5f0865b84c426183ce14c97d2ab8154f" dependencies = [ "form_urlencoded", - "indexmap 2.7.1", + "indexmap 2.14.0", "itoa", "ryu", - "serde", + "serde_core", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.14.0", "itoa", "memchr", - "ryu", "serde", + "serde_core", + "zmij", ] [[package]] name = "serde_path_to_error" -version = "0.1.17" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" dependencies = [ "itoa", "serde", + "serde_core", ] [[package]] @@ -3760,18 +4602,27 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "1.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6662b5879511e06e8999a8a235d848113e942c9124f211511b16466ee2995f26" +dependencies = [ + "serde_core", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -3786,17 +4637,18 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.12.0" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" +checksum = "dd5414fad8e6907dbdd5bc441a50ae8d6e26151a03b1de04d89a5576de61d01f" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.7.1", - "serde", - "serde_derive", + "indexmap 2.14.0", + "schemars 0.9.0", + "schemars 1.2.1", + "serde_core", "serde_json", "serde_with_macros", "time", @@ -3804,14 +4656,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.12.0" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" +checksum = "d3db8978e608f1fe7357e211969fd9abdcae80bac1ba7a3369bb7eb6b404eb65" dependencies = [ - "darling", + "darling 0.23.0", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] @@ -3821,18 +4673,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", "digest", ] [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", "digest", ] @@ -3853,13 +4705,20 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] 
name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" dependencies = [ + "errno", "libc", ] +[[package]] +name = "simd-adler32" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703d5c7ef118737c72f1af64ad2f6f8c5e1921f818cdcb97b8fe6fc69bf66214" + [[package]] name = "simdutf8" version = "0.1.5" @@ -3868,40 +4727,59 @@ checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "siphasher" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" [[package]] name = "smallvec" -version = "1.14.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "sqlite-wasm-rs" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b2c760607300407ddeaee518acf28c795661b7108c75421303dbefb237d3a36" +dependencies = [ + "cc", + "js-sys", + "rsqlite-vfs", + "wasm-bindgen", +] + [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "static_assertions" @@ -3924,7 +4802,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] @@ -3935,14 +4813,14 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "subprocess" -version = "0.2.9" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2e86926081dda636c546d8c5e641661049d7562a68f5488be4a1f7f66f6086" +checksum = "2c56e8662b206b9892d7a5a3f2ecdbcb455d3d6b259111373b7e08b8055158a8" dependencies = [ "libc", "winapi", @@ -3954,6 +4832,27 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +[[package]] +name = "supports-color" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c64fc7232dd8d2e4ac5ce4ef302b1d81e0b80d055b9d77c7c4f51f6aa4c867d6" +dependencies = [ + "is_ci", +] + +[[package]] +name = "supports-hyperlinks" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "e396b6523b11ccb83120b115a0b7366de372751aa6edf19844dfb13a6af97e91" + +[[package]] +name = "supports-unicode" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7401a30af6cb5818bb64852270bb722533397edcfc7344954a38f420819ece2" + [[package]] name = "syn" version = "1.0.109" @@ -3967,9 +4866,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.99" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -3987,22 +4886,22 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "system-configuration" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ - "bitflags 2.9.0", + "bitflags", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -4042,16 +4941,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.17.1" +version = "3.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e5a0acb1f3f55f65cc4a866c361b2fb2a0ff6366785ae6fbb5f85df07ba230" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" dependencies = [ - "cfg-if", "fastrand", - "getrandom 0.3.1", + "getrandom 0.4.2", "once_cell", "rustix", - "windows-sys 0.59.0", + 
"windows-sys 0.61.2", ] [[package]] @@ -4063,6 +4961,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230a1b821ccbd75b185820a1f1ff7b14d21da1e442e22c0863ea5f08771a8874" +dependencies = [ + "rustix", + "windows-sys 0.61.2", +] + [[package]] name = "termtree" version = "0.5.1" @@ -4071,18 +4979,21 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.23.3" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a4f01f39bb10fc2a5ab23eb0d888b1e2bb168c157f61a1b98e6c501c639c74" +checksum = "bfd5785b5483672915ed5fe3cddf9f546802779fc1eceff0a6fb7321fac81c1e" dependencies = [ + "astral-tokio-tar", "async-trait", "bollard", - "bollard-stubs", "bytes", "docker_credential", "either", "etcetera", + "ferroid", "futures", + "http", + "itertools 0.14.0", "log", "memchr", "parse-display", @@ -4090,14 +5001,23 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.12", + "thiserror 2.0.18", "tokio", "tokio-stream", - "tokio-tar", "tokio-util", "url", ] +[[package]] +name = "textwrap" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" +dependencies = [ + "unicode-linebreak", + "unicode-width 0.2.2", +] + [[package]] name = "thiserror" version = "1.0.69" @@ -4109,11 +5029,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl 2.0.18", ] [[package]] @@ -4124,56 +5044,55 @@ checksum = 
"4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] name = "time" -version = "0.3.38" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb041120f25f8fbe8fd2dbe4671c7c2ed74d83be2e7a77529bf7e0790ae3f472" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.3" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c97a5b985b7c11d7bc27fa927dc4fe6af3a6dfb021d28deb60d3bf51e76ef" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.20" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8093bc3e81c3bc5f7879de09619d06c9a5a5e45ca44dfeeb7225bae38005c5c" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", @@ -4181,9 +5100,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d" dependencies = [ "displaydoc", "zerovec", @@ -4201,9 +5120,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "3e61e67053d25a4e82c844e8424039d9745781b3fc4f32b8d55ed50f5f667ef3" dependencies = [ "tinyvec_macros", ] @@ -4216,47 +5135,36 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.43.0" +version = "1.52.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "b67dee974fe86fd92cc45b7a95fdd2f99a36a6d7b0d431a231178d3d670bbcc6" dependencies = [ - "backtrace", "bytes", "libc", "mio", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.3", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "385a6cb71ab9ab790c5fe8d67f1645e6c450a7ce006a33de03daa956cf70a496" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", + "syn 2.0.117", ] [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ "rustls", "tokio", @@ -4264,35 +5172,20 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" dependencies = [ "futures-core", "pin-project-lite", "tokio", ] -[[package]] -name = "tokio-tar" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5714c010ca3e5c27114c1cdeb9d14641ace49874aa5626d7149e47aedace75" -dependencies = [ - "filetime", - "futures-core", - "libc", - "redox_syscall 0.3.5", - "tokio", - "tokio-stream", - "xattr", -] - [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -4303,36 +5196,143 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.20" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", + "toml_edit 0.22.27", +] + +[[package]] +name = "toml" +version = "0.9.12+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" +dependencies = [ + "indexmap 2.14.0", + "serde_core", + 
"serde_spanned 1.1.1", + "toml_datetime 0.7.5+spec-1.1.0", + "toml_parser", + "toml_writer", + "winnow 0.7.15", ] [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.7.5+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_datetime" +version = "1.1.1+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3165f65f62e28e0115a00b2ebdd37eb6f3b641855f9d636d3cd4103767159ad7" +dependencies = [ + "serde_core", +] + [[package]] name = "toml_edit" -version = "0.22.24" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.14.0", "serde", - "serde_spanned", - "toml_datetime", - "winnow", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", + "toml_write", + "winnow 0.7.15", +] + +[[package]] +name = "toml_edit" +version = "0.25.11+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b59c4d22ed448339746c59b905d24568fcbb3ab65a500494f7b8c3e97739f2b" +dependencies = [ + "indexmap 2.14.0", + "toml_datetime 1.1.1+spec-1.1.0", + "toml_parser", + "winnow 1.0.1", +] + +[[package]] +name = "toml_parser" +version = "1.1.2+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526" +dependencies = [ + 
"winnow 1.0.1", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "toml_writer" +version = "1.1.1+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db" + +[[package]] +name = "tonic" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" +dependencies = [ + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "socket2 0.6.3", + "sync_wrapper", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-prost" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" +dependencies = [ + "bytes", + "prost", + "tonic", ] [[package]] @@ -4354,11 +5354,13 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", "tower-http", "tracing", "tracing-subscriber", + "url", ] [[package]] @@ -4378,20 +5380,23 @@ dependencies = [ "hyper", "local-ip-address", "percent-encoding", - "rand 0.9.0", + "rand 0.10.1", "reqwest", "serde", "serde_bencode", "serde_bytes", "serde_repr", "tokio", + "tokio-util", "torrust-axum-server", "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "tower 0.5.2", + 
"tower", "tower-http", "tracing", "uuid", @@ -4419,7 +5424,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.12", + "thiserror 2.0.18", "tokio", "torrust-axum-server", "torrust-rest-tracker-api-client", @@ -4427,10 +5432,12 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", - "tower 0.5.2", + "tower", "tower-http", "tracing", "url", @@ -4448,12 +5455,12 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "thiserror 2.0.12", + "thiserror 2.0.18", "tokio", "torrust-server-lib", "torrust-tracker-configuration", "torrust-tracker-located-error", - "tower 0.5.2", + "tower", "tracing", ] @@ -4464,7 +5471,7 @@ dependencies = [ "hyper", "reqwest", "serde", - "thiserror 2.0.12", + "thiserror 2.0.18", "url", "uuid", ] @@ -4477,8 +5484,12 @@ dependencies = [ "bittorrent-tracker-core", "bittorrent-udp-tracker-core", "tokio", + "tokio-util", "torrust-tracker-configuration", + "torrust-tracker-events", + "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", ] @@ -4488,7 +5499,9 @@ name = "torrust-server-lib" version = "3.0.0-develop" dependencies = [ "derive_more", + "rstest 0.25.0", "tokio", + "torrust-tracker-primitives", "tower-http", "tracing", ] @@ -4500,19 +5513,22 @@ dependencies = [ "anyhow", "axum-server", "bittorrent-http-tracker-core", + "bittorrent-primitives", + "bittorrent-tracker-client", "bittorrent-tracker-core", "bittorrent-udp-tracker-core", "chrono", "clap", - "futures", "local-ip-address", "mockall", - "rand 0.9.0", + "rand 0.10.1", "regex", "reqwest", "serde", "serde_json", + "thiserror 2.0.18", "tokio", + "tokio-util", "torrust-axum-health-check-api-server", 
"torrust-axum-http-tracker-server", "torrust-axum-rest-tracker-api-server", @@ -4522,6 +5538,7 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", "tracing", @@ -4545,7 +5562,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.18", "tokio", "torrust-tracker-configuration", "tracing", @@ -4573,8 +5590,8 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.12", - "toml", + "thiserror 2.0.18", + "toml 0.9.12+spec-1.1.0", "torrust-tracker-located-error", "tracing", "tracing-subscriber", @@ -4586,15 +5603,41 @@ dependencies = [ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ - "criterion", - "thiserror 2.0.12", + "criterion 0.8.2", + "thiserror 2.0.18", +] + +[[package]] +name = "torrust-tracker-events" +version = "3.0.0-develop" +dependencies = [ + "futures", + "mockall", + "tokio", ] [[package]] name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror 2.0.12", + "thiserror 2.0.18", + "tracing", +] + +[[package]] +name = "torrust-tracker-metrics" +version = "3.0.0-develop" +dependencies = [ + "approx", + "chrono", + "derive_more", + "formatjson", + "pretty_assertions", + "rstest 0.25.0", + "serde", + "serde_json", + "thiserror 2.0.18", + "torrust-tracker-primitives", "tracing", ] @@ -4606,37 +5649,66 @@ dependencies = [ "binascii", "bittorrent-primitives", "derive_more", + "rstest 0.25.0", "serde", "tdyne-peer-id", "tdyne-peer-id-registry", - "thiserror 2.0.12", + "thiserror 2.0.18", "torrust-tracker-configuration", + "url", "zerocopy 0.7.35", ] +[[package]] +name = "torrust-tracker-swarm-coordination-registry" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "async-std", + "bittorrent-primitives", + "chrono", + "criterion 0.8.2", + 
"crossbeam-skiplist", + "futures", + "mockall", + "rand 0.10.1", + "rstest 0.26.1", + "serde", + "thiserror 2.0.18", + "tokio", + "tokio-util", + "torrust-tracker-clock", + "torrust-tracker-configuration", + "torrust-tracker-events", + "torrust-tracker-metrics", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "tracing", +] + [[package]] name = "torrust-tracker-test-helpers" version = "3.0.0-develop" dependencies = [ - "rand 0.9.0", + "rand 0.10.1", "torrust-tracker-configuration", "tracing", "tracing-subscriber", ] [[package]] -name = "torrust-tracker-torrent-repository" +name = "torrust-tracker-torrent-repository-benchmarking" version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", - "criterion", + "criterion 0.8.2", "crossbeam-skiplist", "dashmap", "futures", "parking_lot", - "rstest", + "rstest 0.26.1", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", @@ -4658,15 +5730,19 @@ dependencies = [ "futures-util", "local-ip-address", "mockall", - "rand 0.9.0", + "rand 0.10.1", "ringbuf", - "thiserror 2.0.12", + "serde", + "thiserror 2.0.18", "tokio", + "tokio-util", "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", - "torrust-tracker-located-error", + "torrust-tracker-events", + "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", "tracing", "url", @@ -4676,30 +5752,18 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", + "indexmap 2.14.0", "pin-project-lite", + "slab", "sync_wrapper", "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -4707,19 +5771,22 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.2" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "async-compression", - "bitflags 2.9.0", + "bitflags", "bytes", "futures-core", + "futures-util", "http", "http-body", + "iri-string", "pin-project-lite", "tokio", "tokio-util", + "tower", "tower-layer", "tower-service", "tracing", @@ -4740,9 +5807,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -4752,20 +5819,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = 
"db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -4794,9 +5861,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" dependencies = [ "nu-ansi-term", "serde", @@ -4822,15 +5889,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", - "rand 0.8.5", + "rand 0.8.6", "static_assertions", ] [[package]] name = "typenum" -version = "1.18.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "40ce102ab67701b8526c123c1bab5cbe42d7040ccfd0f64af1a385808d2f43de" [[package]] name = "uncased" @@ -4843,9 +5910,33 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "unicode-linebreak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" + +[[package]] +name = "unicode-segmentation" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c" + +[[package]] +name = "unicode-width" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "unicode-width" +version = 
"0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" [[package]] name = "unicode-xid" @@ -4859,23 +5950,51 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "ureq" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dea7109cdcd5864d4eeb1b58a1648dc9bf520360d7af16ec26d0a9354bafcfc0" +dependencies = [ + "base64 0.22.1", + "log", + "percent-encoding", + "rustls", + "rustls-pki-types", + "ureq-proto", + "utf8-zero", +] + +[[package]] +name = "ureq-proto" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e994ba84b0bd1b1b0cf92878b7ef898a5c1760108fe7b6010327e274917a808c" +dependencies = [ + "base64 0.22.1", + "http", + "httparse", + "log", +] + [[package]] name = "url" -version = "2.5.4" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] -name = "utf16_iter" -version = "1.0.5" +name = "utf8-zero" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" +checksum = "b8c0a043c9540bae7c578c88f91dda8bd82e59ae27c21baca69c8b191aaf5a6e" [[package]] name = "utf8_iter" @@ -4891,12 +6010,14 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.15.1" +version = "1.23.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" +checksum = "ddd74a9687298c6858e9b88ec8935ec45d22e8fd5e6394fa1bd4e99a87789c76" dependencies = [ - "getrandom 0.3.1", - "rand 0.9.0", + "getrandom 0.4.2", + "js-sys", + "rand 0.10.1", + "wasm-bindgen", ] [[package]] @@ -4907,9 +6028,9 @@ checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "value-bag" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" +checksum = "7ba6f5989077681266825251a52748b8c1d8a4ad098cc37e440103d0ea717fc0" [[package]] name = "vcpkg" @@ -4944,63 +6065,57 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasi" -version = "0.13.3+wasi-0.2.2" +name = "wasip2" +version = "1.0.3+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "20064672db26d7cdc89c7798c48a0fdfac8213434a1186e5ef29fd560ae223d6" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen 0.57.1", ] [[package]] -name = "wasm-bindgen" -version = "0.2.100" +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" dependencies = [ - "cfg-if", - "once_cell", - "rustversion", - "wasm-bindgen-macro", + "wit-bindgen 0.51.0", ] 
[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" +name = "wasm-bindgen" +version = "0.2.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "0bf938a0bacb0469e83c1e148908bd7d5a6010354cf4fb73279b7447422e3a89" dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.99", + "cfg-if", + "once_cell", + "rustversion", + "serde", + "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "f371d383f2fb139252e0bfac3b81b265689bf45b6874af544ffa4c975ac1ebf8" dependencies = [ - "cfg-if", "js-sys", - "once_cell", "wasm-bindgen", - "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "eeff24f84126c0ec2db7a449f0c2ec963c6a49efe0698c4242929da037ca28ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5008,36 +6123,89 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "9d08065faf983b2b80a79fd87d8254c409281cf7de75fc4b773019824196c904" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.99", - "wasm-bindgen-backend", + "syn 2.0.117", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = 
"5fd04d9e306f1907bd13c6361b5c6bfc7b3b3c095ed3f8a9246390f8dbdee129" dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap 2.14.0", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap 2.14.0", + "semver", +] + [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "4f2dfbb17949fa2088e5d39408c48368947b86f7834484e87b73de55bc14d97d" dependencies = [ "js-sys", "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-root-certs" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31141ce3fc3e300ae89b78c0dd67f9708061d1d2eda54b8209346fd6be9a92c" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi" version = "0.3.9" @@ -5056,11 +6224,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.11" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -5071,56 +6239,81 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.52.0" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ - "windows-targets 0.52.6", + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", ] [[package]] name = "windows-link" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-registry" -version = "0.2.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" 
dependencies = [ + "windows-link", "windows-result", "windows-strings", - "windows-targets 0.52.6", ] [[package]] name = "windows-result" -version = "0.2.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-targets 0.52.6", + "windows-link", ] [[package]] name = "windows-strings" -version = "0.1.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-result", - "windows-targets 0.52.6", + "windows-link", ] [[package]] name = "windows-sys" -version = "0.48.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.42.2", ] [[package]] @@ -5134,26 +6327,35 @@ dependencies = [ [[package]] name = "windows-sys" -version = "0.59.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", ] [[package]] name = "windows-targets" -version = "0.48.5" +version = "0.42.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -5165,18 +6367,35 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.5" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" @@ -5184,11 +6403,17 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + [[package]] name = "windows_aarch64_msvc" -version = "0.48.5" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" @@ -5196,11 +6421,17 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + [[package]] name = "windows_i686_gnu" -version = "0.48.5" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" @@ -5208,17 +6439,29 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = 
"windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + [[package]] name = "windows_i686_msvc" -version = "0.48.5" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" @@ -5226,11 +6469,17 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + [[package]] name = "windows_x86_64_gnu" -version = "0.48.5" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" @@ -5238,11 +6487,17 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.5" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = 
"windows_x86_64_gnullvm" @@ -5250,11 +6505,17 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + [[package]] name = "windows_x86_64_msvc" -version = "0.48.5" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" @@ -5262,35 +6523,129 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + [[package]] name = "winnow" -version = "0.7.3" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" +checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945" dependencies = [ "memchr", ] [[package]] -name = "wit-bindgen-rt" -version = "0.33.0" +name = "winnow" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +checksum = "09dac053f1cd375980747450bfc7250c264eaae0583872e845c0c7cd578872b5" dependencies = [ - "bitflags 2.9.0", + "memchr", ] [[package]] -name = "write16" -version = "1.0.0" +name = "wit-bindgen" +version = "0.51.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen" +version = "0.57.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" +checksum = "1ebf944e87a7c253233ad6766e082e3cd714b5d03812acc24c318f549614536e" + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap 2.14.0", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap 2.14.0", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap 2.14.0", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] [[package]] name = "writeable" -version = "0.5.5" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4" [[package]] name = "wyz" @@ -5303,12 +6658,11 @@ dependencies = [ [[package]] name = "xattr" -version = "1.4.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" dependencies = [ "libc", - "linux-raw-sys", "rustix", ] @@ -5320,11 +6674,10 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca" dependencies = [ - "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -5332,13 +6685,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", "synstructure", ] @@ -5354,11 +6707,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.21" +version = "0.8.48" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf01143b2dd5d134f11f545cf9f1431b13b749695cb33bcce051e7568f99478" +checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9" dependencies = [ - "zerocopy-derive 0.8.21", + "zerocopy-derive 0.8.48", ] [[package]] @@ -5369,52 +6722,63 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "zerocopy-derive" -version = "0.8.21" +version = "0.8.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712c8386f4f4299382c9abee219bee7084f78fb939d88b6840fcc1320d5f6da2" +checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] [[package]] name = "zerofrom" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zerotrie" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = 
"0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239" dependencies = [ "yoke", "zerofrom", @@ -5423,15 +6787,21 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.117", ] +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" + [[package]] name = "zstd" version = "0.13.3" @@ -5443,18 +6813,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.3" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.14+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index bcac4bf66..1eb5f0d35 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,13 +19,13 @@ version.workspace = true name = 
"torrust_tracker_lib" [workspace.package] -authors = ["Nautilus Cyberneering <info@nautilus-cyberneering.de>, Mick van Dijke <mick@dutchbits.nl>"] -categories = ["network-programming", "web-programming"] +authors = [ "Nautilus Cyberneering <info@nautilus-cyberneering.de>, Mick van Dijke <mick@dutchbits.nl>" ] +categories = [ "network-programming", "web-programming" ] description = "A feature rich BitTorrent tracker." documentation = "https://docs.rs/crate/torrust-tracker/" edition = "2021" homepage = "https://torrust.com/" -keywords = ["bittorrent", "file-sharing", "peer-to-peer", "torrent", "tracker"] +keywords = [ "bittorrent", "file-sharing", "peer-to-peer", "torrent", "tracker" ] license = "AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" @@ -34,19 +34,20 @@ version = "3.0.0-develop" [dependencies] anyhow = "1" -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "packages/http-tracker-core" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "packages/udp-tracker-core" } -chrono = { version = "0", default-features = false, features = ["clock"] } -clap = { version = "4", features = ["derive", "env"] } -futures = "0" +chrono = { version = "0", default-features = false, features = [ "clock" ] } +clap = { version = "4", features = [ "derive", "env" ] } rand = "0" regex = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", 
features = [ "preserve_order" ] } +thiserror = "2.0.12" +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } +tokio-util = "0.7.15" torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "packages/axum-http-tracker-server" } torrust-axum-rest-tracker-api-server = { version = "3.0.0-develop", path = "packages/axum-rest-tracker-api-server" } @@ -55,18 +56,21 @@ torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "packages/re torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "packages/swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } [dev-dependencies] +bittorrent-primitives = "0.1.0" +bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } local-ip-address = "0" mockall = "0" torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "packages/rest-tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] -members = ["console/tracker-client"] +members = [ "console/tracker-client", "packages/torrent-repository-benchmarking" ] [profile.dev] debug = 1 diff --git a/Containerfile b/Containerfile index 263053390..e926a5202 100644 --- a/Containerfile +++ b/Containerfile @@ -3,13 +3,13 @@ # Torrust Tracker ## Builder Image -FROM docker.io/library/rust:bookworm AS chef +FROM 
docker.io/library/rust:trixie AS chef WORKDIR /tmp RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash RUN cargo binstall --no-confirm cargo-chef cargo-nextest ## Tester Image -FROM docker.io/library/rust:slim-bookworm AS tester +FROM docker.io/library/rust:slim-trixie AS tester WORKDIR /tmp RUN apt-get update; apt-get install -y curl sqlite3; apt-get autoclean @@ -21,7 +21,7 @@ RUN mkdir -p /app/share/torrust/default/database/; \ sqlite3 /app/share/torrust/default/database/tracker.sqlite3.db "VACUUM;" ## Su Exe Compile -FROM docker.io/library/gcc:bookworm AS gcc +FROM docker.io/library/gcc:trixie AS gcc COPY ./contrib/dev-tools/su-exec/ /usr/local/src/su-exec/ RUN cc -Wall -Werror -g /usr/local/src/su-exec/su-exec.c -o /usr/local/bin/su-exec; chmod +x /usr/local/bin/su-exec @@ -91,7 +91,7 @@ RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin ## Runtime -FROM gcr.io/distroless/cc-debian12:debug AS runtime +FROM gcr.io/distroless/cc-debian13:debug AS runtime RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/env", "/bin/"] COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec diff --git a/README.md b/README.md index 33fc4a028..2fe28db08 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,24 @@ - [x] Support [newTrackon][newtrackon] checks. - [x] Persistent `SQLite3` or `MySQL` Databases. +## Tracker Demo + +Experience the **Torrust Tracker** in action with our comprehensive demo environment! The [Torrust Demo][torrust-demo] repository provides a complete setup showcasing the tracker's capabilities in a real-world scenario. + +The demo takes full advantage of the tracker's powerful metrics system and seamless integration with [Prometheus][prometheus]. This allows you to monitor tracker performance, peer statistics, and system health in real-time. 
You can build sophisticated Grafana dashboards to visualize all aspects of your tracker's operation. + +![Sample Grafana Dashboard](./docs/media/demo/torrust-tracker-grafana-dashboard.png) + +**Demo Features:** + +- Complete Docker Compose setup. +- Pre-configured Prometheus metrics collection. +- Sample Grafana dashboards for monitoring. +- Real-time tracker statistics and performance metrics. +- Easy deployment for testing and evaluation. + +Visit the [Torrust Demo repository][torrust-demo] to get started with your own tracker instance and explore the monitoring capabilities. + ## Roadmap Core: @@ -49,13 +67,13 @@ Utils: Others: -- [ ] Support for Windows. +- [ ] Intensive testing for Windows. - [ ] Docker images for other architectures. <https://github.com/orgs/torrust/projects/10/views/6> ## Implemented BitTorrent Enhancement Proposals (BEPs) -> + > _[Learn more about BitTorrent Enhancement Proposals][BEP 00]_ - [BEP 03]: The BitTorrent Protocol. @@ -95,8 +113,8 @@ podman run -it docker.io/torrust/tracker:develop ### Development Version -- Please ensure you have the _**[latest stable (or nightly) version of rust][rust]___. -- Please ensure that your computer has enough RAM. _**Recommended 16GB.___ +- Please ensure you have the \_\*\*[latest stable (or nightly) version of rust][rust]\_\_\_. +- Please ensure that your computer has enough RAM. \_\*\*Recommended 16GB.\_\_\_ #### Checkout, Test and Run @@ -199,7 +217,7 @@ This program is free software: you can redistribute it and/or modify it under th This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the [GNU Affero General Public License][AGPL_3_0] for more details. -You should have received a copy of the *GNU Affero General Public License* along with this program. If not, see <https://www.gnu.org/licenses/>. 
+You should have received a copy of the _GNU Affero General Public License_ along with this program. If not, see <https://www.gnu.org/licenses/>. Some files include explicit copyright notices and/or license notices. @@ -232,18 +250,14 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [deployment_wf_b]: ../../actions/workflows/deployment.yaml/badge.svg [testing_wf]: ../../actions/workflows/testing.yaml [testing_wf_b]: ../../actions/workflows/testing.yaml/badge.svg - [bittorrent]: http://bittorrent.org/ [rust]: https://www.rust-lang.org/ [axum]: https://github.com/tokio-rs/axum [newtrackon]: https://newtrackon.com/ [coverage]: https://app.codecov.io/gh/torrust/torrust-tracker [torrust]: https://torrust.com/ - [dockerhub]: https://hub.docker.com/r/torrust/tracker/tags - [torrent_source_felid]: https://github.com/qbittorrent/qBittorrent/discussions/19406 - [BEP 00]: https://www.bittorrent.org/beps/bep_0000.html [BEP 03]: https://www.bittorrent.org/beps/bep_0003.html [BEP 07]: https://www.bittorrent.org/beps/bep_0007.html @@ -251,26 +265,22 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [BEP 23]: https://www.bittorrent.org/beps/bep_0023.html [BEP 27]: https://www.bittorrent.org/beps/bep_0027.html [BEP 48]: https://www.bittorrent.org/beps/bep_0048.html - [containers.md]: ./docs/containers.md - [docs]: https://docs.rs/torrust-tracker/latest/ [api]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/apis/v1 [http]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/http [udp]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/udp - [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [discussions]: https://github.com/torrust/torrust-tracker/discussions - [guide.md]: https://github.com/torrust/.github/blob/main/info/contributing.md [agreement.md]: 
https://github.com/torrust/.github/blob/main/info/licensing/contributor_agreement_v01.md - [AGPL_3_0]: ./docs/licenses/LICENSE-AGPL_3_0 [MIT_0]: ./docs/licenses/LICENSE-MIT_0 [FSF]: https://www.fsf.org/ - [nautilus]: https://github.com/orgs/Nautilus-Cyberneering/ [Dutch Bits]: https://dutchbits.nl [Naim A.]: https://github.com/naim94a/udpt [greatest-ape]: https://github.com/greatest-ape/aquatic [Power2All]: https://github.com/power2all +[torrust-demo]: https://github.com/torrust/torrust-demo +[prometheus]: https://prometheus.io/ diff --git a/cSpell.json b/cSpell.json deleted file mode 100644 index 3121d6175..000000000 --- a/cSpell.json +++ /dev/null @@ -1,198 +0,0 @@ -{ - "words": [ - "Addrs", - "adduser", - "alekitto", - "appuser", - "Arvid", - "ASMS", - "asyn", - "autoclean", - "AUTOINCREMENT", - "automock", - "Avicora", - "Azureus", - "bdecode", - "bencode", - "bencoded", - "bencoding", - "beps", - "binascii", - "binstall", - "Bitflu", - "bools", - "Bragilevsky", - "bufs", - "buildid", - "Buildx", - "byteorder", - "callgrind", - "camino", - "canonicalize", - "canonicalized", - "certbot", - "chrono", - "ciphertext", - "clippy", - "codecov", - "codegen", - "completei", - "Condvar", - "connectionless", - "Containerfile", - "conv", - "curr", - "cvar", - "Cyberneering", - "dashmap", - "datagram", - "datetime", - "debuginfo", - "Deque", - "Dijke", - "distroless", - "dockerhub", - "downloadedi", - "dtolnay", - "elif", - "endianness", - "Eray", - "filesd", - "flamegraph", - "Freebox", - "Frostegård", - "gecos", - "Grcov", - "hasher", - "healthcheck", - "heaptrack", - "hexlify", - "hlocalhost", - "Hydranode", - "hyperthread", - "Icelake", - "iiiiiiiiiiiiiiiiiiiid", - "imdl", - "impls", - "incompletei", - "infohash", - "infohashes", - "infoschema", - "Intermodal", - "intervali", - "Joakim", - "kallsyms", - "Karatay", - "kcachegrind", - "kexec", - "keyout", - "kptr", - "lcov", - "leecher", - "leechers", - "libsqlite", - "libtorrent", - "libz", - "LOGNAME", - "Lphant", - 
"matchmakes", - "metainfo", - "middlewares", - "misresolved", - "mockall", - "multimap", - "myacicontext", - "Naim", - "nanos", - "newkey", - "nextest", - "nocapture", - "nologin", - "nonroot", - "Norberg", - "numwant", - "nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7", - "oneshot", - "ostr", - "Pando", - "peekable", - "peerlist", - "programatik", - "proot", - "proto", - "Quickstart", - "Radeon", - "Rasterbar", - "realpath", - "reannounce", - "Registar", - "repr", - "reqs", - "reqwest", - "rerequests", - "ringbuf", - "ringsize", - "rngs", - "rosegment", - "routable", - "rstest", - "rusqlite", - "rustc", - "RUSTDOCFLAGS", - "RUSTFLAGS", - "rustfmt", - "Rustls", - "Ryzen", - "Seedable", - "serde", - "Shareaza", - "sharktorrent", - "SHLVL", - "skiplist", - "slowloris", - "socketaddr", - "sqllite", - "subsec", - "Swatinem", - "Swiftbit", - "taiki", - "tdyne", - "tempfile", - "testcontainers", - "thiserror", - "tlsv", - "Torrentstorm", - "torrust", - "torrustracker", - "trackerid", - "Trackon", - "typenum", - "Unamed", - "underflows", - "Unsendable", - "untuple", - "uroot", - "Vagaa", - "valgrind", - "Vitaly", - "vmlinux", - "Vuze", - "Weidendorfer", - "Werror", - "whitespaces", - "Xacrimon", - "XBTT", - "Xdebug", - "Xeon", - "Xtorrent", - "Xunlei", - "xxxxxxxxxxxxxxxxxxxxd", - "yyyyyyyyyyyyyyyyyyyyd", - "zerocopy" - ], - "enableFiletypes": [ - "dockerfile", - "shellscript", - "toml" - ] -} diff --git a/console/tracker-client/Cargo.toml b/console/tracker-client/Cargo.toml index d4ab7c9e3..8c12227e9 100644 --- a/console/tracker-client/Cargo.toml +++ b/console/tracker-client/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A collection of console clients to make requests to BitTorrent trackers." 
-keywords = ["bittorrent", "client", "tracker"] +keywords = [ "bittorrent", "client", "tracker" ] license = "LGPL-3.0" name = "torrust-tracker-client" readme = "README.md" @@ -19,21 +19,21 @@ anyhow = "1" aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "../../packages/tracker-client" } -clap = { version = "4", features = ["derive", "env"] } +clap = { version = "4", features = [ "derive", "env" ] } futures = "0" hex-literal = "1" hyper = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } serde_bencode = "0" serde_bytes = "0" -serde_json = { version = "1", features = ["preserve_order"] } +serde_json = { version = "1", features = [ "preserve_order" ] } thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../../packages/configuration" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } -url = { version = "2", features = ["serde"] } +tracing-subscriber = { version = "0", features = [ "json" ] } +url = { version = "2", features = [ "serde" ] } [package.metadata.cargo-machete] -ignored = ["serde_bytes"] +ignored = [ "serde_bytes" ] diff --git a/console/tracker-client/src/console/clients/checker/checks/udp.rs b/console/tracker-client/src/console/clients/checker/checks/udp.rs index b4edb2e2c..611afafc4 100644 --- a/console/tracker-client/src/console/clients/checker/checks/udp.rs +++ b/console/tracker-client/src/console/clients/checker/checks/udp.rs @@ -29,6 +29,7 @@ pub async fn run(udp_trackers: Vec<Url>, timeout: Duration) -> Vec<Result<Checks tracing::debug!("UDP trackers ..."); + 
#[allow(clippy::incompatible_msrv)] let info_hash = aquatic_udp_protocol::InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // DevSkim: ignore DS173237 for remote_url in udp_trackers { @@ -117,8 +118,8 @@ mod tests { let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); assert!( - socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) ); } @@ -127,8 +128,8 @@ mod tests { let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); assert!( - socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) ); } } diff --git a/console/tracker-client/src/console/clients/udp/app.rs b/console/tracker-client/src/console/clients/udp/app.rs index a2736c365..527f46e78 100644 --- a/console/tracker-client/src/console/clients/udp/app.rs +++ b/console/tracker-client/src/console/clients/udp/app.rs @@ -176,8 +176,7 @@ fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result<SocketAddr if parts.len() != 2 { return Err(anyhow::anyhow!( - "invalid address format: `{}`. Expected format is host:port", - tracker_socket_addr_str + "invalid address format: `{tracker_socket_addr_str}`. Expected format is host:port" )); } @@ -196,7 +195,7 @@ fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result<SocketAddr // Perform DNS resolution. 
let socket_addrs: Vec<_> = resolved_addr.to_socket_addrs()?.collect(); if socket_addrs.is_empty() { - Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) + Err(anyhow::anyhow!("DNS resolution failed for `{tracker_socket_addr_str}`")) } else { Ok(socket_addrs[0]) } diff --git a/console/tracker-client/src/console/clients/udp/checker.rs b/console/tracker-client/src/console/clients/udp/checker.rs index bf6b49782..ded5c107e 100644 --- a/console/tracker-client/src/console/clients/udp/checker.rs +++ b/console/tracker-client/src/console/clients/udp/checker.rs @@ -116,7 +116,7 @@ impl Client { bytes_uploaded: NumberOfBytes(0i64.into()), bytes_left: NumberOfBytes(0i64.into()), event: AnnounceEvent::Started.into(), - ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + ip_address: Ipv4Addr::UNSPECIFIED.into(), key: PeerKey::new(0i32), peers_wanted: NumberOfPeers(1i32.into()), port: Port::new(port), diff --git a/contrib/bencode/Cargo.toml b/contrib/bencode/Cargo.toml index f6355b6fc..5fab1792d 100644 --- a/contrib/bencode/Cargo.toml +++ b/contrib/bencode/Cargo.toml @@ -1,10 +1,10 @@ [package] description = "(contrib) Efficient decoding and encoding for bencode." -keywords = ["bencode", "contrib", "library"] +keywords = [ "bencode", "contrib", "library" ] name = "torrust-tracker-contrib-bencode" readme = "README.md" -authors = ["Nautilus Cyberneering <info@nautilus-cyberneering.de>, Andrew <amiller4421@gmail.com>"] +authors = [ "Nautilus Cyberneering <info@nautilus-cyberneering.de>, Andrew <amiller4421@gmail.com>" ] license = "Apache-2.0" repository = "https://github.com/torrust/bittorrent-infrastructure-project" diff --git a/contrib/bencode/README.md b/contrib/bencode/README.md index 7a203082b..81c09f691 100644 --- a/contrib/bencode/README.md +++ b/contrib/bencode/README.md @@ -1,4 +1,5 @@ # Bencode + This library allows for the creation and parsing of bencode encodings. 
-Bencode is the binary encoding used throughout bittorrent technologies from metainfo files to DHT messages. Bencode types include integers, byte arrays, lists, and dictionaries, of which the last two can hold any bencode type (they could be recursively constructed). \ No newline at end of file +Bencode is the binary encoding used throughout bittorrent technologies from metainfo files to DHT messages. Bencode types include integers, byte arrays, lists, and dictionaries, of which the last two can hold any bencode type (they could be recursively constructed). diff --git a/contrib/bencode/benches/bencode_benchmark.rs b/contrib/bencode/benches/bencode_benchmark.rs index b79bb0999..b22b286a5 100644 --- a/contrib/bencode/benches/bencode_benchmark.rs +++ b/contrib/bencode/benches/bencode_benchmark.rs @@ -1,4 +1,6 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use std::hint::black_box; + +use criterion::{criterion_group, criterion_main, Criterion}; use torrust_tracker_contrib_bencode::{BDecodeOpt, BencodeRef}; const B_NESTED_LISTS: &[u8; 100] = diff --git a/contrib/dev-tools/benches/run-benches.sh b/contrib/dev-tools/benches/run-benches.sh new file mode 100755 index 000000000..0de356492 --- /dev/null +++ b/contrib/dev-tools/benches/run-benches.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# This script is only intended to be used for local development or testing environments. 
+ +cargo bench --package torrust-tracker-torrent-repository + +cargo bench --package bittorrent-http-tracker-core + +cargo bench --package bittorrent-udp-tracker-core diff --git a/contrib/dev-tools/git/hooks/pre-commit.sh b/contrib/dev-tools/git/hooks/pre-commit.sh index 37b80bb8a..c1b183fde 100755 --- a/contrib/dev-tools/git/hooks/pre-commit.sh +++ b/contrib/dev-tools/git/hooks/pre-commit.sh @@ -6,4 +6,5 @@ cargo +nightly fmt --check && cargo +nightly machete && cargo +stable build && CARGO_INCREMENTAL=0 cargo +stable clippy --no-deps --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic && + cargo +stable test --doc --workspace && cargo +stable test --tests --benches --examples --workspace --all-targets --all-features diff --git a/contrib/dev-tools/git/hooks/pre-push.sh b/contrib/dev-tools/git/hooks/pre-push.sh index c1a724156..593068cee 100755 --- a/contrib/dev-tools/git/hooks/pre-push.sh +++ b/contrib/dev-tools/git/hooks/pre-push.sh @@ -6,5 +6,6 @@ cargo +nightly fmt --check && cargo +nightly machete && cargo +stable build && CARGO_INCREMENTAL=0 cargo +stable clippy --no-deps --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic && + cargo +stable test --doc --workspace && cargo +stable test --tests --benches --examples --workspace --all-targets --all-features && cargo +stable run --bin e2e_tests_runner -- --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" diff --git a/contrib/dev-tools/su-exec/README.md b/contrib/dev-tools/su-exec/README.md index 2b0517377..1dd4108ac 100644 --- a/contrib/dev-tools/su-exec/README.md +++ b/contrib/dev-tools/su-exec/README.md @@ -1,4 +1,5 @@ # su-exec + switch user and group id, setgroups and exec ## Purpose @@ -21,7 +22,7 @@ 
name separated with colon (e.g. `nobody:ftp`). Numeric uid/gid values can be used instead of names. Example: ```shell -$ su-exec apache:1000 /usr/sbin/httpd -f /opt/www/httpd.conf +su-exec apache:1000 /usr/sbin/httpd -f /opt/www/httpd.conf ``` ## TTY & parent/child handling @@ -43,4 +44,3 @@ PID USER TIME COMMAND This does more or less exactly the same thing as [gosu](https://github.com/tianon/gosu) but it is only 10kb instead of 1.8MB. - diff --git a/cspell.json b/cspell.json new file mode 100644 index 000000000..39ddf510e --- /dev/null +++ b/cspell.json @@ -0,0 +1,28 @@ +{ + "$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json", + "version": "0.2", + "dictionaryDefinitions": [ + { + "name": "project-words", + "path": "./project-words.txt", + "addWords": true + } + ], + "dictionaries": [ + "project-words" + ], + "enableFiletypes": [ + "dockerfile", + "shellscript", + "toml" + ], + "ignorePaths": [ + "target", + "docs/media/*.svg", + "contrib/bencode/benches/*.bencode", + "contrib/dev-tools/su-exec/**", + ".github/labels.json", + "/project-words.txt", + "repomix-output.xml" + ] +} diff --git a/docs/adrs/20260420200013_adopt_custom_github_copilot_aligned_agent_framework.md b/docs/adrs/20260420200013_adopt_custom_github_copilot_aligned_agent_framework.md new file mode 100644 index 000000000..556e131fb --- /dev/null +++ b/docs/adrs/20260420200013_adopt_custom_github_copilot_aligned_agent_framework.md @@ -0,0 +1,86 @@ +# Adopt a Custom, GitHub-Copilot-Aligned Agent Framework + +## Description + +As AI coding agents become a more common part of the development workflow, the project needs a +clear strategy for how agents should interact with the codebase. Several third-party "agent +frameworks" exist that promise to give agents structure and purpose, but they each come with +trade-offs that may not fit the tracker's needs. 
+ +This ADR records the decision to build a lightweight, first-party agent framework using the +open standards that GitHub Copilot already supports natively: `AGENTS.md`, Agent Skills, and +Custom Agent profiles. + +## Agreement + +We adopt a custom, GitHub-Copilot-aligned agent framework consisting of: + +- **`AGENTS.md`** at the repository root (and in key subdirectories) — following the + [agents.md](https://agents.md/) open standard stewarded by the Agentic AI Foundation under the + Linux Foundation. Provides AI coding agents with project context, build steps, test commands, + conventions, and essential rules. +- **Agent Skills** under `.github/skills/` — following the + [Agent Skills specification](https://agentskills.io/specification). Each skill is a directory + containing a `SKILL.md` file with YAML frontmatter and Markdown instructions, covering + repeatable tasks such as committing changes, running linters, creating ADRs, or setting up the + development environment. +- **Custom Agent profiles** under `.github/agents/` — Markdown files with YAML frontmatter + defining specialised Copilot agents (e.g. `committer`, `implementer`, `complexity-auditor`) + that can be invoked directly or as subagents. +- **`copilot-setup-steps.yml`** workflow — prepares the GitHub Copilot cloud agent environment + before it starts working on any task. + +### Alternatives Considered + +**[obra/superpowers](https://github.com/obra/superpowers)** + +A framework that adds "superpowers" to coding agents through a set of conventions and tools. +Not adopted for the following reasons: + +1. **Complexity mismatch** — introduces abstractions heavier than what tracker development needs. +1. **Precision requirements** — the tracker involves low-level Rust programming where agent work + must be reviewed carefully; generic productivity frameworks are not designed for that + constraint. +1. 
**Tooling churn risk** — depending on a third-party framework risks forced refactoring if + that framework is deprecated or pivots. + +**[gsd-build/get-shit-done](https://github.com/gsd-build/get-shit-done)** + +A productivity-oriented agent framework with opinionated workflows. +Not adopted for the same reasons as above, plus: + +1. **GitHub-first ecosystem** — the tracker is hosted on GitHub and makes intensive use of + GitHub resources (Actions, Copilot, MCP tools). Staying aligned with GitHub Copilot avoids + unnecessary integration friction. + +### Why the Custom Approach + +1. **Tailored fit** — shaped precisely to Torrust conventions, commit style, linting gates, and + package structure from day one. +1. **Proven in practice** — the same approach has already been validated during the development + of `torrust-tracker-deployer`. +1. **Agent-agnostic by design** — expressed as plain Markdown files (`AGENTS.md`, `SKILL.md`, + agent profiles), decoupled from any single agent product. Migration or multi-agent use is + straightforward. +1. **Incremental adoption** — individual skills, custom agents, or patterns from evaluated + frameworks can still be cherry-picked and integrated progressively if specific value is + identified. +1. **Stability** — a first-party approach is more stable than depending on a third-party + framework whose roadmap we do not control. 
+ +## Date + +2026-04-20 + +## References + +- Issue: https://github.com/torrust/torrust-tracker/issues/1697 +- PR: https://github.com/torrust/torrust-tracker/pull/1699 +- AGENTS.md specification: https://agents.md/ +- Agent Skills specification: https://agentskills.io/specification +- GitHub Copilot — About agent skills: https://docs.github.com/en/copilot/concepts/agents/about-agent-skills +- GitHub Copilot — About custom agents: https://docs.github.com/en/copilot/concepts/agents/copilot-cli/about-custom-agents +- Customize the Copilot cloud agent environment: https://docs.github.com/en/copilot/how-tos/use-copilot-agents/cloud-agent/customize-the-agent-environment +- obra/superpowers: https://github.com/obra/superpowers +- gsd-build/get-shit-done: https://github.com/gsd-build/get-shit-done +- torrust-tracker-deployer (validated reference implementation): https://github.com/torrust/torrust-tracker-deployer diff --git a/docs/adrs/README.md b/docs/adrs/README.md index 85986fc36..5fd40aa24 100644 --- a/docs/adrs/README.md +++ b/docs/adrs/README.md @@ -1,23 +1,32 @@ # Architectural Decision Records (ADRs) -This directory contains the architectural decision records (ADRs) for the -project. ADRs are a way to document the architectural decisions made in the -project. +This directory contains the architectural decision records (ADRs) for the project. +ADRs document architectural decisions — what was decided, why, and what alternatives +were considered. More info: <https://adr.github.io/>. -## How to add a new record +See [index.md](index.md) for the full list of ADRs. -For the prefix: +## How to Add a New ADR -```s +Generate the timestamp prefix (UTC): + +```shell date -u +"%Y%m%d%H%M%S" ``` -Then you can create a new markdown file with the following format: +Create a new Markdown file using the format `YYYYMMDDHHMMSS_snake_case_title.md`: -```s +```shell 20230510152112_title.md ``` -For the time being, we are not following any specific template. 
+Then add a row to the [Index](index.md) table. + +There is no rigid template. A typical ADR includes: + +- **Description** — the problem or context motivating the decision +- **Agreement** — what was decided and why +- **Date** — decision date (`YYYY-MM-DD`) +- **References** — related issues, PRs, external docs diff --git a/docs/adrs/index.md b/docs/adrs/index.md new file mode 100644 index 000000000..b6063e3ff --- /dev/null +++ b/docs/adrs/index.md @@ -0,0 +1,6 @@ +# ADR Index + +| ADR | Date | Title | Short Description | +| --------------------------------------------------------------------------------------- | ---------- | ------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------ | +| [20240227164834](20240227164834_use_plural_for_modules_containing_collections.md) | 2024-02-27 | Use plural for modules containing collections | Module names should use plural when they contain multiple types with the same responsibility (e.g. `requests/`, `responses/`). | +| [20260420200013](20260420200013_adopt_custom_github_copilot_aligned_agent_framework.md) | 2026-04-20 | Adopt a custom, GitHub-Copilot-aligned agent framework | Use AGENTS.md, Agent Skills, and Custom Agent profiles instead of third-party agent frameworks. | diff --git a/docs/containers.md b/docs/containers.md index cddd2ba98..a7754d8aa 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -248,6 +248,10 @@ driver = "mysql" path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" ``` +Important: if the MySQL password contains reserved URL characters (for example `+`, `/`, `@`, or `:`), it must be percent-encoded in the DSN password component. For example, if the raw password is `a+b/c`, use `a%2Bb%2Fc` in the DSN. + +When generating secrets automatically, prefer URL-safe passwords (`A-Z`, `a-z`, `0-9`, `-`, `_`) to avoid DSN parsing issues. 
+ ### Build and Run: ```sh @@ -292,7 +296,7 @@ These are some useful commands for MySQL. Open a shell in the MySQL container using docker or docker-compose. ```s -docker exec -it torrust-mysql-1 /bin/bash +docker exec -it torrust-mysql-1 /bin/bash docker compose exec mysql /bin/bash ``` diff --git a/docs/issues/1697-ai-agent-configuration.md b/docs/issues/1697-ai-agent-configuration.md new file mode 100644 index 000000000..925f04ea5 --- /dev/null +++ b/docs/issues/1697-ai-agent-configuration.md @@ -0,0 +1,358 @@ +# Set Up Basic AI Agent Configuration + +## Goal + +Set up the foundational configuration files in this repository to enable effective collaboration with AI coding agents. This includes adding an `AGENTS.md` file to guide agents on project conventions, adding agent skills for repeatable specialized tasks, and defining custom agents for project-specific workflows. + +## References + +- **AGENTS.md specification**: https://agents.md/ +- **Agent Skills specification**: https://agentskills.io/specification +- **GitHub Copilot — About agent skills**: https://docs.github.com/en/copilot/concepts/agents/about-agent-skills +- **GitHub Copilot — About custom agents**: https://docs.github.com/en/copilot/concepts/agents/copilot-cli/about-custom-agents + +## Background + +### AGENTS.md + +`AGENTS.md` is an open, plain-Markdown format stewarded by the [Agentic AI Foundation](https://aaif.io/) under the Linux Foundation. It acts as a "README for agents": a single, predictable file where coding agents look first for project-specific context (build steps, test commands, conventions, security considerations) that would otherwise clutter the human-focused `README.md`. + +It is supported by a wide ecosystem of tools including GitHub Copilot (VS Code), Cursor, Windsurf, OpenAI Codex, Claude Code, Jules (Google), Warp, and many others. In monorepos, nested `AGENTS.md` files can be placed inside each package; the closest file to the file being edited takes precedence. 
+ +### Agent Skills + +Agent Skills (https://agentskills.io/specification) are directories of instructions, scripts, and resources that an agent can load to perform specialized, repeatable tasks. Each skill lives in a folder named after the skill and contains at minimum a `SKILL.md` file with YAML frontmatter (`name`, `description`, optional `license`, `compatibility`, `metadata`, `allowed-tools`) followed by Markdown instructions. + +GitHub Copilot supports: + +- **Project skills** stored in the repository at `.github/skills/`, `.claude/skills/`, or `.agents/skills/` +- **Personal skills** stored in the home directory at `~/.copilot/skills/`, `~/.claude/skills/`, or `~/.agents/skills/` + +### Custom Agents + +Custom agents are specialized versions of GitHub Copilot that can be tailored to project-specific workflows. They are defined as Markdown files with YAML frontmatter (agent profiles) stored at: + +- **Repository level**: `.github/agents/CUSTOM-AGENT-NAME.md` +- **Organization/enterprise level**: `/agents/CUSTOM-AGENT-NAME.md` inside a `.github-private` repository + +An agent profile includes a `name`, `description`, optional `tools`, and optional `mcp-servers` configurations. The Markdown body of the file acts as the agent's prompt (it is not a YAML frontmatter key). The main Copilot agent can run custom agents as subagents in isolated context windows, including in parallel. + +## Tasks + +### Task 0: Create a local branch + +- Approved branch name: `<issue-number>-ai-agent-configuration` +- Commands: + - `git fetch --all --prune` + - `git checkout develop` + - `git pull --ff-only` + - `git checkout -b <issue-number>-ai-agent-configuration` +- Checkpoint: `git branch --show-current` should output `<issue-number>-ai-agent-configuration`. + +--- + +### Task 1: Add `AGENTS.md` at the repository root + +Provide AI coding agents with a clear, predictable source of project context so they can work +effectively without requiring repeated manual instructions. 
+ +**Inspiration / reference AGENTS.md files from other Torrust projects**: + +- https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/AGENTS.md +- https://raw.githubusercontent.com/torrust/torrust-linting/refs/heads/main/AGENTS.md + +Create `AGENTS.md` in the repository root, adapting the above files to the tracker. At minimum +the file must cover: + +- [x] Repository link and project overview (language, license, MSRV, web framework, protocols, databases) +- [x] Tech stack (languages, frameworks, databases, containerization, linting tools) +- [x] Key directories (`src/`, `src/bin/`, `packages/`, `console/`, `contrib/`, `tests/`, `docs/`, `share/`, `storage/`, `.github/workflows/`) +- [x] Package catalog (all workspace packages with their layer and description) +- [x] Package naming conventions (`axum-*`, `*-server`, `*-core`, `*-protocol`) +- [x] Key configuration files (`.markdownlint.json`, `.yamllint-ci.yml`, `.taplo.toml`, `cspell.json`, `rustfmt.toml`, etc.) 
+- [x] Build & test commands (`cargo build`, `cargo test --doc`, `cargo test --all-targets`, E2E runner, benchmarks) +- [x] Lint commands (`linter all` and individual linters; how to install the `linter` binary) +- [x] Dependencies check (`cargo machete`) +- [x] Code style (rustfmt rules, clippy policy, import grouping, per-format rules) +- [x] Collaboration principles (no flattery, push back on weak ideas, flag blockers early) +- [x] Essential rules (linting gate, GPG commit signing, no `storage/`/`target/` commits, `cargo machete`) +- [x] Git workflow (branch naming, Conventional Commits, branch strategy: `develop` → `staging/main` → `main`) +- [x] Development principles (observability, testability, modularity, extensibility; Beck's four rules) +- [x] Container / Docker (key commands, ports, volume mount paths) +- [x] Auto-invoke skills placeholder (to be filled in when `.github/skills/` is populated) +- [x] Documentation quick-navigation table +- [x] Add a brief entry to `docs/index.md` pointing contributors to `AGENTS.md`, `.github/skills/`, and `.github/agents/` + +Commit message: `docs(agents): add root AGENTS.md` + +Checkpoint: + +- `linter all` exits with code `0`. +- At least one AI agent (GitHub Copilot, Cursor, etc.) can be confirmed to pick up the file. + +**References**: + +- https://agents.md/ +- https://github.com/openai/codex/blob/-/AGENTS.md (real-world example) +- https://github.com/apache/airflow/blob/-/AGENTS.md (real-world monorepo example) + +--- + +### Task 2: Add Agent Skills + +Define reusable, project-specific skills that agents can load to perform specialized tasks on +this repository consistently. 
+ +- [x] Create `.github/skills/` directory +- [x] Review and confirm the candidate skills listed below (add, remove, or adjust before starting implementation) +- [x] For each skill, create a directory with: + - `SKILL.md` — YAML frontmatter (`name`, `description`, optional `license`, `compatibility`) + step-by-step instructions + - `scripts/` (optional) — executable scripts the agent can run + - `references/` (optional) — additional reference documentation +- [x] Validate skill files against the Agent Skills spec (name rules: lowercase, hyphens, no consecutive hyphens, max 64 chars; description: max 1024 chars) + +**Candidate initial skills** (ported / adapted from `torrust-tracker-deployer`): + +The skills below are modelled on the skills already proven in +[torrust-tracker-deployer](https://github.com/torrust/torrust-tracker-deployer) +(`.github/skills/`). Deployer-specific skills (Ansible, Tera templates, LXD, SDK, +deployer CLI architecture) are excluded because they have no equivalent in the tracker. + +Directory layout to mirror the deployer structure: + +```text +.github/skills/ + add-new-skill/ + dev/ + git-workflow/ + maintenance/ + planning/ + rust-code-quality/ + testing/ +``` + +**`add-new-skill`** ✅ — meta-skill: guide for creating new Agent Skills for this repository. + +**`dev/git-workflow/`**: + +- `commit-changes` ✅ — commit following Conventional Commits; pre-commit verification checklist. +- `create-feature-branch` ✅ — branch naming convention and lifecycle. +- `open-pull-request` ✅ — open a PR via GitHub CLI or GitHub MCP tool; pre-flight checks. +- `release-new-version` ✅ — version bump, signed release commit, signed tag, CI verification. +- `review-pr` ✅ — review a PR against Torrust quality standards and checklist. +- `run-linters` ✅ — run the full linting suite (`linter all`); fix individual linter failures. +- `run-pre-commit-checks` ✅ — mandatory quality gates before every commit. 
+ +**`dev/maintenance/`**: + +- `install-linter` ✅ — install the `linter` binary and its external tool dependencies. +- `setup-dev-environment` ✅ — full onboarding guide: system deps, Rust toolchain, storage dirs, linter, git hooks, smoke test. +- `update-dependencies` ✅ — run `cargo update`, create branch, commit, push, open PR. + +**`dev/planning/`**: + +- `create-adr` ✅ — create an Architectural Decision Record in `docs/adrs/`. +- `create-issue` ✅ — draft and open a GitHub issue following project conventions. +- `write-markdown-docs` ✅ — GFM pitfalls (auto-links, ordered list numbering, etc.). +- `cleanup-completed-issues` ✅ — remove issue doc files and update roadmap after PR merge. + +**`dev/rust-code-quality/`**: + +- `handle-errors-in-code` ✅ — `thiserror`-based structured errors; what/where/when/why context. +- `handle-secrets` ✅ — wrapper types for tokens/passwords; never use plain `String` for secrets. + +**`dev/testing/`**: + +- `write-unit-test` ✅ — `it_should_*` naming, AAA pattern, `MockClock`, `TempDir`, `rstest`. + +Commit message: `docs(agents): add initial agent skills under .github/skills/` + +Checkpoint: + +- `linter all` exits with code `0`. +- At least one skill can be successfully activated by GitHub Copilot. + +**References**: + +- https://agentskills.io/specification +- https://docs.github.com/en/copilot/concepts/agents/about-agent-skills +- https://docs.github.com/en/copilot/how-tos/copilot-cli/customize-copilot/add-skills +- https://github.com/anthropics/skills (community skills examples) +- https://github.com/github/awesome-copilot (community collection) + +--- + +### Task 3: Add Custom Agents + +Define custom GitHub Copilot agents tailored to Torrust project workflows so that specialized +tasks can be delegated to focused agents with the right prompt context. 
+ +- [x] Create `.github/agents/` directory +- [x] Identify workflows that benefit from a dedicated agent +- [x] For each agent, create `.github/agents/<agent-name>.md` with: + - YAML frontmatter: `name` (optional), `description`, optional `tools` + - Prompt body: role definition, scope, constraints, and step-by-step instructions +- [x] Test each custom agent by assigning it to a task or issue in GitHub Copilot CLI + +**Candidate initial agents**: + +- `committer` ✅ — commit specialist: reads branch/diff, runs pre-commit checks + (`./scripts/pre-commit.sh`), proposes a GPG-signed Conventional Commit message, and creates + the commit only after scope and checks are clear. Reference: + [`torrust-tracker-demo/.github/agents/commiter.agent.md`](https://raw.githubusercontent.com/torrust/torrust-tracker-demo/refs/heads/main/.github/agents/commiter.agent.md) +- `implementer` ✅ — software implementer that applies Test-Driven Development and seeks the + simplest solution. Follows a structured process: analyse → decompose into small steps → + implement with TDD → call the Complexity Auditor after each step → call the Committer when + ready. Guided by Beck's Four Rules of Simple Design. +- `complexity-auditor` ✅ — code quality auditor that checks cyclomatic and cognitive complexity + of changes after each implementation step. Reports PASS/WARN/FAIL per function using thresholds + and Clippy's `cognitive_complexity` lint. Called by the Implementer; can also be invoked + directly. + +**Future agents** (not yet implemented): + +- `issue-planner` — given a GitHub issue, produces a detailed implementation plan document + (like those in `docs/issues/`) including branch name, task breakdown, checkpoints, and commit + message suggestions. +- `code-reviewer` — reviews PRs against Torrust coding conventions, clippy rules, and security + considerations. +- `docs-writer` — creates or updates documentation files following the existing docs structure. 
+ +Commit message: `docs(agents): add initial custom agents under .github/agents/` + +Checkpoint: + +- `linter all` exits with code `0`. +- At least one custom agent can be assigned to a task in GitHub Copilot CLI. + +**References**: + +- https://docs.github.com/en/copilot/concepts/agents/copilot-cli/about-custom-agents +- https://docs.github.com/en/copilot/how-tos/copilot-cli/customize-copilot/create-custom-agents-for-cli +- https://docs.github.com/en/copilot/reference/customization-cheat-sheet + +--- + +### Task 4 (optional / follow-up): Add nested `AGENTS.md` files in packages + +Once the root file is stable, evaluate whether any workspace packages have sufficiently different +conventions or setup to warrant their own `AGENTS.md`. This can be tracked as a separate follow-up +issue. + +- [x] Evaluate workspace packages for package-specific conventions +- [x] Add `packages/AGENTS.md` — guidance scoped to all workspace packages +- [x] Add `src/AGENTS.md` — guidance scoped to the main binary/library source + +> **Note**: Completed as part of Task 1. `packages/AGENTS.md` and `src/AGENTS.md` were added +> alongside the root `AGENTS.md`. + +--- + +### Task 5: Add `copilot-setup-steps.yml` workflow + +Create `.github/workflows/copilot-setup-steps.yml` so that the GitHub Copilot cloud agent gets a +fully prepared development environment before it starts working on any task. Without this file, +Copilot discovers and installs dependencies itself via trial-and-error, which is slow and +unreliable. + +The workflow must contain a single `copilot-setup-steps` job (the exact job name is required by +Copilot). Steps run in GitHub Actions before Copilot starts; the file is also automatically +executed as a normal CI workflow whenever it changes, providing built-in validation. 
+ +**Reference example** (from `torrust-tracker-deployer`): +https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.github/workflows/copilot-setup-steps.yml + +Minimum steps to include: + +- [x] Trigger on `workflow_dispatch`, `push` and `pull_request` (scoped to the workflow file path) +- [x] `copilot-setup-steps` job on `ubuntu-latest`, `timeout-minutes: 30`, `permissions: contents: read` +- [x] `actions/checkout@v6` — check out the repository (verify this is still the latest stable + version on the GitHub Marketplace before merging) +- [x] `dtolnay/rust-toolchain@stable` — install the stable Rust toolchain (pin MSRV if needed) +- [x] `Swatinem/rust-cache@v2` — cache `target/` and `~/.cargo` between runs +- [x] `cargo build` warm-up — build the workspace (or key packages) so incremental compilation is + ready when Copilot starts editing +- [x] Install the `linter` binary — + `cargo install --locked --git https://github.com/torrust/torrust-linting --bin linter` +- [x] Install `cargo-machete` — `cargo install cargo-machete`; ensures Copilot can run unused + dependency checks (`cargo machete`) as required by the essential rules +- [x] Smoke-check: run `linter all` to confirm the environment is healthy before Copilot begins +- [x] Install Git pre-commit hooks — `./scripts/install-git-hooks.sh` + +Commit message: `ci(copilot): add copilot-setup-steps workflow` + +Checkpoint: + +- The workflow runs successfully via the repository's **Actions** tab (manual dispatch or push to + the file). +- `linter all` exits with code `0` inside the workflow. 
+ +**References**: + +- https://docs.github.com/en/copilot/how-tos/use-copilot-agents/cloud-agent/customize-the-agent-environment +- https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.github/workflows/copilot-setup-steps.yml + +--- + +### Task 6: Create an ADR for the AI agent framework approach + +> **Note**: This task documents the decision that underlies the whole issue. It can be done +> before Tasks 1–5 if preferred — recording the decision first and then implementing it is +> the conventional ADR practice. + +Document the decision to build a custom, GitHub-Copilot-aligned agent framework (AGENTS.md + +Agent Skills + Custom Agents) rather than adopting one of the existing pre-defined agent +frameworks that were evaluated. + +**Frameworks evaluated and not adopted**: + +- [obra/superpowers](https://github.com/obra/superpowers) +- [gsd-build/get-shit-done](https://github.com/gsd-build/get-shit-done) + +**Reasons for not adopting them**: + +1. Complexity mismatch — they introduce abstractions that are heavier than what tracker + development needs. +2. Precision requirements — the tracker involves low-level programming where agent work must be + reviewed carefully; generic productivity frameworks are not designed around that constraint. +3. GitHub-first ecosystem — the tracker is hosted on GitHub and makes intensive use of GitHub + resources (Actions, Copilot, MCP tools, etc.). Staying aligned with GitHub Copilot avoids + unnecessary integration friction. +4. Tooling churn — the AI agent landscape is evolving rapidly; depending on a third-party + framework risks forced refactoring when that framework is deprecated or pivots. A first-party + approach is more stable. +5. Tailored fit — a custom solution can be shaped precisely to Torrust conventions, commit style, + linting gates, and package structure from day one. +6. 
Proven in practice — the same approach has already been validated during the development of + `torrust-tracker-deployer`. +7. Agent-agnostic by design — keeping the framework expressed as plain Markdown files + (AGENTS.md, SKILL.md, agent profiles) decouples it from any single agent product, making + migration or multi-agent use straightforward. +8. Incremental adoption — individual skills, custom agents, or patterns from those frameworks can + still be cherry-picked and integrated progressively if specific value is identified. + +- [x] Create `docs/adrs/<YYYYMMDDHHMMSS>_ai-agent-framework-approach.md` using the `create-adr` skill +- [x] Record the decision, the alternatives considered, and the reasoning above + +Commit message: `docs(adrs): add ADR for AI agent framework approach` + +Checkpoint: + +- `linter all` exits with code `0`. + +**References**: + +- `docs/adrs/README.md` — ADR naming convention for this repository +- https://adr.github.io/ + +--- + +## Acceptance Criteria + +- [x] `AGENTS.md` exists at the repo root and contains accurate, up-to-date project guidance. +- [x] At least one skill is available under `.github/skills/` and can be successfully activated by GitHub Copilot. +- [x] At least one custom agent is available under `.github/agents/` and can be assigned to a task. +- [x] `copilot-setup-steps.yml` exists, the workflow runs successfully in the **Actions** tab, and `linter all` exits with code `0` inside it. +- [x] An ADR exists in `docs/adrs/` documenting the decision to use a custom GitHub-Copilot-aligned agent framework. +- [x] All files pass spelling checks (`cspell`) and markdown linting. +- [x] A brief entry in `docs/index.md` points contributors to `AGENTS.md`, `.github/skills/`, and `.github/agents/`. 
diff --git a/docs/issues/523-internal-linting-tool.md b/docs/issues/523-internal-linting-tool.md new file mode 100644 index 000000000..14593e190 --- /dev/null +++ b/docs/issues/523-internal-linting-tool.md @@ -0,0 +1,141 @@ +# Issue #523 Implementation Plan (Internal Linting Tool) + +## Goal + +Replace the MegaLinter idea with Torrust internal linting tooling and integrate it into CI for this repository. + +## Scope + +- Target issue: https://github.com/torrust/torrust-tracker/issues/523 +- CI workflow to modify: .github/workflows/testing.yaml +- External reference workflow: https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.github/workflows/linting.yml + +## Tasks + +### 0) Create a local branch following GitHub branch naming conventions + +- Approved branch name: `523-internal-linting-tool` +- Commands: + - `git fetch --all --prune` + - `git checkout develop` + - `git pull --ff-only` + - `git checkout -b 523-internal-linting-tool` +- Checkpoint: + - `git branch --show-current` should output `523-internal-linting-tool`. + +### 1) Install and run the linting tool locally; verify it passes in this repo + +- Identify/install internal linting package/tool used by Torrust (likely `torrust-linting` or equivalent wrapper). +- Ensure local runtime dependencies are present (if any). +- Note: linter config files (step 2) must exist in the repo root before a full suite run; it is fine to do a first exploratory run first to discover which linters are active. +- Run the internal linting command against this repository. +- Capture the exact command and output summary for reproducibility. +- Checkpoint: + - Linting command exits with code `0`. + +### 2) Add and adapt linter configuration files + +Some linters require a config file in the repo root. Use the deployer configs as reference and adapt values to this repository. 
+ +| File | Linter | Reference | +| -------------------- | ---------------- | ----------------------------------------------------------------------------------------------------- | +| `.markdownlint.json` | markdownlint | https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.markdownlint.json | +| `.taplo.toml` | taplo (TOML fmt) | https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.taplo.toml | +| `.yamllint-ci.yml` | yamllint | https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.yamllint-ci.yml | + +Key adaptations to make per file: + +- `.markdownlint.json`: review line-length rules and Markdown conventions used in this repo's docs. +- `.taplo.toml`: update `exclude` list to match this repo's generated/runtime folders (e.g. `target/**`, `storage/**`) instead of the deployer-specific ones (`build/**`, `data/**`, `envs/**`). +- `.yamllint-ci.yml`: update `ignore` block to reflect this repo's generated/runtime directories instead of cloud-init and deployer folders. + +Commit message: `ci(lint): add linter config files (.markdownlint.json, .taplo.toml, .yamllint-ci.yml)` + +Checkpoint: + +- Config files are present in the repo root. +- Running each individual linter against the repo with the config produces expected/controlled output. + +### 3) If local linting fails, fix all lint errors; commit fixes independently per linter + +- If the linting suite reports failures: + - Group findings by linter (for example: formatting, clippy, docs, spelling, yaml, etc.). + - Fix only one linter category at a time. + - Create one commit per linter category. +- Commit style proposal: + - `fix(lint/<linter-name>): resolve <brief issue summary>` +- Constraints: + - Do not mix workflow/tooling changes with source lint fixes in the same commit. + - Keep each commit minimal and reviewable. +- Checkpoint: + - Re-run linting suite; all checks pass before moving to workflow integration. 
+ +### 4) Review existing workflow example using internal linting + +- Read and analyze: + - https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.github/workflows/linting.yml +- Extract and adapt: + - Trigger strategy. + - Tool setup/install method. + - Cache strategy. + - Invocation command and CI fail behavior. +- Checkpoint: + - Document a short mapping from deployer workflow pattern to this repo’s `testing.yaml` job structure. + +### 5) Modify `.github/workflows/testing.yaml` to use the internal linting tool + +- Update the current `check`/lint-related section to run the internal linting command. +- Replace existing lint/check execution path with the internal linting tool in this migration (no parallel transition mode). +- Ensure matrix/toolchain compatibility is explicit (nightly/stable behavior decided and documented). +- Validate workflow syntax before commit. +- Checkpoint: + - Workflow is valid and executes linting through internal tool. + +### 6) Commit workflow changes + +- Commit only workflow-related changes in a dedicated commit. +- Commit message proposal: + - `ci(lint): switch testing workflow to internal linting tool` +- Checkpoint: + - `git show --name-only --stat HEAD` includes only expected workflow files (and any required supporting CI files if intentionally added). + +### 7) Push to remote `josecelano` and open PR into `develop` + +- Verify remote exists: + - `git remote -v` +- Push branch: + - `git push -u josecelano 523-internal-linting-tool` +- Open PR targeting `torrust/torrust-tracker:develop` with head `josecelano:523-internal-linting-tool`. +- PR content should include: + - Why internal linting over MegaLinter. + - Summary of lint-fix commits by linter. + - Summary of workflow change. + - Evidence (local run + CI status). +- Checkpoint: + - PR is open, linked to issue #523, and ready for review. + +## Execution Notes + +- Keep PR review-friendly by separating commits by concern: + 1. 
Linter config files (step 2) + 2. Per-linter source fixes (step 3, only if needed) + 3. CI workflow migration (step 6) +- Use Conventional Commits for all commits in this implementation. +- If lint checks differ between local and CI, align tool versions and execution flags before merging. +- Avoid broad refactors unrelated to lint failures. + +## Decisions Confirmed + +1. Branch name: `523-internal-linting-tool`. +2. CI strategy: replace existing lint/check path with internal linting. +3. Commit convention: yes, use Conventional Commits. +4. PR target: base `torrust/torrust-tracker:develop`, head `josecelano:523-internal-linting-tool`. + +## Risks and Mitigations + +- Risk: Internal linting wrapper may not be version-pinned and may produce unstable CI behavior. + - Mitigation: Pin tool version in workflow installation step. +- Risk: Internal linting may overlap with existing checks, increasing CI time. + - Mitigation: Remove redundant jobs only after verifying coverage parity. +- Risk: Tool may require secrets or environment assumptions not available in CI. + - Mitigation: Run dry-run in GitHub Actions on branch before requesting review. diff --git a/docs/media/demo/torrust-tracker-grafana-dashboard.png b/docs/media/demo/torrust-tracker-grafana-dashboard.png new file mode 100644 index 000000000..090932a8c Binary files /dev/null and b/docs/media/demo/torrust-tracker-grafana-dashboard.png differ diff --git a/docs/templates/ADR.md b/docs/templates/ADR.md new file mode 100644 index 000000000..fa8aebe27 --- /dev/null +++ b/docs/templates/ADR.md @@ -0,0 +1,24 @@ +# [Title] + +## Description + +What is the issue motivating this decision? Provide enough context for future +readers who have no prior background. + +## Agreement + +What was decided and why? Be concrete. Include code examples if the decision +involves specific patterns. 
+ +Optional sub-sections: + +- **Alternatives Considered** — other options explored and why they were rejected +- **Consequences** — positive and negative effects of the decision + +## Date + +YYYY-MM-DD + +## References + +Links to related issues, PRs, ADRs, and external documentation. diff --git a/docs/templates/ISSUE.md b/docs/templates/ISSUE.md new file mode 100644 index 000000000..7c899bacd --- /dev/null +++ b/docs/templates/ISSUE.md @@ -0,0 +1,33 @@ +# Issue: {Title} + +## Overview + +Clear description of what needs to be done and why. + +## Goals + +- [ ] Goal 1 +- [ ] Goal 2 + +## Implementation Plan + +### Task 1: {Task Title} + +- [ ] Sub-task a +- [ ] Sub-task b + +### Task 2: {Task Title} + +- [ ] Sub-task a +- [ ] Sub-task b + +## Acceptance Criteria + +- [ ] All tests pass +- [ ] `linter all` exits with code `0` +- [ ] Documentation updated + +## References + +- Related issues: #{number} +- Related ADRs: `docs/adrs/...` diff --git a/packages/AGENTS.md b/packages/AGENTS.md new file mode 100644 index 000000000..231bfe3a9 --- /dev/null +++ b/packages/AGENTS.md @@ -0,0 +1,152 @@ +# Torrust Tracker — Packages + +This directory contains all Cargo workspace packages. All domain logic, protocol +implementations, server infrastructure, and utility libraries live here. + +For full project context see the [root AGENTS.md](../AGENTS.md). + +## Architecture + +Packages are organized in strict layers. Dependencies only flow downward — a package may only +depend on packages in the same layer or a lower one. 
+ +```text +┌────────────────────────────────────────────────────────────────┐ +│ Servers (delivery layer) │ +│ axum-http-tracker-server axum-rest-tracker-api-server │ +│ axum-health-check-api-server udp-tracker-server │ +├────────────────────────────────────────────────────────────────┤ +│ Core (domain layer) │ +│ http-tracker-core udp-tracker-core tracker-core │ +│ rest-tracker-api-core swarm-coordination-registry │ +├────────────────────────────────────────────────────────────────┤ +│ Protocols │ +│ http-protocol udp-protocol │ +├────────────────────────────────────────────────────────────────┤ +│ Domain / Shared │ +│ torrent-repository configuration primitives │ +│ events metrics clock located-error server-lib │ +├────────────────────────────────────────────────────────────────┤ +│ Utilities / Test support │ +│ test-helpers │ +└────────────────────────────────────────────────────────────────┘ +``` + +**Key architectural rule**: Servers contain only network I/O logic. All business rules live in +`*-core` packages. Protocol parsing is isolated in `*-protocol` packages. + +See [docs/packages.md](../docs/packages.md) for a full diagram. + +## Package Catalog + +### Servers (`axum-*`, `udp-tracker-server`) + +Delivery layer — accept network connections, dispatch to core handlers, return responses. +These packages must not contain business logic. + +| Package | Entry point | Protocol | +| ------------------------------ | ------------ | ----------- | +| `axum-http-tracker-server` | `src/lib.rs` | HTTP BEP 3 | +| `axum-rest-tracker-api-server` | `src/lib.rs` | REST (JSON) | +| `axum-health-check-api-server` | `src/lib.rs` | HTTP | +| `axum-server` | `src/lib.rs` | Axum base | +| `udp-tracker-server` | `src/lib.rs` | UDP BEP 15 | + +### Core (`*-core`) + +Domain layer — business rules, request validation, response building. No Axum or networking +imports. Each core package exposes a `container` module that wires up its dependencies via +dependency injection. 
+ +| Package | Purpose | +| ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `tracker-core` | Central peer management: announce/scrape handlers, auth, whitelist, database abstraction (SQLite/MySQL drivers in `src/databases/driver/`) | +| `http-tracker-core` | HTTP-specific validation and response formatting | +| `udp-tracker-core` | UDP connection cookies, crypto, banning logic | +| `rest-tracker-api-core` | REST API statistics and container wiring | +| `swarm-coordination-registry` | Registry of torrents and their peer swarms | + +### Protocols (`*-protocol`) + +Strict BEP implementations — parse and serialize wire formats only. No tracker logic. + +| Package | BEP | Handles | +| --------------- | ------ | -------------------------------------------------------------- | +| `http-protocol` | BEP 3 | URL parameter parsing, bencoded responses, compact peer format | +| `udp-protocol` | BEP 15 | Message framing, connection IDs, transaction IDs | + +### Domain / Shared + +| Package | Purpose | +| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `torrent-repository` | Torrent metadata storage; InfoHash management; peer coordination | +| `configuration` | Config file parsing (`share/default/config/`) and env var loading (`TORRUST_TRACKER_CONFIG_TOML`, `TORRUST_TRACKER_CONFIG_TOML_PATH`); versioned under `src/v2_0_0/` | +| `primitives` | Core domain types: `InfoHash`, `PeerId`, `Peer`, `SwarmMetadata`, `ServiceBinding` | +| `events` | Async event bus (broadcaster / receiver / shutdown) used across packages | +| `metrics` | Prometheus-compatible metrics: counters, gauges, labels, samples | +| `server-lib` | Shared HTTP server utilities: logging, service registrar, signal handling | +| `clock` | Mockable time 
source — use `clock::Working` in production, `clock::Stopped` in tests | +| `located-error` | Error decorator that captures the source file/line of the original error | + +### Client Tools + +| Package | Purpose | +| ------------------------- | -------------------------------------------------------- | +| `tracker-client` | Generic HTTP and UDP tracker clients (used by E2E tests) | +| `rest-tracker-api-client` | Typed REST API client library | + +### Utilities / Test support + +| Package | Purpose | +| --------------------------------- | ---------------------------------------------------------- | +| `test-helpers` | Mock servers, test data generators, shared test fixtures | +| `torrent-repository-benchmarking` | Criterion benchmarks for alternative torrent storage impls | + +## Naming Conventions + +| Prefix / Suffix | Responsibility | May depend on | +| --------------- | ----------------------------------------- | ----------------------------- | +| `axum-*` | HTTP server components using Axum | `*-core`, Axum framework | +| `*-server` | Server implementations | Corresponding `*-core` | +| `*-core` | Domain logic and business rules | `*-protocol`, domain packages | +| `*-protocol` | BitTorrent protocol parsing/serialization | `primitives` | +| `udp-*` | UDP-specific implementations | `tracker-core` | +| `http-*` | HTTP-specific implementations | `tracker-core` | + +## Adding or Modifying a Package + +1. Create the directory under `packages/<new-package>/` with a `Cargo.toml` and `src/lib.rs`. +2. Add the package to the workspace `[members]` in the root `Cargo.toml`. +3. Follow the naming conventions above. +4. Each package must have: + - A crate-level doc comment in `src/lib.rs` explaining its purpose and layer. + - At minimum one unit test (doc-test acceptable for simple utility crates). +5. Run `cargo machete` after adding dependencies — unused deps must not be committed. +6. Run `linter all` before committing. 
+ +## Testing Packages + +```sh +# All tests for a specific package +cargo test -p <package-name> + +# Doc tests only +cargo test --doc -p <package-name> + +# MySQL-specific tests in tracker-core (requires a running MySQL instance) +TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST=true cargo test -p bittorrent-tracker-core +``` + +Use `clock::Stopped` (from the `clock` package) in unit tests that need deterministic time. +Use `test-helpers` for mock tracker servers in integration tests. + +## Key Dependency Notes + +- `swarm-coordination-registry` is the authoritative store for peer swarms; `tracker-core` + delegates peer lookups to it. +- `configuration` is the only package that reads from the filesystem or environment at startup; + other packages receive config structs as arguments. +- `located-error` wraps any `std::error::Error` — use it at module boundaries to preserve + error origin context without losing the original error type. +- `events` provides the only sanctioned inter-package async communication channel; avoid direct + `tokio::sync` coupling between packages. diff --git a/packages/axum-health-check-api-server/Cargo.toml b/packages/axum-health-check-api-server/Cargo.toml index e24e609bf..cf9d8d9a3 100644 --- a/packages/axum-health-check-api-server/Cargo.toml +++ b/packages/axum-health-check-api-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Bittorrent HTTP tracker." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "healthcheck", "http", "server", "torrust", "tracker"] +keywords = [ "axum", "bittorrent", "healthcheck", "http", "server", "torrust", "tracker" ] license.workspace = true name = "torrust-axum-health-check-api-server" publish.workspace = true @@ -14,25 +14,27 @@ rust-version.workspace = true version.workspace = true [dependencies] -axum = { version = "0", features = ["macros"] } -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum = { version = "0", features = [ "macros" ] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } futures = "0" hyper = "1" -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" +url = "2.5.4" [dev-dependencies] -reqwest = { version = "0", features = ["json"] } +reqwest = { version = "0", features = [ "json" ] } torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "../axum-health-check-api-server" } torrust-axum-http-tracker-server = { 
version = "3.0.0-develop", path = "../axum-http-tracker-server" } torrust-axum-rest-tracker-api-server = { version = "3.0.0-develop", path = "../axum-rest-tracker-api-server" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } diff --git a/packages/axum-health-check-api-server/src/handlers.rs b/packages/axum-health-check-api-server/src/handlers.rs index 0af2ab05d..a26c901d7 100644 --- a/packages/axum-health-check-api-server/src/handlers.rs +++ b/packages/axum-health-check-api-server/src/handlers.rs @@ -31,8 +31,10 @@ pub(crate) async fn health_check_handler(State(register): State<ServiceRegistry> let jobs = checks.drain(..).map(|c| { tokio::spawn(async move { CheckReport { - binding: c.binding, + service_binding: c.service_binding.url(), + binding: c.service_binding.bind_address(), info: c.info.clone(), + service_type: c.service_type, result: c.job.await.expect("it should be able to join into the checking function"), } }) diff --git a/packages/axum-health-check-api-server/src/resources.rs b/packages/axum-health-check-api-server/src/resources.rs index 3302fb966..44e64b24c 100644 --- a/packages/axum-health-check-api-server/src/resources.rs +++ b/packages/axum-health-check-api-server/src/resources.rs @@ -1,6 +1,7 @@ use std::net::SocketAddr; use serde::{Deserialize, Serialize}; +use url::Url; #[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub enum Status { @@ -11,7 +12,9 @@ pub enum Status { #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct CheckReport { + pub service_binding: Url, pub binding: SocketAddr, + pub service_type: String, pub info: String, pub result: Result<String, String>, } diff --git 
a/packages/axum-health-check-api-server/src/server.rs b/packages/axum-health-check-api-server/src/server.rs index 733fec3a0..a371f146e 100644 --- a/packages/axum-health-check-api-server/src/server.rs +++ b/packages/axum-health-check-api-server/src/server.rs @@ -18,6 +18,7 @@ use torrust_axum_server::signals::graceful_shutdown; use torrust_server_lib::logging::Latency; use torrust_server_lib::registar::ServiceRegistry; use torrust_server_lib::signals::{Halted, Started}; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use tower_http::classify::ServerErrorsFailureClass; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; @@ -100,7 +101,12 @@ pub fn start( .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)); let socket = std::net::TcpListener::bind(bind_to).expect("Could not bind tcp_listener to address."); + socket + .set_nonblocking(true) + .expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); + let protocol = Protocol::HTTP; // The health check API only supports HTTP directly now. Use a reverse proxy for HTTPS. 
+ let service_binding = ServiceBinding::new(protocol.clone(), address).expect("Service binding creation failed"); let handle = Handle::new(); @@ -110,14 +116,19 @@ pub fn start( handle.clone(), rx_halt, format!("Shutting down http server on socket address: {address}"), + address, )); let running = axum_server::from_tcp(socket) + .expect("Failed to create server from TCP socket") .handle(handle) .serve(router.into_make_service_with_connect_info::<SocketAddr>()); - tx.send(Started { address }) - .expect("the Health Check API server should not be dropped"); + tx.send(Started { + service_binding, + address, + }) + .expect("the Health Check API server should not be dropped"); running } diff --git a/packages/axum-health-check-api-server/tests/server/contract.rs b/packages/axum-health-check-api-server/tests/server/contract.rs index 0e0d26b83..af1c0cff9 100644 --- a/packages/axum-health-check-api-server/tests/server/contract.rs +++ b/packages/axum-health-check-api-server/tests/server/contract.rs @@ -119,11 +119,8 @@ mod api { assert_eq!(details.binding, binding); assert!( - details - .result - .as_ref() - .is_err_and(|e| e.contains("error sending request for url")), - "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result.as_ref().is_err_and(|e| e.contains("error sending request")), + "Expected to contain, \"error sending request\", but have message \"{:?}\".", details.result ); assert_eq!( @@ -205,6 +202,9 @@ mod http { service.server.stop().await.expect("it should stop udp server"); + // Give the OS a moment to fully release the TCP port after the server stops. 
+ tokio::time::sleep(std::time::Duration::from_millis(100)).await; + { let config = configuration.health_check_api.clone(); let env = Started::new(&config.into(), registar).await; @@ -226,11 +226,8 @@ mod http { assert_eq!(details.binding, binding); assert!( - details - .result - .as_ref() - .is_err_and(|e| e.contains("error sending request for url")), - "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result.as_ref().is_err_and(|e| e.contains("error sending request")), + "Expected to contain, \"error sending request\", but have message \"{:?}\".", details.result ); assert_eq!( diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index 0c64ee986..88d073527 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Bittorrent HTTP tracker." documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "http", "server", "torrust", "tracker"] +keywords = [ "axum", "bittorrent", "http", "server", "torrust", "tracker" ] license.workspace = true name = "torrust-axum-http-tracker-server" publish.workspace = true @@ -15,26 +15,28 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" -axum = { version = "0", features = ["macros"] } +axum = { version = "0", features = [ "macros" ] } axum-client-ip = "0" -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } 
+derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } futures = "0" hyper = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } +tokio-util = "0.7.15" torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -tower = { version = "0", features = ["timeout"] } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +tower = { version = "0", features = [ "timeout" ] } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" [dev-dependencies] @@ -45,6 +47,7 @@ serde_bencode = "0" serde_bytes = "0" serde_repr = "0" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } -uuid = { version = "1", features = ["v4"] } +uuid = { version = "1", features = [ "v4" ] } zerocopy = "0.7" diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 81f0a1ef3..616973a0f 100644 --- 
a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -1,13 +1,17 @@ use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; +use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use futures::executor::block_on; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::server::{HttpServer, Launcher, Running, Stopped}; @@ -17,16 +21,18 @@ pub struct Environment<S> { pub container: Arc<EnvContainer>, pub registar: Registar, pub server: HttpServer<S>, + pub event_listener_job: Option<JoinHandle<()>>, + pub cancellation_token: CancellationToken, } impl<S> Environment<S> { /// Add a torrent to the tracker - pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None); + .handle_announcement(info_hash, peer, None) + .await; } } @@ -54,22 +60,38 @@ impl Environment<Stopped> { container, registar: Registar::default(), server, + event_listener_job: None, + cancellation_token: CancellationToken::new(), } } + /// Starts the test environment and return a running environment. + /// /// # Panics /// /// Will panic if the server fails to start. 
#[allow(dead_code)] pub async fn start(self) -> Environment<Running> { + // Start the event listener + let event_listener_job = run_event_listener( + self.container.http_tracker_core_container.event_bus.receiver(), + self.cancellation_token.clone(), + &self.container.http_tracker_core_container.stats_repository, + ); + + // Start the server + let server = self + .server + .start(self.container.http_tracker_core_container.clone(), self.registar.give_form()) + .await + .expect("Failed to start the HTTP tracker server"); + Environment { container: self.container.clone(), registar: self.registar.clone(), - server: self - .server - .start(self.container.http_tracker_core_container.clone(), self.registar.give_form()) - .await - .unwrap(), + server, + event_listener_job: Some(event_listener_job), + cancellation_token: self.cancellation_token, } } } @@ -79,14 +101,28 @@ impl Environment<Running> { Environment::<Stopped>::new(configuration).start().await } + /// Stops the test environment and return a stopped environment. + /// /// # Panics /// /// Will panic if the server fails to stop. 
pub async fn stop(self) -> Environment<Stopped> { + // Stop the event listener + if let Some(event_listener_job) = self.event_listener_job { + // todo: send a message to the event listener to stop and wait for + // it to finish + event_listener_job.abort(); + } + + // Stop the server + let server = self.server.stop().await.expect("Failed to stop the HTTP tracker server"); + Environment { container: self.container, registar: Registar::default(), - server: self.server.stop().await.unwrap(), + server, + event_listener_job: None, + cancellation_token: self.cancellation_token, } } @@ -114,8 +150,17 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = Arc::new(http_tracker_config[0].clone()); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); - let http_tracker_container = HttpTrackerCoreContainer::initialize_from(&tracker_core_container, &http_tracker_config); + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + configuration.core.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &swarm_coordination_registry_container, + )); + + let http_tracker_container = + HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); Self { tracker_core_container, diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index ea8003a4f..69f9cb72e 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -13,11 +13,13 @@ use torrust_axum_server::signals::graceful_shutdown; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use torrust_server_lib::signals::{Halted, Started}; +use 
torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use tracing::instrument; use super::v1::routes::router; use crate::HTTP_TRACKER_LOG_TARGET; +const TYPE_STRING: &str = "http_tracker"; /// Error that can occur when starting or stopping the HTTP server. /// /// Some errors triggered while starting the server are: @@ -45,11 +47,14 @@ impl Launcher { #[instrument(skip(self, http_tracker_container, tx_start, rx_halt))] fn start( &self, - http_tracker_container: Arc<HttpTrackerCoreContainer>, + http_tracker_container: &Arc<HttpTrackerCoreContainer>, tx_start: Sender<Started>, rx_halt: Receiver<Halted>, ) -> BoxFuture<'static, ()> { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + socket + .set_nonblocking(true) + .expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let handle = Handle::new(); @@ -58,18 +63,21 @@ impl Launcher { handle.clone(), rx_halt, format!("Shutting down HTTP server on socket address: {address}"), + address, )); let tls = self.tls.clone(); - let protocol = if tls.is_some() { "https" } else { "http" }; + let protocol = if tls.is_some() { Protocol::HTTPS } else { Protocol::HTTP }; + let service_binding = ServiceBinding::new(protocol.clone(), address).expect("Service binding creation failed"); - tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{address}"); - let app = router(http_tracker_container, address); + let app = router(http_tracker_container, &service_binding); let running = Box::pin(async { match tls { Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) + .expect("Failed to create server from TCP socket with TLS") .handle(handle) // The TimeoutAcceptor is commented because TSL does not work with it. 
// See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 @@ -78,6 +86,7 @@ impl Launcher { .await .expect("Axum server crashed."), None => custom_axum_server::from_tcp_with_timeouts(socket) + .expect("Failed to create server from TCP socket") .handle(handle) .acceptor(TimeoutAcceptor) .serve(app.into_make_service_with_connect_info::<std::net::SocketAddr>()) @@ -89,7 +98,10 @@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); tx_start - .send(Started { address }) + .send(Started { + service_binding, + address, + }) .expect("the HTTP(s) Tracker service should not be dropped"); running @@ -169,16 +181,19 @@ impl HttpServer<Stopped> { let launcher = self.state.launcher; let task = tokio::spawn(async move { - let server = launcher.start(http_tracker_container, tx_start, rx_halt); + let server = launcher.start(&http_tracker_container, tx_start, rx_halt); server.await; launcher }); - let binding = rx_start.await.expect("it should be able to start the service").address; + let started = rx_start.await.expect("it should be able to start the service"); + + let listen_url = started.service_binding; + let binding = started.address; - form.send(ServiceRegistration::new(binding, check_fn)) + form.send(ServiceRegistration::new(listen_url, check_fn)) .expect("it should be able to send service registration"); Ok(HttpServer { @@ -219,8 +234,8 @@ impl HttpServer<Running> { /// This function will return an error if unable to connect. /// Or if the request returns an error. 
#[must_use] -pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { - let url = format!("http://{binding}/health_check"); // DevSkim: ignore DS137138 +pub fn check_fn(service_binding: &ServiceBinding) -> ServiceHealthCheckJob { + let url = format!("http://{}/health_check", service_binding.bind_address()); // DevSkim: ignore DS137138 let info = format!("checking http tracker health check at: {url}"); @@ -231,7 +246,7 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { } }); - ServiceHealthCheckJob::new(*binding, info, job) + ServiceHealthCheckJob::new(service_binding.clone(), info, TYPE_STRING.to_string(), job) } #[cfg(test)] @@ -239,25 +254,25 @@ mod tests { use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; + use bittorrent_http_tracker_core::event::bus::EventBus; + use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_core::services::scrape::ScrapeService; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use bittorrent_tracker_core::authentication::service; - use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::scrape_handler::ScrapeHandler; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; - use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; + use bittorrent_http_tracker_core::statistics::repository::Repository; + use bittorrent_tracker_core::container::TrackerCoreContainer; + use 
tokio_util::sync::CancellationToken; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; + use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::server::{HttpServer, Launcher}; pub fn initialize_container(configuration: &Configuration) -> HttpTrackerCoreContainer { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(configuration.core.clone()); let http_trackers = configuration @@ -269,57 +284,50 @@ mod tests { let http_tracker_config = Arc::new(http_tracker_config.clone()); - // HTTP stats - let (http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let http_stats_repository = Arc::new(http_stats_repository); - - let database = initialize_database(&configuration.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::AuthenticationService::new( - &configuration.core, - &in_memory_key_repository, + // HTTP core stats + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_event_bus = Arc::new(EventBus::new( + configuration.core.tracker_usage_statistics.into(), + http_core_broadcaster.clone(), )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let announce_handler = 
Arc::new(AnnounceHandler::new( - &configuration.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, + + let http_stats_event_sender = http_stats_event_bus.sender(); + + if configuration.core.tracker_usage_statistics { + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); + } + + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + configuration.core.tracker_usage_statistics.into(), )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &swarm_coordination_registry_container, + )); let announce_service = Arc::new(AnnounceService::new( - core_config.clone(), - announce_handler.clone(), - authentication_service.clone(), - whitelist_authorization.clone(), + tracker_core_container.core_config.clone(), + tracker_core_container.announce_handler.clone(), + tracker_core_container.authentication_service.clone(), + tracker_core_container.whitelist_authorization.clone(), http_stats_event_sender.clone(), )); let scrape_service = Arc::new(ScrapeService::new( - core_config.clone(), - scrape_handler.clone(), - authentication_service.clone(), + tracker_core_container.core_config.clone(), + tracker_core_container.scrape_handler.clone(), + tracker_core_container.authentication_service.clone(), http_stats_event_sender.clone(), )); HttpTrackerCoreContainer { - core_config, - announce_handler, - scrape_handler, - whitelist_authorization, - authentication_service, - + tracker_core_container, http_tracker_config, - http_stats_event_sender, - http_stats_repository, + event_bus: http_stats_event_bus, + stats_event_sender: http_stats_event_sender, + stats_repository: http_stats_repository, announce_service, scrape_service, } diff --git 
a/packages/axum-http-tracker-server/src/v1/extractors/client_ip_sources.rs b/packages/axum-http-tracker-server/src/v1/extractors/client_ip_sources.rs index 8c7a2bf40..ed568e0b9 100644 --- a/packages/axum-http-tracker-server/src/v1/extractors/client_ip_sources.rs +++ b/packages/axum-http-tracker-server/src/v1/extractors/client_ip_sources.rs @@ -63,13 +63,13 @@ where }; let connection_info_ip = match ConnectInfo::<SocketAddr>::from_request_parts(parts, state).await { - Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0.ip()), + Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0), Err(_) => None, }; Ok(Extract(ClientIpSources { right_most_x_forwarded_for, - connection_info_ip, + connection_info_socket_address: connection_info_ip, })) } } diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 6c2e4b713..ce718cd30 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -13,6 +13,7 @@ use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSo use bittorrent_tracker_core::authentication::Key; use hyper::StatusCode; use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::v1::extractors::announce_request::ExtractRequest; use crate::v1::extractors::authentication_key::Extract as ExtractKey; @@ -22,27 +23,27 @@ use crate::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; /// authentication (no PATH `key` parameter required). 
#[allow(clippy::unused_async)] pub async fn handle_without_key( - State(state): State<Arc<AnnounceService>>, + State(state): State<(Arc<AnnounceService>, ServiceBinding)>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle(&state, &announce_request, &client_ip_sources, None).await + handle(&state.0, &announce_request, &client_ip_sources, &state.1, None).await } /// It handles the `announce` request when the HTTP tracker requires /// authentication (PATH `key` parameter required). #[allow(clippy::unused_async)] pub async fn handle_with_key( - State(state): State<Arc<AnnounceService>>, + State(state): State<(Arc<AnnounceService>, ServiceBinding)>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle(&state, &announce_request, &client_ip_sources, Some(key)).await + handle(&state.0, &announce_request, &client_ip_sources, &state.1, Some(key)).await } /// It handles the `announce` request. 
@@ -53,9 +54,18 @@ async fn handle( announce_service: &Arc<AnnounceService>, announce_request: &Announce, client_ip_sources: &ClientIpSources, + server_service_binding: &ServiceBinding, maybe_key: Option<Key>, ) -> Response { - let announce_data = match handle_announce(announce_service, announce_request, client_ip_sources, maybe_key).await { + let announce_data = match handle_announce( + announce_service, + announce_request, + client_ip_sources, + server_service_binding, + maybe_key, + ) + .await + { Ok(announce_data) => announce_data, Err(error) => { let error_response = responses::error::Error { @@ -71,10 +81,11 @@ async fn handle_announce( announce_service: &Arc<AnnounceService>, announce_request: &Announce, client_ip_sources: &ClientIpSources, + server_service_binding: &ServiceBinding, maybe_key: Option<Key>, ) -> Result<AnnounceData, HttpAnnounceError> { announce_service - .handle_announce(announce_request, client_ip_sources, maybe_key) + .handle_announce(announce_request, client_ip_sources, server_service_binding, maybe_key) .await } @@ -96,7 +107,11 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::PeerId; + use bittorrent_http_tracker_core::event::bus::EventBus; + use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::services::announce::AnnounceService; + use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; + use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -104,10 +119,11 @@ mod tests { use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; + use 
bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; @@ -134,6 +150,9 @@ mod tests { } fn initialize_core_tracker_services(config: &Configuration) -> CoreHttpTrackerServices { + let cancellation_token = CancellationToken::new(); + + // Initialize the core tracker services with the provided configuration. let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -141,19 +160,27 @@ mod tests { let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, + )); + + // HTTP core stats + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_event_bus = Arc::new(EventBus::new( + config.core.tracker_usage_statistics.into(), + http_core_broadcaster.clone(), )); - // HTTP stats - let 
(http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let _http_stats_repository = Arc::new(http_stats_repository); + let http_stats_event_sender = http_stats_event_bus.sender(); + + if config.core.tracker_usage_statistics { + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); + } let announce_service = Arc::new(AnnounceService::new( core_config.clone(), @@ -183,7 +210,7 @@ mod tests { fn sample_client_ip_sources() -> ClientIpSources { ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, } } @@ -196,10 +223,12 @@ mod tests { mod with_tracker_in_private_mode { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_tracker_core::authentication; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_private_tracker, sample_announce_request, sample_client_ip_sources}; use crate::v1::handlers::announce::handle_announce; @@ -209,12 +238,16 @@ mod tests { async fn it_should_fail_when_the_authentication_key_is_missing() { let http_core_tracker_services = initialize_private_tracker(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let maybe_key = None; let response = handle_announce( &http_core_tracker_services.announce_service, &sample_announce_request(), &sample_client_ip_sources(), + &server_service_binding, maybe_key, ) .await @@ -236,12 +269,16 @@ mod tests { let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let server_socket_addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let maybe_key = Some(unregistered_key); let response = handle_announce( &http_core_tracker_services.announce_service, &sample_announce_request(), &sample_client_ip_sources(), + &server_service_binding, maybe_key, ) .await @@ -260,7 +297,10 @@ mod tests { mod with_tracker_in_listed_mode { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_protocol::v1::responses; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_listed_tracker, sample_announce_request, sample_client_ip_sources}; use crate::v1::handlers::announce::handle_announce; @@ -272,10 +312,14 @@ mod tests { let announce_request = sample_announce_request(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let response = handle_announce( &http_core_tracker_services.announce_service, &announce_request, &sample_client_ip_sources(), + &server_service_binding, None, ) .await @@ -297,8 +341,11 @@ mod tests { mod with_tracker_on_reverse_proxy { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_tracker_on_reverse_proxy, sample_announce_request}; use crate::v1::handlers::announce::handle_announce; @@ -310,13 +357,17 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, 
server_socket_addr).unwrap(); + let response = handle_announce( &http_core_tracker_services.announce_service, &sample_announce_request(), &client_ip_sources, + &server_service_binding, None, ) .await @@ -335,8 +386,11 @@ mod tests { mod with_tracker_not_on_reverse_proxy { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_tracker_not_on_reverse_proxy, sample_announce_request}; use crate::v1::handlers::announce::handle_announce; @@ -348,13 +402,17 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let response = handle_announce( &http_core_tracker_services.announce_service, &sample_announce_request(), &client_ip_sources, + &server_service_binding, None, ) .await diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index ae3a35bd3..bdd4378f3 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -13,6 +13,7 @@ use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSo use bittorrent_tracker_core::authentication::Key; use hyper::StatusCode; use torrust_tracker_primitives::core::ScrapeData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::v1::extractors::authentication_key::Extract as ExtractKey; use crate::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; @@ -22,13 +23,13 @@ use 
crate::v1::extractors::scrape_request::ExtractRequest; /// to run in `public` mode. #[allow(clippy::unused_async)] pub async fn handle_without_key( - State(state): State<Arc<ScrapeService>>, + State(state): State<(Arc<ScrapeService>, ServiceBinding)>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle(&state, &scrape_request, &client_ip_sources, None).await + handle(&state.0, &scrape_request, &client_ip_sources, &state.1, None).await } /// It handles the `scrape` request when the HTTP tracker is configured @@ -37,24 +38,25 @@ pub async fn handle_without_key( /// In this case, the authentication `key` parameter is required. #[allow(clippy::unused_async)] pub async fn handle_with_key( - State(state): State<Arc<ScrapeService>>, + State(state): State<(Arc<ScrapeService>, ServiceBinding)>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle(&state, &scrape_request, &client_ip_sources, Some(key)).await + handle(&state.0, &scrape_request, &client_ip_sources, &state.1, Some(key)).await } async fn handle( scrape_service: &Arc<ScrapeService>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, + server_service_binding: &ServiceBinding, maybe_key: Option<Key>, ) -> Response { let scrape_data = match scrape_service - .handle_scrape(scrape_request, client_ip_sources, maybe_key) + .handle_scrape(scrape_request, client_ip_sources, server_service_binding, maybe_key) .await { Ok(scrape_data) => scrape_data, @@ -77,10 +79,14 @@ fn build_response(scrape_data: ScrapeData) -> Response { #[cfg(test)] mod tests { - use std::net::IpAddr; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; + use 
bittorrent_http_tracker_core::event::bus::EventBus; + use bittorrent_http_tracker_core::event::sender::Broadcaster; + use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; + use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -91,6 +97,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; @@ -101,7 +108,7 @@ mod tests { } struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc<Option<Box<dyn bittorrent_http_tracker_core::statistics::event::sender::Sender>>>, + pub http_stats_event_sender: bittorrent_http_tracker_core::event::sender::Sender, } fn initialize_private_tracker() -> (CoreTrackerServices, CoreHttpTrackerServices) { @@ -121,6 +128,8 @@ mod tests { } fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(config.core.clone()); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); @@ -129,10 +138,19 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - // HTTP stats - let (http_stats_event_sender, _http_stats_repository) = - 
bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); + // HTTP core stats + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_event_bus = Arc::new(EventBus::new( + config.core.tracker_usage_statistics.into(), + http_core_broadcaster.clone(), + )); + + let http_stats_event_sender = http_stats_event_bus.sender(); + + if config.core.tracker_usage_statistics { + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); + } ( CoreTrackerServices { @@ -153,7 +171,7 @@ mod tests { fn sample_client_ip_sources() -> ClientIpSources { ClientIpSources { right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), - connection_info_ip: Some(IpAddr::from_str("203.0.113.196").unwrap()), + connection_info_socket_address: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 8080)), } } @@ -165,16 +183,21 @@ mod tests { } mod with_tracker_in_private_mode { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_tracker_core::authentication; use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_private_tracker, sample_client_ip_sources, sample_scrape_request}; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); let scrape_request = sample_scrape_request(); @@ -188,7 +211,12 @@ mod tests { ); let scrape_data = 
scrape_service - .handle_scrape(&scrape_request, &sample_client_ip_sources(), maybe_key) + .handle_scrape( + &scrape_request, + &sample_client_ip_sources(), + &server_service_binding, + maybe_key, + ) .await .unwrap(); @@ -199,6 +227,9 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); let scrape_request = sample_scrape_request(); @@ -213,7 +244,12 @@ mod tests { ); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &sample_client_ip_sources(), maybe_key) + .handle_scrape( + &scrape_request, + &sample_client_ip_sources(), + &server_service_binding, + maybe_key, + ) .await .unwrap(); @@ -225,8 +261,11 @@ mod tests { mod with_tracker_in_listed_mode { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_listed_tracker, sample_client_ip_sources, sample_scrape_request}; @@ -236,6 +275,9 @@ mod tests { let scrape_request = sample_scrape_request(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let scrape_service = ScrapeService::new( core_tracker_services.core_config.clone(), core_tracker_services.scrape_handler.clone(), @@ -244,7 +286,7 @@ mod tests { ); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &sample_client_ip_sources(), None) + .handle_scrape(&scrape_request, &sample_client_ip_sources(), &server_service_binding, None) .await .unwrap(); @@ -256,9 
+298,12 @@ mod tests { mod with_tracker_on_reverse_proxy { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_tracker_on_reverse_proxy, sample_scrape_request}; use crate::v1::handlers::scrape::tests::assert_error_response; @@ -269,9 +314,12 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let scrape_service = ScrapeService::new( core_tracker_services.core_config.clone(), core_tracker_services.scrape_handler.clone(), @@ -280,7 +328,7 @@ mod tests { ); let response = scrape_service - .handle_scrape(&sample_scrape_request(), &client_ip_sources, None) + .handle_scrape(&sample_scrape_request(), &client_ip_sources, &server_service_binding, None) .await .unwrap_err(); @@ -297,9 +345,12 @@ mod tests { mod with_tracker_not_on_reverse_proxy { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_tracker_not_on_reverse_proxy, sample_scrape_request}; use crate::v1::handlers::scrape::tests::assert_error_response; @@ -310,9 +361,12 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, 
}; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let scrape_service = ScrapeService::new( core_tracker_services.core_config.clone(), core_tracker_services.scrape_handler.clone(), @@ -321,7 +375,7 @@ mod tests { ); let response = scrape_service - .handle_scrape(&sample_scrape_request(), &client_ip_sources, None) + .handle_scrape(&sample_scrape_request(), &client_ip_sources, &server_service_binding, None) .await .unwrap_err(); diff --git a/packages/axum-http-tracker-server/src/v1/routes.rs b/packages/axum-http-tracker-server/src/v1/routes.rs index 5f666e9d4..df395cd9a 100644 --- a/packages/axum-http-tracker-server/src/v1/routes.rs +++ b/packages/axum-http-tracker-server/src/v1/routes.rs @@ -1,5 +1,4 @@ //! HTTP server routes for version `v1`. -use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; @@ -13,6 +12,7 @@ use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use hyper::{Request, StatusCode}; use torrust_server_lib::logging::Latency; use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; use tower_http::classify::ServerErrorsFailureClass; @@ -30,28 +30,38 @@ use crate::HTTP_TRACKER_LOG_TARGET; /// /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. 
-#[instrument(skip(http_tracker_container, server_socket_addr))] -pub fn router(http_tracker_container: Arc<HttpTrackerCoreContainer>, server_socket_addr: SocketAddr) -> Router { +#[instrument(skip(http_tracker_container, server_service_binding))] +pub fn router(http_tracker_container: &Arc<HttpTrackerCoreContainer>, server_service_binding: &ServiceBinding) -> Router { + let server_socket_addr = server_service_binding.bind_address(); + Router::new() // Health check .route("/health_check", get(health_check::handler)) // Announce request .route( "/announce", - get(announce::handle_without_key).with_state(http_tracker_container.announce_service.clone()), + get(announce::handle_without_key).with_state(( + http_tracker_container.announce_service.clone(), + server_service_binding.clone(), + )), ) .route( "/announce/{key}", - get(announce::handle_with_key).with_state(http_tracker_container.announce_service.clone()), + get(announce::handle_with_key).with_state(( + http_tracker_container.announce_service.clone(), + server_service_binding.clone(), + )), ) // Scrape request .route( "/scrape", - get(scrape::handle_without_key).with_state(http_tracker_container.scrape_service.clone()), + get(scrape::handle_without_key) + .with_state((http_tracker_container.scrape_service.clone(), server_service_binding.clone())), ) .route( "/scrape/{key}", - get(scrape::handle_with_key).with_state(http_tracker_container.scrape_service.clone()), + get(scrape::handle_with_key) + .with_state((http_tracker_container.scrape_service.clone(), server_service_binding.clone())), ) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) diff --git a/packages/axum-http-tracker-server/tests/server/asserts.rs b/packages/axum-http-tracker-server/tests/server/asserts.rs index 7ab8d93e5..a82014e16 100644 --- a/packages/axum-http-tracker-server/tests/server/asserts.rs +++ b/packages/axum-http-tracker-server/tests/server/asserts.rs @@ -22,6 +22,7 
@@ pub fn assert_bencoded_error(response_text: &String, expected_failure_reason: &s ); } +#[allow(dead_code)] pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); diff --git a/packages/axum-http-tracker-server/tests/server/requests/announce.rs b/packages/axum-http-tracker-server/tests/server/requests/announce.rs index 0775de7e4..5a670b618 100644 --- a/packages/axum-http-tracker-server/tests/server/requests/announce.rs +++ b/packages/axum-http-tracker-server/tests/server/requests/announce.rs @@ -126,6 +126,11 @@ impl QueryBuilder { self } + pub fn with_port(mut self, port: u16) -> Self { + self.announce_query.port = port; + self + } + pub fn without_compact(mut self) -> Self { self.announce_query.compact = None; self diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index ad5b5a482..85792f922 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -105,8 +105,8 @@ mod for_all_config_modes { use crate::common::fixtures::invalid_info_hashes; use crate::server::asserts::{ assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, - assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, - assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, + assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_is_announce_response, + assert_missing_query_params_for_announce_request_error_response, }; use crate::server::client::Client; use crate::server::requests::announce::{Compact, QueryBuilder}; @@ -474,7 +474,7 @@ mod for_all_config_modes { let 
previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. This new peer is non included on the response peer list let response = Client::new(*env.bind_address()) @@ -517,7 +517,7 @@ mod for_all_config_modes { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) .build(); - env.add_torrent_peer(&info_hash, &peer_using_ipv4); + env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; // Announce a peer using IPV6 let peer_using_ipv6 = PeerBuilder::default() @@ -527,7 +527,7 @@ mod for_all_config_modes { 8080, )) .build(); - env.add_torrent_peer(&info_hash, &peer_using_ipv6); + env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; // Announce the new Peer. let response = Client::new(*env.bind_address()) @@ -559,7 +559,8 @@ mod for_all_config_modes { } #[tokio::test] - async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_socket_address_even_if_the_peer_id_is_different( + ) { logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -567,19 +568,44 @@ mod for_all_config_modes { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 let peer = PeerBuilder::default().build(); - // Add a peer - env.add_torrent_peer(&info_hash, &peer); - - let announce_query = QueryBuilder::default() + let announce_query_1 = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer.peer_id) + .with_peer_addr(&peer.peer_addr.ip()) + .with_port(peer.peer_addr.port()) .query(); - assert_ne!(peer.peer_addr.ip(), 
announce_query.peer_addr); + let announce_query_2 = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) // Different peer ID + .with_peer_addr(&peer.peer_addr.ip()) + .with_port(peer.peer_addr.port()) + .query(); + + // Same peer socket address + assert_eq!(announce_query_1.peer_addr, announce_query_2.peer_addr); + assert_eq!(announce_query_1.port, announce_query_2.port); + + // Different peer ID + assert_ne!(announce_query_1.peer_id, announce_query_2.peer_id); - let response = Client::new(*env.bind_address()).announce(&announce_query).await; + let _response = Client::new(*env.bind_address()).announce(&announce_query_1).await; + let response = Client::new(*env.bind_address()).announce(&announce_query_2).await; - assert_empty_announce_response(response).await; + let announce_policy = env.container.tracker_core_container.core_config.announce_policy; + + // The response should contain only the first peer. + assert_announce_response( + response, + &Announce { + complete: 1, + incomplete: 0, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, + peers: vec![], + }, + ) + .await; env.stop().await; } @@ -599,7 +625,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses let response = Client::new(*env.bind_address()) @@ -640,7 +666,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 without passing the "compact" param // By default it 
should respond with the compact peer list @@ -676,14 +702,9 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; + let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, 1); + assert_eq!(stats.tcp4_announces_handled(), 1); drop(stats); @@ -707,14 +728,9 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; + let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 1); + assert_eq!(stats.tcp6_announces_handled(), 1); drop(stats); @@ -732,19 +748,14 @@ mod for_all_config_modes { Client::new(*env.bind_address()) .announce( &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .with_peer_addr(&IpAddr::V6(Ipv6Addr::LOCALHOST)) .query(), ) .await; - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; + let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 0); + assert_eq!(stats.tcp6_announces_handled(), 0); drop(stats); @@ -776,7 +787,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -818,7 +830,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!( @@ -867,7 +880,8 @@ mod for_all_config_modes { .container 
.tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!( @@ -914,7 +928,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); @@ -997,9 +1012,10 @@ mod for_all_config_modes { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1037,9 +1053,10 @@ mod for_all_config_modes { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() + .with_no_bytes_left_to_download() .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1130,14 +1147,9 @@ mod for_all_config_modes { ) .await; - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; + let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp4_scrapes_handled, 1); + assert_eq!(stats.tcp4_scrapes_handled(), 1); drop(stats); @@ -1167,14 +1179,9 @@ mod for_all_config_modes { ) .await; - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; + let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp6_scrapes_handled, 1); + assert_eq!(stats.tcp6_scrapes_handled(), 1); drop(stats); @@ -1279,9 +1286,10 @@ mod configured_as_whitelisted { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + 
.with_bytes_left_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1315,9 +1323,10 @@ mod configured_as_whitelisted { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), - ); + ) + .await; env.container .tracker_core_container @@ -1491,9 +1500,10 @@ mod configured_as_private { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1522,9 +1532,10 @@ mod configured_as_private { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), - ); + ) + .await; let expiring_key = env .container @@ -1573,9 +1584,10 @@ mod configured_as_private { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), - ); + ) + .await; let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml index 9c0d2bc2f..7353e66e8 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Tracker API." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "http", "server", "torrust", "tracker"] +keywords = [ "axum", "bittorrent", "http", "server", "torrust", "tracker" ] license.workspace = true name = "torrust-axum-rest-tracker-api-server" publish.workspace = true @@ -15,38 +15,41 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" -axum = { version = "0", features = ["macros"] } -axum-extra = { version = "0", features = ["query"] } -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum = { version = "0", features = [ "macros" ] } +axum-extra = { version = "0", features = [ "query" ] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } futures = "0" hyper = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } -serde_with = { version = "3", features = ["json"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } +serde_with = { version = "3", features = [ "json" ] } thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } 
torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "../rest-tracker-api-client" } torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "../rest-tracker-api-core" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } -tower = { version = "0", features = ["timeout"] } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tower = { version = "0", features = [ "timeout" ] } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" +url = "2" [dev-dependencies] local-ip-address = "0" mockall = "0" torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "../rest-tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } -url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["v4"] } +url = { version = "2", features = [ "serde" ] } +uuid = { version = "1", features = [ "v4" ] } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index c2d89e064..cddb45277 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -12,6 +12,7 @@ use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use 
torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use crate::server::{ApiServer, Launcher, Running, Stopped}; @@ -32,12 +33,12 @@ where S: std::fmt::Debug + std::fmt::Display, { /// Add a torrent to the tracker - pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None); + .handle_announcement(info_hash, peer, None) + .await; } } @@ -172,13 +173,25 @@ impl EnvContainer { .clone(), ); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &swarm_coordination_registry_container, + )); + let http_tracker_core_container = - HttpTrackerCoreContainer::initialize_from(&tracker_core_container, &http_tracker_config); - let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, &udp_tracker_config); + HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); + + let udp_tracker_core_container = + UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( + 
&swarm_coordination_registry_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, diff --git a/packages/axum-rest-tracker-api-server/src/routes.rs b/packages/axum-rest-tracker-api-server/src/routes.rs index c18451c89..78b7818d9 100644 --- a/packages/axum-rest-tracker-api-server/src/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/routes.rs @@ -36,7 +36,7 @@ use crate::API_LOG_TARGET; /// Add all API routes to the router. #[instrument(skip(http_api_container, access_tokens))] pub fn router( - http_api_container: Arc<TrackerHttpApiCoreContainer>, + http_api_container: &Arc<TrackerHttpApiCoreContainer>, access_tokens: Arc<AccessTokens>, server_socket_addr: SocketAddr, ) -> Router { @@ -44,7 +44,7 @@ pub fn router( let api_url_prefix = "/api"; - let router = v1::routes::add(api_url_prefix, router, &http_api_container); + let router = v1::routes::add(api_url_prefix, router, http_api_container); let state = State { access_tokens }; diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index fd8f92944..9eef6b71a 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -40,11 +40,14 @@ use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use torrust_server_lib::signals::{Halted, Started}; use torrust_tracker_configuration::AccessTokens; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use tracing::{instrument, Level}; use super::routes::router; use crate::API_LOG_TARGET; +const TYPE_STRING: &str = "tracker_rest_api"; + /// Errors that can occur when starting or stopping the API server. 
#[derive(Debug, Error)] pub enum Error { @@ -137,7 +140,7 @@ impl ApiServer<Stopped> { let task = tokio::spawn(async move { tracing::debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); - let _task = launcher.start(http_api_container, access_tokens, tx_start, rx_halt).await; + let _task = launcher.start(&http_api_container, access_tokens, tx_start, rx_halt).await; tracing::debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); @@ -146,7 +149,7 @@ impl ApiServer<Stopped> { let api_server = match rx_start.await { Ok(started) => { - form.send(ServiceRegistration::new(started.address, check_fn)) + form.send(ServiceRegistration::new(started.service_binding, check_fn)) .expect("it should be able to send service registration"); ApiServer { @@ -193,8 +196,8 @@ impl ApiServer<Running> { /// Or if there request returns an error code. #[must_use] #[instrument(skip())] -pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { - let url = format!("http://{binding}/api/health_check"); // DevSkim: ignore DS137138 +pub fn check_fn(service_binding: &ServiceBinding) -> ServiceHealthCheckJob { + let url = format!("http://{}/api/health_check", service_binding.bind_address()); // DevSkim: ignore DS137138 let info = format!("checking api health check at: {url}"); @@ -204,7 +207,7 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { Err(err) => Err(err.to_string()), } }); - ServiceHealthCheckJob::new(*binding, info, job) + ServiceHealthCheckJob::new(service_binding.clone(), info, TYPE_STRING.to_string(), job) } /// A struct responsible for starting the API server. 
@@ -217,9 +220,9 @@ pub struct Launcher { impl std::fmt::Display for Launcher { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if self.tls.is_some() { - write!(f, "(with socket): {}, using TLS", self.bind_to,) + write!(f, "(with socket): {}, using TLS", self.bind_to) } else { - write!(f, "(with socket): {}, without TLS", self.bind_to,) + write!(f, "(with socket): {}, without TLS", self.bind_to) } } } @@ -238,12 +241,15 @@ impl Launcher { #[instrument(skip(self, http_api_container, access_tokens, tx_start, rx_halt))] pub fn start( &self, - http_api_container: Arc<TrackerHttpApiCoreContainer>, + http_api_container: &Arc<TrackerHttpApiCoreContainer>, access_tokens: Arc<AccessTokens>, tx_start: Sender<Started>, rx_halt: Receiver<Halted>, ) -> BoxFuture<'static, ()> { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + socket + .set_nonblocking(true) + .expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let router = router(http_api_container, access_tokens, address); @@ -254,16 +260,19 @@ impl Launcher { handle.clone(), rx_halt, format!("Shutting down tracker API server on socket address: {address}"), + address, )); let tls = self.tls.clone(); - let protocol = if tls.is_some() { "https" } else { "http" }; + let protocol = if tls.is_some() { Protocol::HTTPS } else { Protocol::HTTP }; + let service_binding = ServiceBinding::new(protocol.clone(), address).expect("Service binding creation failed"); - tracing::info!(target: API_LOG_TARGET, "Starting on {protocol}://{}", address); + tracing::info!(target: API_LOG_TARGET, "Starting on: {protocol}://{address}"); let running = Box::pin(async { match tls { Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) + .expect("Failed to create server from TCP socket with TLS") .handle(handle) // The TimeoutAcceptor is commented because TSL does not 
work with it. // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 @@ -272,6 +281,7 @@ impl Launcher { .await .expect("Axum server for tracker API crashed."), None => custom_axum_server::from_tcp_with_timeouts(socket) + .expect("Failed to create server from TCP socket") .handle(handle) .acceptor(TimeoutAcceptor) .serve(router.into_make_service_with_connect_info::<std::net::SocketAddr>()) @@ -280,10 +290,13 @@ impl Launcher { } }); - tracing::info!(target: API_LOG_TARGET, "{STARTED_ON} {protocol}://{}", address); + tracing::info!(target: API_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); tx_start - .send(Started { address }) + .send(Started { + service_binding, + address, + }) .expect("the HTTP(s) Tracker API service should not be dropped"); running diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 5273df332..1b1f670a0 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -9,9 +9,9 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_udp_tracker_core::services::banning::BanService; use serde::Deserialize; use tokio::sync::RwLock; -use torrust_rest_tracker_api_core::statistics::services::get_metrics; +use torrust_rest_tracker_api_core::statistics::services::{get_labeled_metrics, get_metrics}; -use super::responses::{metrics_response, stats_response}; +use super::responses::{labeled_metrics_response, labeled_stats_response, metrics_response, stats_response}; #[derive(Deserialize, Debug, Default)] #[serde(rename_all = "lowercase")] @@ -28,7 +28,7 @@ pub struct QueryParams { pub format: Option<Format>, } -/// It handles the request to get the tracker statistics. +/// It handles the request to get the tracker global metrics. 
/// /// By default it returns a `200` response with the stats in JSON format. /// @@ -39,29 +39,60 @@ pub struct QueryParams { /// for more information about this endpoint. #[allow(clippy::type_complexity)] pub async fn get_stats_handler( + State(state): State<( + Arc<InMemoryTorrentRepository>, + Arc<bittorrent_tracker_core::statistics::repository::Repository>, + Arc<bittorrent_http_tracker_core::statistics::repository::Repository>, + Arc<torrust_udp_tracker_server::statistics::repository::Repository>, + )>, + params: Query<QueryParams>, +) -> Response { + let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone()).await; + + match params.0.format { + Some(format) => match format { + Format::Json => stats_response(metrics), + Format::Prometheus => metrics_response(&metrics), + }, + None => stats_response(metrics), + } +} + +/// It handles the request to get the tracker extendable metrics. +/// +/// By default it returns a `200` response with the stats in JSON format. +/// +/// You can add the GET parameter `format=prometheus` to get the stats in +/// Prometheus Text Exposition Format. 
+#[allow(clippy::type_complexity)] +pub async fn get_metrics_handler( State(state): State<( Arc<InMemoryTorrentRepository>, Arc<RwLock<BanService>>, + Arc<torrust_tracker_swarm_coordination_registry::statistics::repository::Repository>, + Arc<bittorrent_tracker_core::statistics::repository::Repository>, Arc<bittorrent_http_tracker_core::statistics::repository::Repository>, Arc<bittorrent_udp_tracker_core::statistics::repository::Repository>, Arc<torrust_udp_tracker_server::statistics::repository::Repository>, )>, params: Query<QueryParams>, ) -> Response { - let metrics = get_metrics( + let metrics = get_labeled_metrics( state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone(), state.4.clone(), + state.5.clone(), + state.6.clone(), ) .await; match params.0.format { Some(format) => match format { - Format::Json => stats_response(metrics), - Format::Prometheus => metrics_response(&metrics), + Format::Json => labeled_stats_response(metrics), + Format::Prometheus => labeled_metrics_response(&metrics), }, - None => stats_response(metrics), + None => labeled_stats_response(metrics), } } diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index d9480259e..ece50383b 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -1,7 +1,8 @@ //! API resources for the [`stats`](crate::v1::context::stats) //! API context. use serde::{Deserialize, Serialize}; -use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; +use torrust_rest_tracker_api_core::statistics::services::{TrackerLabeledMetrics, TrackerMetrics}; +use torrust_tracker_metrics::metric_collection::MetricCollection; /// It contains all the statistics generated by the tracker. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -116,11 +117,25 @@ impl From<TrackerMetrics> for Stats { } } +/// It contains all the statistics generated by the tracker. +#[derive(Serialize, Debug, PartialEq)] +pub struct LabeledStats { + metrics: MetricCollection, +} + +impl From<TrackerLabeledMetrics> for LabeledStats { + #[allow(deprecated)] + fn from(metrics: TrackerLabeledMetrics) -> Self { + Self { + metrics: metrics.metrics, + } + } +} + #[cfg(test)] mod tests { - use torrust_rest_tracker_api_core::statistics::metrics::Metrics; + use torrust_rest_tracker_api_core::statistics::metrics::{ProtocolMetrics, TorrentsMetrics}; use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use super::Stats; @@ -129,13 +144,13 @@ mod tests { fn stats_resource_should_be_converted_from_tracker_metrics() { assert_eq!( Stats::from(TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata { + torrents_metrics: TorrentsMetrics { total_complete: 1, total_downloaded: 2, total_incomplete: 3, total_torrents: 4 }, - protocol_metrics: Metrics { + protocol_metrics: ProtocolMetrics { // TCP tcp4_connections_handled: 5, tcp4_announces_handled: 6, diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs index 853fdd2e2..e79f7e562 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs @@ -1,9 +1,21 @@ //! API responses for the [`stats`](crate::v1::context::stats) //! API context. 
use axum::response::{IntoResponse, Json, Response}; -use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; +use torrust_rest_tracker_api_core::statistics::services::{TrackerLabeledMetrics, TrackerMetrics}; +use torrust_tracker_metrics::prometheus::PrometheusSerializable; -use super::resources::Stats; +use super::resources::{LabeledStats, Stats}; + +/// `200` response that contains the [`LabeledStats`] resource as json. +#[must_use] +pub fn labeled_stats_response(tracker_metrics: TrackerLabeledMetrics) -> Response { + Json(LabeledStats::from(tracker_metrics)).into_response() +} + +#[must_use] +pub fn labeled_metrics_response(tracker_metrics: &TrackerLabeledMetrics) -> Response { + tracker_metrics.metrics.to_prometheus().into_response() +} /// `200` response that contains the [`Stats`] resource as json. #[must_use] diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index 1334c0d70..2bf3776fd 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -9,18 +9,34 @@ use axum::routing::get; use axum::Router; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; -use super::handlers::get_stats_handler; +use super::handlers::{get_metrics_handler, get_stats_handler}; /// It adds the routes to the router for the [`stats`](crate::v1::context::stats) API context. 
pub fn add(prefix: &str, router: Router, http_api_container: &Arc<TrackerHttpApiCoreContainer>) -> Router { - router.route( - &format!("{prefix}/stats"), - get(get_stats_handler).with_state(( - http_api_container.in_memory_torrent_repository.clone(), - http_api_container.ban_service.clone(), - http_api_container.http_stats_repository.clone(), - http_api_container.udp_core_stats_repository.clone(), - http_api_container.udp_server_stats_repository.clone(), - )), - ) + router + .route( + &format!("{prefix}/stats"), + get(get_stats_handler).with_state(( + http_api_container.tracker_core_container.in_memory_torrent_repository.clone(), + http_api_container.tracker_core_container.stats_repository.clone(), + http_api_container.http_stats_repository.clone(), + http_api_container.udp_server_stats_repository.clone(), + )), + ) + .route( + &format!("{prefix}/metrics"), + get(get_metrics_handler).with_state(( + http_api_container.tracker_core_container.in_memory_torrent_repository.clone(), + http_api_container.ban_service.clone(), + // Stats + http_api_container + .swarm_coordination_registry_container + .stats_repository + .clone(), + http_api_container.tracker_core_container.stats_repository.clone(), + http_api_container.http_stats_repository.clone(), + http_api_container.udp_core_stats_repository.clone(), + http_api_container.udp_server_stats_repository.clone(), + )), + ) } diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs index 613abbdeb..eecbd9ac3 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs @@ -33,7 +33,7 @@ pub async fn get_torrent_handler( ) -> Response { match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match get_torrent_info(&in_memory_torrent_repository, &info_hash) { + 
Ok(info_hash) => match get_torrent_info(&in_memory_torrent_repository, &info_hash).await { Some(info) => torrent_info_response(info).into_response(), None => torrent_not_known_response(), }, @@ -85,14 +85,19 @@ pub async fn get_torrents_handler( tracing::debug!("pagination: {:?}", pagination); if pagination.0.info_hashes.is_empty() { - torrent_list_response(&get_torrents_page( - &in_memory_torrent_repository, - Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), - )) + torrent_list_response( + &get_torrents_page( + &in_memory_torrent_repository, + Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), + ) + .await, + ) .into_response() } else { match parse_info_hashes(pagination.0.info_hashes) { - Ok(info_hashes) => torrent_list_response(&get_torrents(&in_memory_torrent_repository, &info_hashes)).into_response(), + Ok(info_hashes) => { + torrent_list_response(&get_torrents(&in_memory_torrent_repository, &info_hashes).await).into_response() + } Err(err) => match err { QueryParamError::InvalidInfoHash { info_hash } => invalid_info_hash_param_response(&info_hash), }, diff --git a/packages/axum-rest-tracker-api-server/src/v1/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/routes.rs index b36a20eac..f7057a852 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/routes.rs @@ -10,9 +10,21 @@ use super::context::{auth_key, stats, torrent, whitelist}; pub fn add(prefix: &str, router: Router, http_api_container: &Arc<TrackerHttpApiCoreContainer>) -> Router { let v1_prefix = format!("{prefix}/v1"); - let router = auth_key::routes::add(&v1_prefix, router, &http_api_container.keys_handler.clone()); + let router = auth_key::routes::add( + &v1_prefix, + router, + &http_api_container.tracker_core_container.keys_handler.clone(), + ); let router = stats::routes::add(&v1_prefix, router, http_api_container); - let router = whitelist::routes::add(&v1_prefix, 
router, &http_api_container.whitelist_manager); + let router = whitelist::routes::add( + &v1_prefix, + router, + &http_api_container.tracker_core_container.whitelist_manager, + ); - torrent::routes::add(&v1_prefix, router, &http_api_container.in_memory_torrent_repository.clone()) + torrent::routes::add( + &v1_prefix, + router, + &http_api_container.tracker_core_container.in_memory_torrent_repository.clone(), + ) } diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs index 51a4804e7..7cae0abbf 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs @@ -21,7 +21,8 @@ async fn should_allow_getting_tracker_statistics() { env.add_torrent_peer( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), // DevSkim: ignore DS173237 &PeerBuilder::default().into(), - ); + ) + .await; let request_id = Uuid::new_v4(); diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs index 42421db99..ae9819785 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs @@ -26,7 +26,7 @@ async fn should_allow_getting_all_torrents() { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -59,8 +59,8 @@ async fn should_allow_limiting_the_torrents_in_the_result() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // 
DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -96,8 +96,8 @@ async fn should_allow_the_torrents_result_pagination() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -132,8 +132,8 @@ async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -307,7 +307,7 @@ async fn should_allow_getting_a_torrent_info() { let peer = PeerBuilder::default().into(); - env.add_torrent_peer(&info_hash, &peer); + env.add_torrent_peer(&info_hash, &peer).await; let request_id = Uuid::new_v4(); 
@@ -389,7 +389,7 @@ async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); diff --git a/packages/axum-server/Cargo.toml b/packages/axum-server/Cargo.toml index a60bab885..45eddd3b0 100644 --- a/packages/axum-server/Cargo.toml +++ b/packages/axum-server/Cargo.toml @@ -4,7 +4,7 @@ description = "A wrapper for the Axum server for Torrust HTTP servers to add tim documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "server", "torrust", "wrapper"] +keywords = [ "axum", "server", "torrust", "wrapper" ] license.workspace = true name = "torrust-axum-server" publish.workspace = true @@ -14,19 +14,19 @@ rust-version.workspace = true version.workspace = true [dependencies] -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } -camino = { version = "1", features = ["serde", "serde1"] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } +camino = { version = "1", features = [ "serde", "serde1" ] } futures-util = "0" http-body = "1" hyper = "1" -hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } +hyper-util = { version = "0", features = [ "http1", "http2", "tokio" ] } pin-project-lite = "0" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } -tower = { version = 
"0", features = ["timeout"] } +tower = { version = "0", features = [ "timeout" ] } tracing = "0" [dev-dependencies] diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index 5705ef24e..0328198ec 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -18,7 +18,7 @@ //! If you want to know more about Axum and timeouts see <https://github.com/josecelano/axum-server-timeout>. use std::future::Ready; use std::io::ErrorKind; -use std::net::TcpListener; +use std::net::{SocketAddr, TcpListener}; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; @@ -36,21 +36,32 @@ use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; use tokio::time::{Instant, Sleep}; use tower::Service; +type RustlsServerResult = Result<Server<SocketAddr, RustlsAcceptor>, std::io::Error>; +type ServerResult = Result<Server<SocketAddr>, std::io::Error>; + const HTTP1_HEADER_READ_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); -#[must_use] -pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { - add_timeouts(axum_server::from_tcp(socket)) +/// Creates an Axum server from a TCP listener with configured timeouts. +/// +/// # Errors +/// +/// Returns an error if the server cannot be created from the TCP socket. +pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { + axum_server::from_tcp(socket).map(add_timeouts) } -#[must_use] -pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> Server<RustlsAcceptor> { - add_timeouts(axum_server::from_tcp_rustls(socket, tls)) +/// Creates an Axum server from a TCP listener with TLS and configured timeouts. +/// +/// # Errors +/// +/// Returns an error if the server cannot be created from the TCP socket or if TLS configuration fails. 
+pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> RustlsServerResult { + axum_server::from_tcp_rustls(socket, tls).map(add_timeouts) } -fn add_timeouts<A>(mut server: Server<A>) -> Server<A> { +fn add_timeouts<Addr: axum_server::Address, Acc>(mut server: Server<Addr, Acc>) -> Server<Addr, Acc> { server.http_builder().http1().timer(TokioTimer::new()); server.http_builder().http2().timer(TokioTimer::new()); diff --git a/packages/axum-server/src/signals.rs b/packages/axum-server/src/signals.rs index af69cbb6e..360879e32 100644 --- a/packages/axum-server/src/signals.rs +++ b/packages/axum-server/src/signals.rs @@ -1,21 +1,49 @@ +use std::net::SocketAddr; use std::time::Duration; -use tokio::time::sleep; +use tokio::time::{sleep, Instant}; use torrust_server_lib::signals::{shutdown_signal_with_message, Halted}; use tracing::instrument; #[instrument(skip(handle, rx_halt, message))] -pub async fn graceful_shutdown(handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver<Halted>, message: String) { - shutdown_signal_with_message(rx_halt, message).await; +pub async fn graceful_shutdown( + handle: axum_server::Handle<SocketAddr>, + rx_halt: tokio::sync::oneshot::Receiver<Halted>, + message: String, + address: SocketAddr, +) { + shutdown_signal_with_message(rx_halt, message.clone()).await; - tracing::debug!("Sending graceful shutdown signal"); - handle.graceful_shutdown(Some(Duration::from_secs(90))); + let grace_period = Duration::from_secs(90); + let max_wait = Duration::from_secs(95); + let start = Instant::now(); - println!("!! shuting down in 90 seconds !!"); + handle.graceful_shutdown(Some(grace_period)); + + tracing::info!("!! 
{} in {} seconds !!", message, grace_period.as_secs()); loop { - sleep(Duration::from_secs(1)).await; + if handle.connection_count() == 0 { + tracing::info!("All connections closed, shutting down server in address {}", address); + break; + } + + if start.elapsed() >= max_wait { + tracing::warn!( + "Shutdown timeout of {} seconds reached. Forcing shutdown in address {} with {} active connections.", + max_wait.as_secs(), + address, + handle.connection_count() + ); + break; + } - tracing::info!("remaining alive connections: {}", handle.connection_count()); + tracing::info!( + "Remaining alive connections: {} ({}s elapsed)", + handle.connection_count(), + start.elapsed().as_secs() + ); + + sleep(Duration::from_secs(1)).await; } } diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index 3bd00d2b0..c0cafff0a 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to a clock for the torrust tracker." -keywords = ["clock", "library", "torrents"] +keywords = [ "clock", "library", "torrents" ] name = "torrust-tracker-clock" readme = "README.md" @@ -16,7 +16,7 @@ rust-version.workspace = true version.workspace = true [dependencies] -chrono = { version = "0", default-features = false, features = ["clock"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } lazy_static = "1" tracing = "0" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index e213f7c0c..1155ba417 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to provide configuration to the Torrust Tracker." 
-keywords = ["config", "library", "settings"] +keywords = [ "config", "library", "settings" ] name = "torrust-tracker-configuration" readme = "README.md" @@ -15,18 +15,18 @@ rust-version.workspace = true version.workspace = true [dependencies] -camino = { version = "1", features = ["serde", "serde1"] } -derive_more = { version = "2", features = ["constructor", "display"] } -figment = { version = "0", features = ["env", "test", "toml"] } -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } +camino = { version = "1", features = [ "serde", "serde1" ] } +derive_more = { version = "2", features = [ "constructor", "display" ] } +figment = { version = "0", features = [ "env", "test", "toml" ] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } serde_with = "3" thiserror = "2" toml = "0" torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } url = "2" [dev-dependencies] -uuid = { version = "1", features = ["v4"] } +uuid = { version = "1", features = [ "v4" ] } diff --git a/packages/configuration/src/v2_0_0/core.rs b/packages/configuration/src/v2_0_0/core.rs index ed3e6aeb7..32dac8b3c 100644 --- a/packages/configuration/src/v2_0_0/core.rs +++ b/packages/configuration/src/v2_0_0/core.rs @@ -103,6 +103,7 @@ impl Core { fn default_tracker_policy() -> TrackerPolicy { TrackerPolicy::default() } + fn default_tracker_usage_statistics() -> bool { true } diff --git a/packages/configuration/src/v2_0_0/database.rs b/packages/configuration/src/v2_0_0/database.rs index c2b24d809..457b3c925 100644 --- a/packages/configuration/src/v2_0_0/database.rs +++ b/packages/configuration/src/v2_0_0/database.rs @@ -12,8 +12,10 @@ pub struct Database { /// Database connection string. 
The format depends on the database driver. /// For `sqlite3`, the format is `path/to/database.db`, for example: /// `./storage/tracker/lib/database/sqlite3.db`. - /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for + /// For `mysql`, the format is `mysql://db_user:db_user_password@host:port/db_name`, for /// example: `mysql://root:password@localhost:3306/torrust`. + /// If the password contains reserved URL characters (for example `+` or `/`), + /// percent-encode it in the URL. #[serde(default = "Database::default_path")] pub path: String, } diff --git a/packages/configuration/src/v2_0_0/health_check_api.rs b/packages/configuration/src/v2_0_0/health_check_api.rs index 61178fa80..368f26c42 100644 --- a/packages/configuration/src/v2_0_0/health_check_api.rs +++ b/packages/configuration/src/v2_0_0/health_check_api.rs @@ -25,6 +25,6 @@ impl Default for HealthCheckApi { impl HealthCheckApi { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1313) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1313) } } diff --git a/packages/configuration/src/v2_0_0/http_tracker.rs b/packages/configuration/src/v2_0_0/http_tracker.rs index 42ec02bf2..ae00257d8 100644 --- a/packages/configuration/src/v2_0_0/http_tracker.rs +++ b/packages/configuration/src/v2_0_0/http_tracker.rs @@ -19,6 +19,10 @@ pub struct HttpTracker { /// TSL config. #[serde(default = "HttpTracker::default_tsl_config")] pub tsl_config: Option<TslConfig>, + + /// Whether the tracker should collect statistics about tracker usage. 
+ #[serde(default = "HttpTracker::default_tracker_usage_statistics")] + pub tracker_usage_statistics: bool, } impl Default for HttpTracker { @@ -26,16 +30,21 @@ impl Default for HttpTracker { Self { bind_address: Self::default_bind_address(), tsl_config: Self::default_tsl_config(), + tracker_usage_statistics: Self::default_tracker_usage_statistics(), } } } impl HttpTracker { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 7070) + SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 7070) } fn default_tsl_config() -> Option<TslConfig> { None } + + fn default_tracker_usage_statistics() -> bool { + false + } } diff --git a/packages/configuration/src/v2_0_0/mod.rs b/packages/configuration/src/v2_0_0/mod.rs index fd742d8d2..b3fbc881e 100644 --- a/packages/configuration/src/v2_0_0/mod.rs +++ b/packages/configuration/src/v2_0_0/mod.rs @@ -492,10 +492,7 @@ mod tests { fn configuration_should_contain_the_external_ip() { let configuration = Configuration::default(); - assert_eq!( - configuration.core.net.external_ip, - Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) - ); + assert_eq!(configuration.core.net.external_ip, Some(IpAddr::V4(Ipv4Addr::UNSPECIFIED))); } #[test] @@ -524,6 +521,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_file() { figment::Jail::expect_with(|jail| { jail.create_file( @@ -555,6 +553,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_content() { figment::Jail::expect_with(|_jail| { let config_toml = r#" @@ -584,6 +583,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn default_configuration_could_be_overwritten_from_a_single_env_var_with_toml_contents() { figment::Jail::expect_with(|_jail| { let config_toml = r#" @@ -616,6 +616,7 @@ mod 
tests { } #[test] + #[allow(clippy::result_large_err)] fn default_configuration_could_be_overwritten_from_a_toml_config_file() { figment::Jail::expect_with(|jail| { jail.create_file( @@ -649,6 +650,7 @@ mod tests { }); } + #[allow(clippy::result_large_err)] #[test] fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_an_env_var() { figment::Jail::expect_with(|jail| { diff --git a/packages/configuration/src/v2_0_0/network.rs b/packages/configuration/src/v2_0_0/network.rs index 8e53d419c..7a4668727 100644 --- a/packages/configuration/src/v2_0_0/network.rs +++ b/packages/configuration/src/v2_0_0/network.rs @@ -32,7 +32,7 @@ impl Default for Network { impl Network { #[allow(clippy::unnecessary_wraps)] fn default_external_ip() -> Option<IpAddr> { - Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + Some(IpAddr::V4(Ipv4Addr::UNSPECIFIED)) } fn default_on_reverse_proxy() -> bool { diff --git a/packages/configuration/src/v2_0_0/tracker_api.rs b/packages/configuration/src/v2_0_0/tracker_api.rs index 2da21758b..9433c8c8c 100644 --- a/packages/configuration/src/v2_0_0/tracker_api.rs +++ b/packages/configuration/src/v2_0_0/tracker_api.rs @@ -43,7 +43,7 @@ impl Default for HttpApi { impl HttpApi { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1212) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1212) } #[allow(clippy::unnecessary_wraps)] diff --git a/packages/configuration/src/v2_0_0/udp_tracker.rs b/packages/configuration/src/v2_0_0/udp_tracker.rs index 0eee87700..133018e86 100644 --- a/packages/configuration/src/v2_0_0/udp_tracker.rs +++ b/packages/configuration/src/v2_0_0/udp_tracker.rs @@ -16,22 +16,31 @@ pub struct UdpTracker { /// the client as the `ConnectionId`. #[serde(default = "UdpTracker::default_cookie_lifetime")] pub cookie_lifetime: Duration, + + /// Whether the tracker should collect statistics about tracker usage. 
+ #[serde(default = "UdpTracker::default_tracker_usage_statistics")] + pub tracker_usage_statistics: bool, } impl Default for UdpTracker { fn default() -> Self { Self { bind_address: Self::default_bind_address(), cookie_lifetime: Self::default_cookie_lifetime(), + tracker_usage_statistics: Self::default_tracker_usage_statistics(), } } } impl UdpTracker { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 6969) + SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 6969) } fn default_cookie_lifetime() -> Duration { Duration::from_secs(120) } + + fn default_tracker_usage_statistics() -> bool { + false + } } diff --git a/packages/events/.gitignore b/packages/events/.gitignore new file mode 100644 index 000000000..0b1372e5c --- /dev/null +++ b/packages/events/.gitignore @@ -0,0 +1 @@ +./.coverage diff --git a/packages/events/Cargo.toml b/packages/events/Cargo.toml new file mode 100644 index 000000000..165ecca68 --- /dev/null +++ b/packages/events/Cargo.toml @@ -0,0 +1,22 @@ +[package] +description = "A library with functionality to handle events in Torrust tracker packages." +keywords = [ "events", "library", "rust", "torrust", "tracker" ] +name = "torrust-tracker-events" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +futures = "0" +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync", "time" ] } + +[dev-dependencies] +mockall = "0" diff --git a/packages/events/LICENSE b/packages/events/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/events/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
<https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. 
+ + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. 
For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/packages/events/README.md b/packages/events/README.md new file mode 100644 index 000000000..42a5a2f61 --- /dev/null +++ b/packages/events/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Events + +A library with functionality to handle events in [Torrust Tracker](https://github.com/torrust/torrust-tracker) packages. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-events). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/events/src/broadcaster.rs b/packages/events/src/broadcaster.rs new file mode 100644 index 000000000..79c83df8a --- /dev/null +++ b/packages/events/src/broadcaster.rs @@ -0,0 +1,117 @@ +use futures::future::BoxFuture; +use futures::FutureExt; +use tokio::sync::broadcast::{self}; + +use crate::receiver::{Receiver, RecvError}; +use crate::sender::{SendError, Sender}; + +const CHANNEL_CAPACITY: usize = 65536; + +/// An event sender and receiver implementation using a broadcast channel. 
+#[derive(Clone, Debug)] +pub struct Broadcaster<Event: Sync + Send + Clone> { + pub(crate) sender: broadcast::Sender<Event>, +} + +impl<Event: Sync + Send + Clone> Default for Broadcaster<Event> { + fn default() -> Self { + let (sender, _receiver) = broadcast::channel(CHANNEL_CAPACITY); + Self { sender } + } +} + +impl<Event: Sync + Send + Clone> Broadcaster<Event> { + #[must_use] + pub fn subscribe(&self) -> broadcast::Receiver<Event> { + self.sender.subscribe() + } +} + +impl<Event: Sync + Send + Clone> Sender for Broadcaster<Event> { + type Event = Event; + + fn send(&self, event: Event) -> BoxFuture<'_, Option<Result<usize, SendError<Event>>>> { + async move { Some(self.sender.send(event).map_err(std::convert::Into::into)) }.boxed() + } +} + +impl<Event: Sync + Send + Clone> Receiver for broadcast::Receiver<Event> { + type Event = Event; + + fn recv(&mut self) -> BoxFuture<'_, Result<Self::Event, RecvError>> { + async move { self.recv().await.map_err(std::convert::Into::into) }.boxed() + } +} + +impl<Event> From<broadcast::error::SendError<Event>> for SendError<Event> { + fn from(err: broadcast::error::SendError<Event>) -> Self { + SendError(err.0) + } +} + +impl From<broadcast::error::RecvError> for RecvError { + fn from(err: broadcast::error::RecvError) -> Self { + match err { + broadcast::error::RecvError::Lagged(amt) => RecvError::Lagged(amt), + broadcast::error::RecvError::Closed => RecvError::Closed, + } + } +} + +#[cfg(test)] +mod tests { + use tokio::time::{timeout, Duration}; + + use super::*; + + #[tokio::test] + async fn it_should_allow_sending_an_event_and_received_it() { + let broadcaster = Broadcaster::<String>::default(); + + let mut receiver = broadcaster.subscribe(); + + let event = "test"; + + let _unused = broadcaster.send(event.to_owned()).await.unwrap().unwrap(); + + let received_event = receiver.recv().await.unwrap(); + + assert_eq!(received_event, event); + } + + #[tokio::test] + async fn 
it_should_return_the_number_of_receivers_when_and_event_is_sent() { + let broadcaster = Broadcaster::<String>::default(); + let mut _receiver = broadcaster.subscribe(); + + let number_of_receivers = broadcaster.send("test".into()).await; + + assert!(matches!(number_of_receivers, Some(Ok(1)))); + } + + #[tokio::test] + async fn it_should_fail_when_trying_tos_send_with_no_subscribers() { + let event = String::from("test"); + + let broadcaster = Broadcaster::<String>::default(); + + let result: Result<usize, SendError<String>> = broadcaster.send(event).await.unwrap(); + + assert!(matches!(result, Err(SendError::<String>(_event)))); + } + + #[tokio::test] + async fn it_should_allow_subscribing_multiple_receivers() { + let broadcaster = Broadcaster::<u8>::default(); + let mut r1 = broadcaster.subscribe(); + let mut r2 = broadcaster.subscribe(); + + let _ = broadcaster.send(1).await; + + let val1 = timeout(Duration::from_secs(1), r1.recv()).await.unwrap().unwrap(); + let val2 = timeout(Duration::from_secs(1), r2.recv()).await.unwrap().unwrap(); + + assert_eq!(val1, 1); + assert_eq!(val2, 1); + } +} diff --git a/packages/events/src/bus.rs b/packages/events/src/bus.rs new file mode 100644 index 000000000..b42fb4fc5 --- /dev/null +++ b/packages/events/src/bus.rs @@ -0,0 +1,125 @@ +use std::sync::Arc; + +use crate::broadcaster::Broadcaster; +use crate::{receiver, sender}; + +#[derive(Clone, Debug)] +pub enum SenderStatus { + Enabled, + Disabled, +} + +impl From<bool> for SenderStatus { + fn from(enabled: bool) -> Self { + if enabled { + Self::Enabled + } else { + Self::Disabled + } + } +} + +impl From<SenderStatus> for bool { + fn from(sender_status: SenderStatus) -> Self { + match sender_status { + SenderStatus::Enabled => true, + SenderStatus::Disabled => false, + } + } +} + +#[derive(Clone, Debug)] +pub struct EventBus<Event: Sync + Send + Clone + 'static> { + pub sender_status: SenderStatus, + pub broadcaster: Broadcaster<Event>, +} + +impl<Event: Sync + Send + Clone + 
'static> Default for EventBus<Event> { + fn default() -> Self { + let sender_status = SenderStatus::Enabled; + let broadcaster = Broadcaster::<Event>::default(); + + Self::new(sender_status, broadcaster) + } +} + +impl<Event: Sync + Send + Clone + 'static> EventBus<Event> { + #[must_use] + pub fn new(sender_status: SenderStatus, broadcaster: Broadcaster<Event>) -> Self { + Self { + sender_status, + broadcaster, + } + } + + #[must_use] + pub fn sender(&self) -> Option<Arc<dyn sender::Sender<Event = Event>>> { + match self.sender_status { + SenderStatus::Enabled => Some(Arc::new(self.broadcaster.clone())), + SenderStatus::Disabled => None, + } + } + + #[must_use] + pub fn receiver(&self) -> Box<dyn receiver::Receiver<Event = Event>> { + Box::new(self.broadcaster.subscribe()) + } +} + +#[cfg(test)] +mod tests { + use tokio::time::{timeout, Duration}; + + use super::*; + + #[tokio::test] + async fn it_should_provide_an_event_sender_when_enabled() { + let bus = EventBus::<String>::new(SenderStatus::Enabled, Broadcaster::default()); + + assert!(bus.sender().is_some()); + } + + #[tokio::test] + async fn it_should_not_provide_event_sender_when_disabled() { + let bus = EventBus::<String>::new(SenderStatus::Disabled, Broadcaster::default()); + + assert!(bus.sender().is_none()); + } + + #[tokio::test] + async fn it_should_enabled_by_default() { + let bus = EventBus::<String>::default(); + + assert!(bus.sender().is_some()); + } + + #[tokio::test] + async fn it_should_allow_sending_events_that_are_received_by_receivers() { + let bus = EventBus::<String>::default(); + let sender = bus.sender().unwrap(); + let mut receiver = bus.receiver(); + + let event = "hello".to_string(); + + let _unused = sender.send(event.clone()).await.unwrap().unwrap(); + + let result = timeout(Duration::from_secs(1), receiver.recv()).await; + + assert_eq!(result.unwrap().unwrap(), event); + } + + #[tokio::test] + async fn it_should_send_a_closed_events_to_receivers_when_sender_is_dropped() { + let bus = 
EventBus::<String>::default(); + + let mut receiver = bus.receiver(); + + let future = receiver.recv(); + + drop(bus); // explicitly drop sender + + let result = timeout(Duration::from_secs(1), future).await; + + assert!(matches!(result.unwrap(), Err(crate::receiver::RecvError::Closed))); + } +} diff --git a/packages/events/src/lib.rs b/packages/events/src/lib.rs new file mode 100644 index 000000000..d933b304c --- /dev/null +++ b/packages/events/src/lib.rs @@ -0,0 +1,7 @@ +pub mod broadcaster; +pub mod bus; +pub mod receiver; +pub mod sender; + +/// Target for tracing crate logs. +pub const EVENTS_TARGET: &str = "EVENTS"; diff --git a/packages/events/src/receiver.rs b/packages/events/src/receiver.rs new file mode 100644 index 000000000..15adb816a --- /dev/null +++ b/packages/events/src/receiver.rs @@ -0,0 +1,38 @@ +use std::fmt; + +use futures::future::BoxFuture; +#[cfg(test)] +use mockall::{automock, predicate::str}; + +/// A trait for receiving events. +#[cfg_attr(test, automock(type Event=();))] +pub trait Receiver: Sync + Send { + type Event: Send + Clone; + + fn recv(&mut self) -> BoxFuture<'_, Result<Self::Event, RecvError>>; +} + +/// An error returned from the [`recv`] function on a [`Receiver`]. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum RecvError { + /// There are no more active senders implying no further messages will ever + /// be sent. + Closed, + + /// The receiver lagged too far behind. Attempting to receive again will + /// return the oldest message still retained by the channel. + /// + /// Includes the number of skipped messages. 
+ Lagged(u64), +} + +impl fmt::Display for RecvError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RecvError::Closed => write!(f, "channel closed"), + RecvError::Lagged(amt) => write!(f, "channel lagged by {amt}"), + } + } +} + +impl std::error::Error for RecvError {} diff --git a/packages/events/src/sender.rs b/packages/events/src/sender.rs new file mode 100644 index 000000000..3dccade4c --- /dev/null +++ b/packages/events/src/sender.rs @@ -0,0 +1,39 @@ +use std::fmt; +use std::fmt::Debug; + +use futures::future::BoxFuture; +#[cfg(test)] +use mockall::{automock, predicate::str}; + +/// A trait for sending events. +#[cfg_attr(test, automock(type Event=();))] +pub trait Sender: Sync + Send { + type Event: Send + Clone; + + /// Sends an event to all active receivers. + /// + /// Returns a future that resolves to an `Option<Result<usize, SendError<Self::Event>>>`: + /// + /// - `Some(Ok(n))` — the event was successfully sent to `n` receivers. + /// - `Some(Err(e))` — an error occurred while sending the event. + /// - `None` — the sender is inactive or disconnected, and the event was not sent. + /// + /// The `Option` allows implementations to express cases where sending is not possible + /// (e.g., when the sender is disabled or there are no active receivers). + /// + /// The `usize` typically represents the number of receivers the message was delivered to, + /// but its semantics may vary depending on the concrete implementation. + fn send(&self, event: Self::Event) -> BoxFuture<'_, Option<Result<usize, SendError<Self::Event>>>>; +} + +/// Error returned by the [`send`] function on a [`Sender`]. 
+#[derive(Debug)] +pub struct SendError<Event>(pub Event); + +impl<Event> fmt::Display for SendError<Event> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "channel closed") + } +} + +impl<Event: fmt::Debug> std::error::Error for SendError<Event> {} diff --git a/packages/events/src/shutdown.rs b/packages/events/src/shutdown.rs new file mode 100644 index 000000000..e69de29bb diff --git a/packages/http-protocol/Cargo.toml b/packages/http-protocol/Cargo.toml index 7803fe78e..78a037b18 100644 --- a/packages/http-protocol/Cargo.toml +++ b/packages/http-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types and functions for the BitTorrent HTTP tracker protocol." -keywords = ["api", "library", "primitives"] +keywords = [ "api", "library", "primitives" ] name = "bittorrent-http-tracker-protocol" readme = "README.md" @@ -18,10 +18,10 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } multimap = "0" percent-encoding = "2" -serde = { version = "1", features = ["derive"] } +serde = { version = "1", features = [ "derive" ] } serde_bencode = "0" thiserror = "2" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } diff --git a/packages/http-protocol/src/v1/query.rs b/packages/http-protocol/src/v1/query.rs index b329b787e..9f53ef54f 100644 --- a/packages/http-protocol/src/v1/query.rs +++ b/packages/http-protocol/src/v1/query.rs @@ -86,7 +86,7 @@ impl Query { self.params.get_vec(name).map(|pairs| { let mut param_values = vec![]; for pair in pairs { - param_values.push(pair.value.to_string()); + param_values.push(pair.value.clone()); } param_values }) diff --git a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs 
b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs index bea93f1ba..ceaa7e11c 100644 --- a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs +++ b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs @@ -1,4 +1,4 @@ -//! This service resolves the peer IP from the request. +//! This service resolves the remote client address. //! //! The peer IP is used to identify the peer in the tracker. It's the peer IP //! that is used in the `announce` responses (peer list). And it's also used to @@ -12,27 +12,103 @@ //! X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 //! ``` //! -//! This service returns two options for the peer IP: +//! This `ClientIpSources` contains two options for the peer IP: //! //! ```text //! right_most_x_forwarded_for = 126.0.0.2 //! connection_info_ip = 126.0.0.3 //! ``` //! -//! Depending on the tracker configuration. -use std::net::IpAddr; +//! Which one to use depends on the `ReverseProxyMode`. +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use serde::{Deserialize, Serialize}; use thiserror::Error; +/// Resolves the client's real address considering proxy headers. Port is also +/// included when available. +/// +/// # Errors +/// +/// This function returns an error if the IP address cannot be resolved. +pub fn resolve_remote_client_addr( + reverse_proxy_mode: &ReverseProxyMode, + client_ip_sources: &ClientIpSources, +) -> Result<RemoteClientAddr, PeerIpResolutionError> { + let ip = match reverse_proxy_mode { + ReverseProxyMode::Enabled => ResolvedIp::FromXForwardedFor(client_ip_sources.try_client_ip_from_proxy_header()?), + ReverseProxyMode::Disabled => ResolvedIp::FromSocketAddr(client_ip_sources.try_client_ip_from_connection_info()?), + }; + + let port = client_ip_sources.client_port_from_connection_info(); + + Ok(RemoteClientAddr::new(ip, port)) +} + +/// This struct indicates whether the tracker is running on reverse proxy mode. 
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)] +pub enum ReverseProxyMode { + Enabled, + Disabled, +} + +impl From<ReverseProxyMode> for bool { + fn from(reverse_proxy_mode: ReverseProxyMode) -> Self { + match reverse_proxy_mode { + ReverseProxyMode::Enabled => true, + ReverseProxyMode::Disabled => false, + } + } +} + +impl From<bool> for ReverseProxyMode { + fn from(reverse_proxy_mode: bool) -> Self { + if reverse_proxy_mode { + ReverseProxyMode::Enabled + } else { + ReverseProxyMode::Disabled + } + } +} /// This struct contains the sources from which the peer IP can be obtained. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] pub struct ClientIpSources { /// The right most IP from the `X-Forwarded-For` HTTP header. pub right_most_x_forwarded_for: Option<IpAddr>, - /// The IP from the connection info. - pub connection_info_ip: Option<IpAddr>, + + /// The client's socket address from the connection info. + pub connection_info_socket_address: Option<SocketAddr>, +} + +impl ClientIpSources { + fn try_client_ip_from_connection_info(&self) -> Result<IpAddr, PeerIpResolutionError> { + if let Some(socket_addr) = self.connection_info_socket_address { + Ok(socket_addr.ip()) + } else { + Err(PeerIpResolutionError::MissingClientIp { + location: Location::caller(), + }) + } + } + + fn try_client_ip_from_proxy_header(&self) -> Result<IpAddr, PeerIpResolutionError> { + if let Some(ip) = self.right_most_x_forwarded_for { + Ok(ip) + } else { + Err(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }) + } + } + + fn client_port_from_connection_info(&self) -> Option<u16> { + if self.connection_info_socket_address.is_some() { + self.connection_info_socket_address.map(|socket_addr| socket_addr.port()) + } else { + None + } + } } /// The error that can occur when resolving the peer IP. 
@@ -45,6 +121,7 @@ pub enum PeerIpResolutionError { "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" )] MissingRightMostXForwardedForIp { location: &'static Location<'static> }, + /// The peer IP cannot be obtained because the tracker is not configured as /// a reverse proxy but the connection info was not provided to the Axum /// framework via a route extension. @@ -52,123 +129,82 @@ pub enum PeerIpResolutionError { MissingClientIp { location: &'static Location<'static> }, } -/// Resolves the peer IP from the request. -/// -/// Given the sources from which the peer IP can be obtained, this function -/// resolves the peer IP according to the tracker configuration. -/// -/// With the tracker running on reverse proxy mode: -/// -/// ```rust -/// use std::net::IpAddr; -/// use std::str::FromStr; -/// -/// use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; -/// -/// let on_reverse_proxy = true; -/// -/// let ip = invoke( -/// on_reverse_proxy, -/// &ClientIpSources { -/// right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), -/// connection_info_ip: None, -/// }, -/// ) -/// .unwrap(); -/// -/// assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); -/// ``` -/// -/// With the tracker non running on reverse proxy mode: -/// -/// ```rust -/// use std::net::IpAddr; -/// use std::str::FromStr; -/// -/// use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; -/// -/// let on_reverse_proxy = false; -/// -/// let ip = invoke( -/// on_reverse_proxy, -/// &ClientIpSources { -/// right_most_x_forwarded_for: None, -/// connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), -/// }, -/// ) -/// .unwrap(); -/// -/// assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); -/// ``` -/// -/// # Errors -/// -/// Will return an error if 
the peer IP cannot be obtained according to the configuration. -/// For example, if the IP is extracted from an HTTP header which is missing in the request. -pub fn invoke(on_reverse_proxy: bool, client_ip_sources: &ClientIpSources) -> Result<IpAddr, PeerIpResolutionError> { - if on_reverse_proxy { - resolve_peer_ip_on_reverse_proxy(client_ip_sources) - } else { - resolve_peer_ip_without_reverse_proxy(client_ip_sources) - } +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)] +pub struct RemoteClientAddr { + ip: ResolvedIp, + port: Option<u16>, } -fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result<IpAddr, PeerIpResolutionError> { - if let Some(ip) = remote_client_ip.connection_info_ip { - Ok(ip) - } else { - Err(PeerIpResolutionError::MissingClientIp { - location: Location::caller(), - }) +impl RemoteClientAddr { + #[must_use] + pub fn new(ip: ResolvedIp, port: Option<u16>) -> Self { + Self { ip, port } } -} -fn resolve_peer_ip_on_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result<IpAddr, PeerIpResolutionError> { - if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { - Ok(ip) - } else { - Err(PeerIpResolutionError::MissingRightMostXForwardedForIp { - location: Location::caller(), - }) + #[must_use] + pub fn ip(&self) -> IpAddr { + match self.ip { + ResolvedIp::FromSocketAddr(ip) | ResolvedIp::FromXForwardedFor(ip) => ip, + } + } + + #[must_use] + pub fn port(&self) -> Option<u16> { + self.port } } +/// This enum indicates the source of the resolved IP address. 
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)] +pub enum ResolvedIp { + FromXForwardedFor(IpAddr), + FromSocketAddr(IpAddr), +} + #[cfg(test)] mod tests { - use super::invoke; + use super::resolve_remote_client_addr; mod working_without_reverse_proxy { - use std::net::IpAddr; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use super::invoke; - use crate::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + use super::resolve_remote_client_addr; + use crate::v1::services::peer_ip_resolver::{ + ClientIpSources, PeerIpResolutionError, RemoteClientAddr, ResolvedIp, ReverseProxyMode, + }; #[test] - fn it_should_get_the_peer_ip_from_the_connection_info() { - let on_reverse_proxy = false; + fn it_should_get_the_remote_client_address_from_the_connection_info() { + let reverse_proxy_mode = ReverseProxyMode::Disabled; - let ip = invoke( - on_reverse_proxy, + let ip = resolve_remote_client_addr( + &reverse_proxy_mode, &ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_socket_address: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080)), }, ) .unwrap(); - assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + assert_eq!( + ip, + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::from_str("203.0.113.195").unwrap()), + Some(8080) + ) + ); } #[test] - fn it_should_return_an_error_if_it_cannot_get_the_peer_ip_from_the_connection_info() { - let on_reverse_proxy = false; + fn it_should_return_an_error_if_it_cannot_get_the_remote_client_ip_from_the_connection_info() { + let reverse_proxy_mode = ReverseProxyMode::Disabled; - let error = invoke( - on_reverse_proxy, + let error = resolve_remote_client_addr( + &reverse_proxy_mode, &ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }, ) .unwrap_err(); @@ -177,37 +213,45 @@ 
mod tests { } } - mod working_on_reverse_proxy { + mod working_on_reverse_proxy_mode { use std::net::IpAddr; use std::str::FromStr; - use crate::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + use crate::v1::services::peer_ip_resolver::{ + resolve_remote_client_addr, ClientIpSources, PeerIpResolutionError, RemoteClientAddr, ResolvedIp, ReverseProxyMode, + }; #[test] - fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { - let on_reverse_proxy = true; + fn it_should_get_the_remote_client_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { + let reverse_proxy_mode = ReverseProxyMode::Enabled; - let ip = invoke( - on_reverse_proxy, + let ip = resolve_remote_client_addr( + &reverse_proxy_mode, &ClientIpSources { right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), - connection_info_ip: None, + connection_info_socket_address: None, }, ) .unwrap(); - assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + assert_eq!( + ip, + RemoteClientAddr::new( + ResolvedIp::FromXForwardedFor(IpAddr::from_str("203.0.113.195").unwrap()), + None + ) + ); } #[test] fn it_should_return_an_error_if_it_cannot_get_the_right_most_ip_from_the_x_forwarded_for_header() { - let on_reverse_proxy = true; + let reverse_proxy_mode = ReverseProxyMode::Enabled; - let error = invoke( - on_reverse_proxy, + let error = resolve_remote_client_addr( + &reverse_proxy_mode, &ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }, ) .unwrap_err(); diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 1e0bcff28..c419052f9 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true 
-keywords = ["api", "bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", "library", "tracker" ] license.workspace = true name = "bittorrent-http-tracker-core" publish.workspace = true @@ -18,13 +18,26 @@ aquatic_udp_protocol = "0" bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } +criterion = { version = "0.5.1", features = [ "async_tokio" ] } futures = "0" +serde = "1.0.219" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } +tokio-util = "0.7.15" +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" [dev-dependencies] +formatjson = "0.3.1" mockall = "0" +serde_json = "1.0.140" torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } + +[[bench]] +harness = false +name = "http_tracker_core_benchmark" diff --git a/packages/http-tracker-core/benches/helpers/mod.rs b/packages/http-tracker-core/benches/helpers/mod.rs new file mode 100644 index 000000000..4a91f2224 --- /dev/null +++ b/packages/http-tracker-core/benches/helpers/mod.rs @@ -0,0 +1,2 @@ +pub mod sync; +pub mod util; diff --git a/packages/http-tracker-core/benches/helpers/sync.rs b/packages/http-tracker-core/benches/helpers/sync.rs new file mode 100644 index 
000000000..dbf0dac83 --- /dev/null +++ b/packages/http-tracker-core/benches/helpers/sync.rs @@ -0,0 +1,38 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::time::{Duration, Instant}; + +use bittorrent_http_tracker_core::services::announce::AnnounceService; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + +use crate::helpers::util::{initialize_core_tracker_services, sample_announce_request_for_peer, sample_peer}; + +#[must_use] +pub async fn return_announce_data_once(samples: u64) -> Duration { + let (core_tracker_services, core_http_tracker_services) = initialize_core_tracker_services(); + + let peer = sample_peer(); + + let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); + + let announce_service = AnnounceService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.authentication_service.clone(), + core_tracker_services.whitelist_authorization.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + + let start = Instant::now(); + + for _ in 0..samples { + let _announce_data = announce_service + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding, None) + .await + .unwrap(); + } + + start.elapsed() +} diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs new file mode 100644 index 000000000..028d7c535 --- /dev/null +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -0,0 +1,135 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::sync::Arc; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use bittorrent_http_tracker_core::event::bus::EventBus; +use 
bittorrent_http_tracker_core::event::sender::Broadcaster; +use bittorrent_http_tracker_core::event::Event; +use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; +use bittorrent_http_tracker_core::statistics::repository::Repository; +use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::AnnounceHandler; +use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::databases::setup::initialize_database; +use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; +use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use futures::future::BoxFuture; +use mockall::mock; +use tokio_util::sync::CancellationToken; +use torrust_tracker_configuration::{Configuration, Core}; +use torrust_tracker_events::sender::SendError; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use torrust_tracker_test_helpers::configuration; + +pub struct CoreTrackerServices { + pub core_config: Arc<Core>, + pub announce_handler: Arc<AnnounceHandler>, + pub authentication_service: Arc<AuthenticationService>, + pub whitelist_authorization: Arc<WhitelistAuthorization>, +} + +pub struct CoreHttpTrackerServices { + pub http_stats_event_sender: bittorrent_http_tracker_core::event::sender::Sender, +} + +pub fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrackerServices) { + 
initialize_core_tracker_services_with_config(&configuration::ephemeral_public()) +} + +pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + let cancellation_token = CancellationToken::new(); + + let core_config = Arc::new(config.core.clone()); + let database = initialize_database(&config.core); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(&core_config, &in_memory_key_repository)); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_downloads_metric_repository, + )); + + // HTTP core stats + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_event_bus = Arc::new(EventBus::new( + config.core.tracker_usage_statistics.into(), + http_core_broadcaster.clone(), + )); + + let http_stats_event_sender = http_stats_event_bus.sender(); + + if config.core.tracker_usage_statistics { + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); + } + + ( + CoreTrackerServices { + core_config, + announce_handler, + authentication_service, + whitelist_authorization, + }, + CoreHttpTrackerServices { http_stats_event_sender }, + ) +} + +pub fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: 
DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } +} + +pub fn sample_announce_request_for_peer(peer: Peer) -> (Announce, ClientIpSources) { + let announce_request = Announce { + info_hash: sample_info_hash(), + peer_id: peer.peer_id, + port: peer.peer_addr.port(), + uploaded: Some(peer.uploaded), + downloaded: Some(peer.downloaded), + left: Some(peer.left), + event: Some(peer.event.into()), + compact: None, + numwant: None, + }; + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_socket_address: Some(SocketAddr::new(peer.peer_addr.ip(), 8080)), + }; + + (announce_request, client_ip_sources) +} +#[must_use] +pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::<InfoHash>() + .expect("String should be a valid info hash") +} + +mock! { + HttpStatsEventSender {} + impl torrust_tracker_events::sender::Sender for HttpStatsEventSender { + type Event = Event; + + fn send(&self, event: Event) -> BoxFuture<'static,Option<Result<usize,SendError<Event> > > > ; + } +} diff --git a/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs b/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs new file mode 100644 index 000000000..c193c5124 --- /dev/null +++ b/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs @@ -0,0 +1,23 @@ +mod helpers; + +use std::time::Duration; + +use criterion::{criterion_group, criterion_main, Criterion}; + +use crate::helpers::sync; + +fn announce_once(c: &mut Criterion) { + let _rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("http_tracker_handle_announce_once"); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_secs(1)); + + 
group.bench_function("handle_announce_data", |b| { + b.iter(|| sync::return_announce_data_once(100)); + }); +} + +criterion_group!(benches, announce_once); +criterion_main!(benches); diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 448dce246..ed0aaf8b0 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -1,27 +1,25 @@ use std::sync::Arc; -use bittorrent_tracker_core::announce_handler::AnnounceHandler; -use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::{Core, HttpTracker}; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; +use crate::event::bus::EventBus; +use crate::event::sender::Broadcaster; use crate::services::announce::AnnounceService; use crate::services::scrape::ScrapeService; -use crate::statistics; +use crate::statistics::repository::Repository; +use crate::{event, services, statistics}; pub struct HttpTrackerCoreContainer { - // todo: replace with TrackerCoreContainer - pub core_config: Arc<Core>, - pub announce_handler: Arc<AnnounceHandler>, - pub scrape_handler: Arc<ScrapeHandler>, - pub whitelist_authorization: Arc<whitelist::authorization::WhitelistAuthorization>, - pub authentication_service: Arc<AuthenticationService>, - pub http_tracker_config: Arc<HttpTracker>, - pub http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, - pub http_stats_repository: Arc<statistics::repository::Repository>, + + pub tracker_core_container: Arc<TrackerCoreContainer>, + + // `HttpTrackerCoreServices` + pub event_bus: Arc<event::bus::EventBus>, + pub stats_event_sender: event::sender::Sender, + pub stats_repository: Arc<statistics::repository::Repository>, 
pub announce_service: Arc<AnnounceService>, pub scrape_service: Arc<ScrapeService>, } @@ -29,21 +27,68 @@ pub struct HttpTrackerCoreContainer { impl HttpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc<Core>, http_tracker_config: &Arc<HttpTracker>) -> Arc<Self> { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); - Self::initialize_from(&tracker_core_container, http_tracker_config) + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &swarm_coordination_registry_container, + )); + + Self::initialize_from_tracker_core(&tracker_core_container, http_tracker_config) } #[must_use] - pub fn initialize_from( + pub fn initialize_from_tracker_core( tracker_core_container: &Arc<TrackerCoreContainer>, http_tracker_config: &Arc<HttpTracker>, ) -> Arc<Self> { - let (http_stats_event_sender, http_stats_repository) = - statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let http_stats_repository = Arc::new(http_stats_repository); + let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(tracker_core_container); - let announce_service = Arc::new(AnnounceService::new( + Self::initialize_from_services(tracker_core_container, &http_tracker_core_services, http_tracker_config) + } + + #[must_use] + pub fn initialize_from_services( + tracker_core_container: &Arc<TrackerCoreContainer>, + http_tracker_core_services: &Arc<HttpTrackerCoreServices>, + http_tracker_config: &Arc<HttpTracker>, + ) -> Arc<Self> { + Arc::new(Self { + tracker_core_container: tracker_core_container.clone(), + http_tracker_config: http_tracker_config.clone(), + event_bus: http_tracker_core_services.event_bus.clone(), + stats_event_sender: 
http_tracker_core_services.stats_event_sender.clone(), + stats_repository: http_tracker_core_services.stats_repository.clone(), + announce_service: http_tracker_core_services.announce_service.clone(), + scrape_service: http_tracker_core_services.scrape_service.clone(), + }) + } +} + +pub struct HttpTrackerCoreServices { + pub event_bus: Arc<event::bus::EventBus>, + pub stats_event_sender: event::sender::Sender, + pub stats_repository: Arc<statistics::repository::Repository>, + pub announce_service: Arc<services::announce::AnnounceService>, + pub scrape_service: Arc<services::scrape::ScrapeService>, +} + +impl HttpTrackerCoreServices { + #[must_use] + pub fn initialize_from(tracker_core_container: &Arc<TrackerCoreContainer>) -> Arc<Self> { + // HTTP core stats + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_event_bus = Arc::new(EventBus::new( + tracker_core_container.core_config.tracker_usage_statistics.into(), + http_core_broadcaster.clone(), + )); + + let http_stats_event_sender = http_stats_event_bus.sender(); + + let http_announce_service = Arc::new(AnnounceService::new( tracker_core_container.core_config.clone(), tracker_core_container.announce_handler.clone(), tracker_core_container.authentication_service.clone(), @@ -51,7 +96,7 @@ impl HttpTrackerCoreContainer { http_stats_event_sender.clone(), )); - let scrape_service = Arc::new(ScrapeService::new( + let http_scrape_service = Arc::new(ScrapeService::new( tracker_core_container.core_config.clone(), tracker_core_container.scrape_handler.clone(), tracker_core_container.authentication_service.clone(), @@ -59,17 +104,11 @@ impl HttpTrackerCoreContainer { )); Arc::new(Self { - core_config: tracker_core_container.core_config.clone(), - announce_handler: tracker_core_container.announce_handler.clone(), - scrape_handler: tracker_core_container.scrape_handler.clone(), - whitelist_authorization: 
tracker_core_container.whitelist_authorization.clone(), - authentication_service: tracker_core_container.authentication_service.clone(), - - http_tracker_config: http_tracker_config.clone(), - http_stats_event_sender: http_stats_event_sender.clone(), - http_stats_repository: http_stats_repository.clone(), - announce_service: announce_service.clone(), - scrape_service: scrape_service.clone(), + event_bus: http_stats_event_bus, + stats_event_sender: http_stats_event_sender, + stats_repository: http_stats_repository, + announce_service: http_announce_service, + scrape_service: http_scrape_service, }) } } diff --git a/packages/http-tracker-core/src/event.rs b/packages/http-tracker-core/src/event.rs new file mode 100644 index 000000000..2a4734bfd --- /dev/null +++ b/packages/http-tracker-core/src/event.rs @@ -0,0 +1,206 @@ +use std::net::{IpAddr, SocketAddr}; + +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::RemoteClientAddr; +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::label_name; +use torrust_tracker_primitives::peer::PeerAnnouncement; +use torrust_tracker_primitives::service_binding::ServiceBinding; + +/// A HTTP core event. 
+#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Event { + TcpAnnounce { + connection: ConnectionContext, + info_hash: InfoHash, + announcement: PeerAnnouncement, + }, + TcpScrape { + connection: ConnectionContext, + }, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ConnectionContext { + client: ClientConnectionContext, + server: ServerConnectionContext, +} + +impl ConnectionContext { + #[must_use] + pub fn new(remote_client_addr: RemoteClientAddr, server_service_binding: ServiceBinding) -> Self { + Self { + client: ClientConnectionContext { remote_client_addr }, + server: ServerConnectionContext { + service_binding: server_service_binding, + }, + } + } + + #[must_use] + pub fn client_ip_addr(&self) -> IpAddr { + self.client.ip_addr() + } + + #[must_use] + pub fn client_port(&self) -> Option<u16> { + self.client.port() + } + + #[must_use] + pub fn server_socket_addr(&self) -> SocketAddr { + self.server.service_binding.bind_address() + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ClientConnectionContext { + remote_client_addr: RemoteClientAddr, +} + +impl ClientConnectionContext { + #[must_use] + pub fn ip_addr(&self) -> IpAddr { + self.remote_client_addr.ip() + } + + #[must_use] + pub fn port(&self) -> Option<u16> { + self.remote_client_addr.port() + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ServerConnectionContext { + service_binding: ServiceBinding, +} + +impl From<ConnectionContext> for LabelSet { + fn from(connection_context: ConnectionContext) -> Self { + LabelSet::from([ + ( + label_name!("server_binding_protocol"), + LabelValue::new(&connection_context.server.service_binding.protocol().to_string()), + ), + ( + label_name!("server_binding_ip"), + LabelValue::new(&connection_context.server.service_binding.bind_address().ip().to_string()), + ), + ( + label_name!("server_binding_address_ip_type"), + LabelValue::new(&connection_context.server.service_binding.bind_address_ip_type().to_string()), + ), + ( + 
label_name!("server_binding_address_ip_family"), + LabelValue::new(&connection_context.server.service_binding.bind_address_ip_family().to_string()), + ), + ( + label_name!("server_binding_port"), + LabelValue::new(&connection_context.server.service_binding.bind_address().port().to_string()), + ), + ]) + } +} + +pub mod sender { + use std::sync::Arc; + + use super::Event; + + pub type Sender = Option<Arc<dyn torrust_tracker_events::sender::Sender<Event = Event>>>; + pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster<Event>; +} + +pub mod receiver { + use super::Event; + + pub type Receiver = Box<dyn torrust_tracker_events::receiver::Receiver<Event = Event>>; +} + +pub mod bus { + use crate::event::Event; + + pub type EventBus = torrust_tracker_events::bus::EventBus<Event>; +} + +#[cfg(test)] +pub mod test { + + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{RemoteClientAddr, ResolvedIp}; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::service_binding::Protocol; + + use super::Event; + use crate::tests::sample_info_hash; + + #[must_use] + pub fn announce_events_match(event: &Event, expected_event: &Event) -> bool { + match (event, expected_event) { + ( + Event::TcpAnnounce { + connection, + info_hash, + announcement, + }, + Event::TcpAnnounce { + connection: expected_connection, + info_hash: expected_info_hash, + announcement: expected_announcement, + }, + ) => { + *connection == *expected_connection + && *info_hash == *expected_info_hash + && announcement.peer_id == expected_announcement.peer_id + && announcement.peer_addr == expected_announcement.peer_addr + // Events can't be compared due to the `updated` field. + // The `announcement.uploaded` contains the current time + // when the test is executed. 
+ // todo: mock time + //&& announcement.updated == expected_announcement.updated + && announcement.uploaded == expected_announcement.uploaded + && announcement.downloaded == expected_announcement.downloaded + && announcement.left == expected_announcement.left + && announcement.event == expected_announcement.event + } + _ => false, + } + } + + #[test] + fn events_should_be_comparable() { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use torrust_tracker_primitives::service_binding::ServiceBinding; + + use crate::event::{ConnectionContext, Event}; + + let remote_client_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + let info_hash = sample_info_hash(); + + let event1 = Event::TcpAnnounce { + connection: ConnectionContext::new( + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), + ), + info_hash, + announcement: Peer::default(), + }; + + let event2 = Event::TcpAnnounce { + connection: ConnectionContext::new( + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2))), + Some(8080), + ), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), + ), + info_hash, + announcement: Peer::default(), + }; + + let event1_clone = event1.clone(); + + assert!(event1 == event1_clone); + assert!(event1 != event2); + } +} diff --git a/packages/http-tracker-core/src/lib.rs b/packages/http-tracker-core/src/lib.rs index b42b99f8e..1692a68fa 100644 --- a/packages/http-tracker-core/src/lib.rs +++ b/packages/http-tracker-core/src/lib.rs @@ -1,10 +1,30 @@ pub mod container; +pub mod event; pub mod services; pub mod statistics; +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +pub const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; + #[cfg(test)] pub(crate) mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; /// # Panics /// @@ -15,4 +35,29 @@ pub(crate) mod tests { .parse::<InfoHash>() .expect("String should be a valid info hash") } + + pub fn sample_peer_using_ipv4() -> peer::Peer { + sample_peer() + } + + pub fn sample_peer_using_ipv6() -> peer::Peer { + let mut peer = sample_peer(); + peer.peer_addr = SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + ); + peer + } + + pub fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } + } } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 896387b28..766f08c12 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -5,14 +5,15 @@ //! It delegates the `announce` logic to the [`AnnounceHandler`] and it returns //! the [`AnnounceData`]. //! -//! It also sends an [`http_tracker_core::statistics::event::Event`] +//! It also sends an [`http_tracker_core::event::Event`] //! because events are specific for the HTTP tracker. 
-use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::announce::{peer_from_request, Announce}; -use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources, PeerIpResolutionError}; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ + resolve_remote_client_addr, ClientIpSources, PeerIpResolutionError, RemoteClientAddr, +}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::authentication::service::AuthenticationService; @@ -21,8 +22,11 @@ use bittorrent_tracker_core::error::{AnnounceError, TrackerCoreError, WhitelistE use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::peer::PeerAnnouncement; +use torrust_tracker_primitives::service_binding::ServiceBinding; -use crate::statistics; +use crate::event; +use crate::event::Event; /// The HTTP tracker `announce` service. 
/// @@ -35,7 +39,7 @@ pub struct AnnounceService { announce_handler: Arc<AnnounceHandler>, authentication_service: Arc<AuthenticationService>, whitelist_authorization: Arc<whitelist::authorization::WhitelistAuthorization>, - opt_http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, + opt_http_stats_event_sender: event::sender::Sender, } impl AnnounceService { @@ -45,7 +49,7 @@ impl AnnounceService { announce_handler: Arc<AnnounceHandler>, authentication_service: Arc<AuthenticationService>, whitelist_authorization: Arc<whitelist::authorization::WhitelistAuthorization>, - opt_http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, + opt_http_stats_event_sender: event::sender::Sender, ) -> Self { Self { core_config, @@ -68,24 +72,36 @@ impl AnnounceService { &self, announce_request: &Announce, client_ip_sources: &ClientIpSources, + server_service_binding: &ServiceBinding, maybe_key: Option<Key>, ) -> Result<AnnounceData, HttpAnnounceError> { self.authenticate(maybe_key).await?; self.authorize(announce_request.info_hash).await?; - let remote_client_ip = self.resolve_remote_client_ip(client_ip_sources)?; + let remote_client_addr = resolve_remote_client_addr(&self.core_config.net.on_reverse_proxy.into(), client_ip_sources)?; - let mut peer = peer_from_request(announce_request, &remote_client_ip); + let mut peer = peer_from_request(announce_request, &remote_client_addr.ip()); let peers_wanted = Self::peers_wanted(announce_request); let announce_data = self .announce_handler - .announce(&announce_request.info_hash, &mut peer, &remote_client_ip, &peers_wanted) + .handle_announcement( + &announce_request.info_hash, + &mut peer, + &remote_client_addr.ip(), + &peers_wanted, + ) .await?; - self.send_stats_event(remote_client_ip).await; + self.send_event( + announce_request.info_hash, + remote_client_addr, + server_service_binding.clone(), + peer, + ) + .await; Ok(announce_data) } @@ -106,14 +122,6 @@ impl AnnounceService { 
self.whitelist_authorization.authorize(&info_hash).await } - /// Resolves the client's real IP address considering proxy headers - fn resolve_remote_client_ip(&self, client_ip_sources: &ClientIpSources) -> Result<IpAddr, PeerIpResolutionError> { - match peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => Ok(peer_ip), - Err(error) => Err(error), - } - } - /// Determines how many peers the client wants in the response fn peers_wanted(announce_request: &Announce) -> PeersWanted { match announce_request.numwant { @@ -122,20 +130,23 @@ impl AnnounceService { } } - async fn send_stats_event(&self, peer_ip: IpAddr) { + async fn send_event( + &self, + info_hash: InfoHash, + remote_client_addr: RemoteClientAddr, + server_service_binding: ServiceBinding, + announcement: PeerAnnouncement, + ) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { - match peer_ip { - IpAddr::V4(_) => { - http_stats_event_sender - .send_event(statistics::event::Event::Tcp4Announce) - .await; - } - IpAddr::V6(_) => { - http_stats_event_sender - .send_event(statistics::event::Event::Tcp6Announce) - .await; - } - } + let event = Event::TcpAnnounce { + connection: event::ConnectionContext::new(remote_client_addr, server_service_binding), + info_hash, + announcement, + }; + + tracing::debug!("Sending TcpAnnounce event: {:?}", event); + + http_stats_event_sender.send(event).await; } } } @@ -192,23 +203,22 @@ impl From<authentication::key::Error> for HttpAnnounceError { #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::net::SocketAddr; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use 
bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; struct CoreTrackerServices { @@ -219,7 +229,7 @@ mod tests { } struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, + pub http_stats_event_sender: crate::event::sender::Sender, } fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrackerServices) { @@ -227,10 +237,12 @@ mod tests { } fn initialize_core_tracker_services_with_config(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = 
Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); @@ -240,13 +252,22 @@ mod tests { &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, + )); + + // HTTP core stats + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_event_bus = Arc::new(EventBus::new( + config.core.tracker_usage_statistics.into(), + http_core_broadcaster.clone(), )); - // HTTP stats - let (http_stats_event_sender, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let _http_stats_repository = Arc::new(http_stats_repository); + let http_stats_event_sender = http_stats_event_bus.sender(); + + if config.core.tracker_usage_statistics { + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); + } ( CoreTrackerServices { @@ -259,31 +280,6 @@ mod tests { ) } - fn sample_peer_using_ipv4() -> peer::Peer { - sample_peer() - } - - fn sample_peer_using_ipv6() -> peer::Peer { - let mut peer = sample_peer(); - peer.peer_addr = SocketAddr::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - 8080, - ); - peer - } - - fn sample_peer() -> peer::Peer { - peer::Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), - event: AnnounceEvent::Started, - } - } - fn sample_announce_request_for_peer(peer: Peer) -> (Announce, ClientIpSources) { let announce_request = Announce { info_hash: sample_info_hash(), @@ -299,7 
+295,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer.peer_addr.ip()), + connection_info_socket_address: Some(SocketAddr::new(peer.peer_addr.ip(), 8080)), }; (announce_request, client_ip_sources) @@ -307,15 +303,21 @@ mod tests { use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::mpsc::error::SendError; + use torrust_tracker_events::sender::SendError; - use crate::statistics; + use crate::event::bus::EventBus; + use crate::event::sender::Broadcaster; + use crate::event::Event; + use crate::statistics::event::listener::run_event_listener; + use crate::statistics::repository::Repository; use crate::tests::sample_info_hash; mock! { HttpStatsEventSender {} - impl statistics::event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option<Result<(),SendError<statistics::event::Event> > > > ; + impl torrust_tracker_events::sender::Sender for HttpStatsEventSender { + type Event = Event; + + fn send(&self, event: Event) -> BoxFuture<'static,Option<Result<usize,SendError<Event> > > > ; } } @@ -324,20 +326,23 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; - use mockall::predicate::eq; + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{RemoteClientAddr, ResolvedIp}; + use mockall::predicate::{self}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::event::test::announce_events_match; + use crate::event::{ConnectionContext, Event}; use crate::services::announce::tests::{ 
initialize_core_tracker_services, initialize_core_tracker_services_with_config, sample_announce_request_for_peer, - sample_peer, MockHttpStatsEventSender, + MockHttpStatsEventSender, }; use crate::services::announce::AnnounceService; - use crate::statistics; + use crate::tests::{sample_info_hash, sample_peer, sample_peer_using_ipv4, sample_peer_using_ipv6}; #[tokio::test] async fn it_should_return_the_announce_data() { @@ -347,6 +352,9 @@ mod tests { let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let announce_service = AnnounceService::new( core_tracker_services.core_config.clone(), core_tracker_services.announce_handler.clone(), @@ -356,7 +364,7 @@ mod tests { ); let announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, None) + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); @@ -375,19 +383,38 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let peer = sample_peer_using_ipv4(); + let remote_client_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); + + let server_service_binding_clone = server_service_binding.clone(); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Announce)) + .expect_send() + .with(predicate::function(move |event| { + let mut announcement = peer; + announcement.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let expected_event = Event::TcpAnnounce { + 
connection: ConnectionContext::new( + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), + server_service_binding.clone(), + ), + info_hash: sample_info_hash(), + announcement, + }; + + announce_events_match(event, &expected_event) + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let http_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(http_stats_event_sender_mock)); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); - core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; - let peer = sample_peer_using_ipv4(); + core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); @@ -400,7 +427,7 @@ mod tests { ); let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, None) + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding_clone, None) .await .unwrap(); } @@ -414,7 +441,7 @@ mod tests { } fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { - let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let loopback_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let mut peer = sample_peer(); peer.peer_addr = SocketAddr::new(loopback_ip, 8080); peer @@ -425,21 +452,43 @@ mod tests { { // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. 
- // Assert that the event sent is a TCP4 event + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let peer = peer_with_the_ipv4_loopback_ip(); + let remote_client_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let server_service_binding_clone = server_service_binding.clone(); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Announce)) + .expect_send() + .with(predicate::function(move |event| { + let mut peer_announcement = peer; + peer_announcement.peer_addr = SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + ); + + let expected_event = Event::TcpAnnounce { + connection: ConnectionContext::new( + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), + server_service_binding.clone(), + ), + info_hash: sample_info_hash(), + announcement: peer_announcement, + }; + + announce_events_match(event, &expected_event) + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + let http_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(http_stats_event_sender_mock)); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services_with_config(&tracker_with_an_ipv6_external_ip()); - core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; - let peer = peer_with_the_ipv4_loopback_ip(); + core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); @@ -452,7 
+501,7 @@ mod tests { ); let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, None) + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding_clone, None) .await .unwrap(); } @@ -460,20 +509,32 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let peer = sample_peer_using_ipv6(); + let remote_client_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Announce)) + .expect_send() + .with(predicate::function(move |event| { + let expected_event = Event::TcpAnnounce { + connection: ConnectionContext::new( + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), + server_service_binding.clone(), + ), + info_hash: sample_info_hash(), + announcement: peer, + }; + announce_events_match(event, &expected_event) + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let http_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(http_stats_event_sender_mock)); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; - let peer = sample_peer_using_ipv6(); - let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); let 
announce_service = AnnounceService::new( @@ -484,8 +545,11 @@ mod tests { core_http_tracker_services.http_stats_event_sender.clone(), ); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, None) + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 53eed0361..4587bc90a 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -7,19 +7,21 @@ //! //! It also sends an [`http_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. -use std::net::IpAddr; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; -use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources, PeerIpResolutionError}; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ + resolve_remote_client_addr, ClientIpSources, PeerIpResolutionError, RemoteClientAddr, +}; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::{self, Key}; use bittorrent_tracker_core::error::{ScrapeError, TrackerCoreError, WhitelistError}; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; +use torrust_tracker_primitives::service_binding::ServiceBinding; -use crate::statistics; +use crate::event::{ConnectionContext, Event}; /// The HTTP tracker `scrape` service. 
/// @@ -37,7 +39,7 @@ pub struct ScrapeService { core_config: Arc<Core>, scrape_handler: Arc<ScrapeHandler>, authentication_service: Arc<AuthenticationService>, - opt_http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, + opt_http_stats_event_sender: crate::event::sender::Sender, } impl ScrapeService { @@ -46,7 +48,7 @@ impl ScrapeService { core_config: Arc<Core>, scrape_handler: Arc<ScrapeHandler>, authentication_service: Arc<AuthenticationService>, - opt_http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, + opt_http_stats_event_sender: crate::event::sender::Sender, ) -> Self { Self { core_config, @@ -70,17 +72,18 @@ impl ScrapeService { &self, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, + server_service_binding: &ServiceBinding, maybe_key: Option<Key>, ) -> Result<ScrapeData, HttpScrapeError> { let scrape_data = if self.authentication_is_required() && !self.is_authenticated(maybe_key).await { ScrapeData::zeroed(&scrape_request.info_hashes) } else { - self.scrape_handler.scrape(&scrape_request.info_hashes).await? + self.scrape_handler.handle_scrape(&scrape_request.info_hashes).await? }; - let remote_client_ip = self.resolve_remote_client_ip(client_ip_sources)?; + let remote_client_addr = resolve_remote_client_addr(&self.core_config.net.on_reverse_proxy.into(), client_ip_sources)?; - self.send_stats_event(&remote_client_ip).await; + self.send_event(remote_client_addr, server_service_binding.clone()).await; Ok(scrape_data) } @@ -97,18 +100,15 @@ impl ScrapeService { false } - /// Resolves the client's real IP address considering proxy headers. 
- fn resolve_remote_client_ip(&self, client_ip_sources: &ClientIpSources) -> Result<IpAddr, PeerIpResolutionError> { - peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) - } - - async fn send_stats_event(&self, original_peer_ip: &IpAddr) { + async fn send_event(&self, remote_client_addr: RemoteClientAddr, server_service_binding: ServiceBinding) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { - let event = match original_peer_ip { - IpAddr::V4(_) => statistics::event::Event::Tcp4Scrape, - IpAddr::V6(_) => statistics::event::Event::Tcp6Scrape, + let event = Event::TcpScrape { + connection: ConnectionContext::new(remote_client_addr, server_service_binding), }; - http_stats_event_sender.send_event(event).await; + + tracing::debug!("Sending TcpScrape event: {:?}", event); + + http_stats_event_sender.send(event).await; } } } @@ -176,17 +176,17 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::mpsc::error::SendError; use torrust_tracker_configuration::Configuration; + use torrust_tracker_events::sender::SendError; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - use crate::statistics; + use crate::event::Event; use crate::tests::sample_info_hash; struct Container { @@ -200,7 +200,7 @@ mod tests { let 
in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); @@ -208,7 +208,7 @@ mod tests { &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); @@ -238,30 +238,36 @@ mod tests { mock! { HttpStatsEventSender {} - impl statistics::event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option<Result<(),SendError<statistics::event::Event> > > > ; + impl torrust_tracker_events::sender::Sender for HttpStatsEventSender { + type Event = Event; + + fn send(&self, event: Event) -> BoxFuture<'static,Option<Result<usize,SendError<Event> > > > ; } } mod with_real_data { use std::future; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; - use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, RemoteClientAddr, ResolvedIp}; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use 
torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; + use crate::event::bus::EventBus; + use crate::event::sender::Broadcaster; + use crate::event::{ConnectionContext, Event}; use crate::services::scrape::tests::{ initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; - use crate::statistics; use crate::tests::sample_info_hash; #[tokio::test] @@ -269,8 +275,11 @@ mod tests { let configuration = configuration::ephemeral_public(); let core_config = Arc::new(configuration.core.clone()); - let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(false); - let http_stats_event_sender = Arc::new(http_stats_event_sender); + // HTTP core stats + let http_core_broadcaster = Broadcaster::default(); + let http_stats_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, http_core_broadcaster.clone())); + + let http_stats_event_sender = http_stats_event_bus.sender(); let container = initialize_services_with_configuration(&configuration); @@ -282,7 +291,7 @@ mod tests { let original_peer_ip = peer.ip(); container .announce_handler - .announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) + .handle_announcement(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -292,9 +301,12 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(original_peer_ip), + connection_info_socket_address: Some(SocketAddr::new(original_peer_ip, 8080)), }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let scrape_service = 
Arc::new(ScrapeService::new( core_config.clone(), container.scrape_handler.clone(), @@ -303,7 +315,7 @@ mod tests { )); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); @@ -326,12 +338,19 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Scrape)) + .expect_send() + .with(eq(Event::TcpScrape { + connection: ConnectionContext::new( + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1))), + Some(8080), + ), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), + ), + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let http_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(http_stats_event_sender_mock)); let container = initialize_services_with_configuration(&config); @@ -343,9 +362,12 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer_ip), + connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), container.scrape_handler.clone(), @@ -354,23 +376,35 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await 
.unwrap(); } #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let config = configuration::ephemeral(); let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Scrape)) + .expect_send() + .with(eq(Event::TcpScrape { + connection: ConnectionContext::new( + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V6(Ipv6Addr::new( + 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, + ))), + Some(8080), + ), + server_service_binding, + ), + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let http_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(http_stats_event_sender_mock)); let container = initialize_services_with_configuration(&config); @@ -382,9 +416,12 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer_ip), + connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), container.scrape_handler.clone(), @@ -393,7 +430,7 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } @@ -402,21 +439,25 @@ mod tests { 
mod with_zeroed_data { use std::future; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; - use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, RemoteClientAddr, ResolvedIp}; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_test_helpers::configuration; + use crate::event::bus::EventBus; + use crate::event::sender::Broadcaster; + use crate::event::{ConnectionContext, Event}; use crate::services::scrape::tests::{ initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; - use crate::statistics; use crate::tests::sample_info_hash; #[tokio::test] @@ -426,8 +467,11 @@ mod tests { let container = initialize_services_with_configuration(&config); - let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(false); - let http_stats_event_sender = Arc::new(http_stats_event_sender); + // HTTP core stats + let http_core_broadcaster = Broadcaster::default(); + let http_stats_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, http_core_broadcaster.clone())); + + let http_stats_event_sender = http_stats_event_bus.sender(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; @@ -437,7 +481,7 @@ mod tests { let original_peer_ip = peer.ip(); container .announce_handler - .announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) + .handle_announcement(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) 
.await .unwrap(); @@ -447,9 +491,12 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(original_peer_ip), + connection_info_socket_address: Some(SocketAddr::new(original_peer_ip, 8080)), }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), container.scrape_handler.clone(), @@ -458,7 +505,7 @@ mod tests { )); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); @@ -475,12 +522,19 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Scrape)) + .expect_send() + .with(eq(Event::TcpScrape { + connection: ConnectionContext::new( + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1))), + Some(8080), + ), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), + ), + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let http_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(http_stats_event_sender_mock)); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -490,9 +544,12 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer_ip), + connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; + let server_socket_addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), container.scrape_handler.clone(), @@ -501,25 +558,37 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let config = configuration::ephemeral(); let container = initialize_services_with_configuration(&config); let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Scrape)) + .expect_send() + .with(eq(Event::TcpScrape { + connection: ConnectionContext::new( + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V6(Ipv6Addr::new( + 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, + ))), + Some(8080), + ), + server_service_binding, + ), + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let http_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(http_stats_event_sender_mock)); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); @@ -529,9 +598,12 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer_ip), + connection_info_socket_address: 
Some(SocketAddr::new(peer_ip, 8080)), }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), container.scrape_handler.clone(), @@ -540,7 +612,7 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index b0a0c186f..37c7a26b5 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,22 +1,48 @@ -use crate::statistics::event::Event; +use std::sync::Arc; + +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::Event; use crate::statistics::repository::Repository; +use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; -pub async fn handle_event(event: Event, stats_repository: &Repository) { +pub async fn handle_event(event: Event, stats_repository: &Arc<Repository>, now: DurationSinceUnixEpoch) { match event { - // TCP4 - Event::Tcp4Announce => { - stats_repository.increase_tcp4_announces().await; - } - Event::Tcp4Scrape => { - stats_repository.increase_tcp4_scrapes().await; + Event::TcpAnnounce { connection, .. 
} => { + let mut label_set = LabelSet::from(connection); + label_set.upsert(label_name!("request_kind"), LabelValue::new("announce")); + + match stats_repository + .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) + .await + { + Ok(()) => { + tracing::debug!( + "Successfully increased the counter for HTTP announce requests received: {}", + label_set + ); + } + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } - - // TCP6 - Event::Tcp6Announce => { - stats_repository.increase_tcp6_announces().await; - } - Event::Tcp6Scrape => { - stats_repository.increase_tcp6_scrapes().await; + Event::TcpScrape { connection } => { + let mut label_set = LabelSet::from(connection); + label_set.upsert(label_name!("request_kind"), LabelValue::new("scrape")); + + match stats_repository + .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) + .await + { + Ok(()) => { + tracing::debug!( + "Successfully increased the counter for HTTP scrape requests received: {}", + label_set + ); + } + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } } @@ -25,51 +51,116 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{RemoteClientAddr, ResolvedIp}; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; use crate::statistics::event::handler::handle_event; - use crate::statistics::event::Event; use crate::statistics::repository::Repository; + use crate::tests::{sample_info_hash, sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::CurrentClock; #[tokio::test] async fn 
should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp4Announce, &stats_repository).await; + let stats_repository = Arc::new(Repository::new()); + let peer = sample_peer_using_ipv4(); + let remote_client_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)); + + handle_event( + Event::TcpAnnounce { + connection: ConnectionContext::new( + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), + ), + info_hash: sample_info_hash(), + announcement: peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, 1); + assert_eq!(stats.tcp4_announces_handled(), 1); } #[tokio::test] async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp4Scrape, &stats_repository).await; + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TcpScrape { + connection: ConnectionContext::new( + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2))), + Some(8080), + ), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp4_scrapes_handled, 1); + assert_eq!(stats.tcp4_scrapes_handled(), 1); } #[tokio::test] async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp6Announce, &stats_repository).await; + let stats_repository = Arc::new(Repository::new()); + let peer = sample_peer_using_ipv6(); + let remote_client_ip = 
IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); + + handle_event( + Event::TcpAnnounce { + connection: ConnectionContext::new( + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 7070)).unwrap(), + ), + info_hash: sample_info_hash(), + announcement: peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 1); + assert_eq!(stats.tcp6_announces_handled(), 1); } #[tokio::test] async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp6Scrape, &stats_repository).await; + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TcpScrape { + connection: ConnectionContext::new( + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V6(Ipv6Addr::new( + 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, + ))), + Some(8080), + ), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 7070)).unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp6_scrapes_handled, 1); + assert_eq!(stats.tcp6_scrapes_handled(), 1); } } diff --git a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index f1a2e25de..ff2937a59 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -1,11 +1,58 @@ -use tokio::sync::mpsc; +use std::sync::Arc; + +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; use 
super::handler::handle_event; -use super::Event; +use crate::event::receiver::Receiver; use crate::statistics::repository::Repository; +use crate::{CurrentClock, HTTP_TRACKER_LOG_TARGET}; + +#[must_use] +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc<Repository>, +) -> JoinHandle<()> { + let stats_repository = repository.clone(); + + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, cancellation_token, stats_repository).await; + + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc<Repository>) { + loop { + tokio::select! { + biased; + + () = cancellation_token.cancelled() => { + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down HTTP tracker core event listener."); + break; + } -pub async fn dispatch_events(mut receiver: mpsc::Receiver<Event>, stats_repository: Repository) { - while let Some(event) = receiver.recv().await { - handle_event(event, &stats_repository).await; + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Http tracker core statistics receiver closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: HTTP_TRACKER_LOG_TARGET, "Http tracker core statistics receiver lagged by {} events.", n); + } + } + } + } + } + } } } diff --git a/packages/http-tracker-core/src/statistics/event/mod.rs b/packages/http-tracker-core/src/statistics/event/mod.rs index e25148666..dae683398 100644 --- a/packages/http-tracker-core/src/statistics/event/mod.rs +++ 
b/packages/http-tracker-core/src/statistics/event/mod.rs @@ -1,21 +1,2 @@ pub mod handler; pub mod listener; -pub mod sender; - -/// An statistics event. It is used to collect tracker metrics. -/// -/// - `Tcp` prefix means the event was triggered by the HTTP tracker -/// - `Udp` prefix means the event was triggered by the UDP tracker -/// - `4` or `6` prefixes means the IP version used by the peer -/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` -/// -/// > NOTE: HTTP trackers do not use `connection` requests. -#[derive(Debug, PartialEq, Eq)] -pub enum Event { - // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } - // Attributes are enums too. - Tcp4Announce, - Tcp4Scrape, - Tcp6Announce, - Tcp6Scrape, -} diff --git a/packages/http-tracker-core/src/statistics/event/sender.rs b/packages/http-tracker-core/src/statistics/event/sender.rs deleted file mode 100644 index ca4b4e210..000000000 --- a/packages/http-tracker-core/src/statistics/event/sender.rs +++ /dev/null @@ -1,29 +0,0 @@ -use futures::future::BoxFuture; -use futures::FutureExt; -#[cfg(test)] -use mockall::{automock, predicate::str}; -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::SendError; - -use super::Event; - -/// A trait to allow sending statistics events -#[cfg_attr(test, automock)] -pub trait Sender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option<Result<(), SendError<Event>>>>; -} - -/// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. -/// -/// It uses a channel sender to send the statistic events. 
The channel is created by a -/// [`statistics::Keeper`](crate::statistics::keeper::Keeper) -#[allow(clippy::module_name_repetitions)] -pub struct ChannelSender { - pub(crate) sender: mpsc::Sender<Event>, -} - -impl Sender for ChannelSender { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option<Result<(), SendError<Event>>>> { - async move { Some(self.sender.send(event).await) }.boxed() - } -} diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs deleted file mode 100644 index ae5c3276e..000000000 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ /dev/null @@ -1,77 +0,0 @@ -use tokio::sync::mpsc; - -use super::event::listener::dispatch_events; -use super::event::sender::{ChannelSender, Sender}; -use super::event::Event; -use super::repository::Repository; - -const CHANNEL_BUFFER_SIZE: usize = 65_535; - -/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). -/// -/// It actively listen to new statistics events. When it receives a new event -/// it accordingly increases the counters. 
-pub struct Keeper { - pub repository: Repository, -} - -impl Default for Keeper { - fn default() -> Self { - Self::new() - } -} - -impl Keeper { - #[must_use] - pub fn new() -> Self { - Self { - repository: Repository::new(), - } - } - - #[must_use] - pub fn new_active_instance() -> (Box<dyn Sender>, Repository) { - let mut stats_tracker = Self::new(); - - let stats_event_sender = stats_tracker.run_event_listener(); - - (stats_event_sender, stats_tracker.repository) - } - - pub fn run_event_listener(&mut self) -> Box<dyn Sender> { - let (sender, receiver) = mpsc::channel::<Event>(CHANNEL_BUFFER_SIZE); - - let stats_repository = self.repository.clone(); - - tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); - - Box::new(ChannelSender { sender }) - } -} - -#[cfg(test)] -mod tests { - use crate::statistics::event::Event; - use crate::statistics::keeper::Keeper; - use crate::statistics::metrics::Metrics; - - #[tokio::test] - async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::new(); - - let stats = stats_tracker.repository.get_stats().await; - - assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); - } - - #[tokio::test] - async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = Keeper::new(); - - let event_sender = stats_tracker.run_event_listener(); - - let result = event_sender.send_event(Event::Tcp4Announce).await; - - assert!(result.is_some()); - } -} diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 6c102770b..00d09b803 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -1,22 +1,97 @@ +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; +use 
torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; + /// Metrics collected by the tracker. -/// -/// - Number of connections handled -/// - Number of `announce` requests handled -/// - Number of `scrape` request handled -/// -/// These metrics are collected for each connection type: UDP and HTTP -/// and also for each IP version used by the peers: IPv4 and IPv6. -#[derive(Debug, PartialEq, Default)] +#[derive(Debug, Clone, PartialEq, Default, Serialize)] pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increase_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } +} + +impl Metrics { /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. - pub tcp4_announces_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp4_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. 
- pub tcp4_scrapes_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp4_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. - pub tcp6_announces_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp6_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. - pub tcp6_scrapes_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp6_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() as u64 + } } diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index 939a41061..3ae355471 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -1,6 +1,23 @@ pub mod event; -pub mod keeper; pub mod metrics; pub mod repository; -pub mod services; -pub mod setup; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::unit::Unit; + +pub const HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL: &str = "http_tracker_core_requests_received_total"; + 
+#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + metrics.metric_collection.describe_counter( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of HTTP requests received")), + ); + + metrics +} diff --git a/packages/http-tracker-core/src/statistics/repository.rs b/packages/http-tracker-core/src/statistics/repository.rs index 5e15fc298..ea027f5c6 100644 --- a/packages/http-tracker-core/src/statistics/repository.rs +++ b/packages/http-tracker-core/src/statistics/repository.rs @@ -1,7 +1,12 @@ use std::sync::Arc; use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use super::describe_metrics; use super::metrics::Metrics; /// A repository for the tracker metrics. @@ -19,36 +24,31 @@ impl Default for Repository { impl Repository { #[must_use] pub fn new() -> Self { - Self { - stats: Arc::new(RwLock::new(Metrics::default())), - } + let stats = Arc::new(RwLock::new(describe_metrics())); + + Self { stats } } pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { self.stats.read().await } - pub async fn increase_tcp4_announces(&self) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. 
+ pub async fn increase_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_announces_handled += 1; - drop(stats_lock); - } - pub async fn increase_tcp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_scrapes_handled += 1; - drop(stats_lock); - } + let result = stats_lock.increase_counter(metric_name, labels, now); - pub async fn increase_tcp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_announces_handled += 1; drop(stats_lock); - } - pub async fn increase_tcp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_scrapes_handled += 1; - drop(stats_lock); + result } } diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs deleted file mode 100644 index dce7098b9..000000000 --- a/packages/http-tracker-core/src/statistics/services.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! Statistics services. -//! -//! It includes: -//! -//! - A [`factory`](crate::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::statistics::metrics::Metrics). -//! -//! Tracker metrics are collected using a Publisher-Subscribe pattern. -//! -//! The factory function builds two structs: -//! -//! - An statistics event [`Sender`](crate::statistics::event::sender::Sender) -//! - An statistics [`Repository`] -//! -//! ```text -//! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); -//! ``` -//! -//! The statistics repository is responsible for storing the metrics in memory. -//! The statistics event sender allows sending events related to metrics. -//! There is an event listener that is receiving all the events and processing them with an event handler. 
-//! Then, the event handler updates the metrics depending on the received event. -use std::sync::Arc; - -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - -use crate::statistics::metrics::Metrics; -use crate::statistics::repository::Repository; - -/// All the metrics collected by the tracker. -#[derive(Debug, PartialEq)] -pub struct TrackerMetrics { - /// Domain level metrics. - /// - /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, - - /// Application level metrics. Usage statistics/metrics. - /// - /// Metrics about how the tracker is been used (number of number of http scrape requests, etcetera) - pub protocol_metrics: Metrics, -} - -/// It returns all the [`TrackerMetrics`] -pub async fn get_metrics( - in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, - stats_repository: Arc<Repository>, -) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); - let stats = stats_repository.get_stats().await; - - TrackerMetrics { - torrents_metrics, - protocol_metrics: Metrics { - // TCPv4 - tcp4_announces_handled: stats.tcp4_announces_handled, - tcp4_scrapes_handled: stats.tcp4_scrapes_handled, - // TCPv6 - tcp6_announces_handled: stats.tcp6_announces_handled, - tcp6_scrapes_handled: stats.tcp6_scrapes_handled, - }, - } -} - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::{self}; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - use torrust_tracker_test_helpers::configuration; - - use crate::statistics; - use crate::statistics::services::{get_metrics, TrackerMetrics}; - - pub fn tracker_configuration() -> Configuration { - 
configuration::ephemeral() - } - - #[tokio::test] - async fn the_statistics_service_should_return_the_tracker_metrics() { - let config = tracker_configuration(); - - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let (_http_stats_event_sender, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_repository = Arc::new(http_stats_repository); - - let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), http_stats_repository.clone()).await; - - assert_eq!( - tracker_metrics, - TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), - protocol_metrics: statistics::metrics::Metrics::default(), - } - ); - } -} diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs deleted file mode 100644 index d3114a75e..000000000 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! Setup for the tracker statistics. -//! -//! The [`factory`] function builds the structs needed for handling the tracker metrics. -use crate::statistics; - -/// It builds the structs needed for handling the tracker metrics. -/// -/// It returns: -/// -/// - An statistics event [`Sender`](crate::statistics::event::sender::Sender) that allows you to send events related to statistics. -/// - An statistics [`Repository`](crate::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. -/// -/// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics -/// events are sent are received but not dispatched to the handler. 
-#[must_use] -pub fn factory( - tracker_usage_statistics: bool, -) -> ( - Option<Box<dyn statistics::event::sender::Sender>>, - statistics::repository::Repository, -) { - let mut stats_event_sender = None; - - let mut stats_tracker = statistics::keeper::Keeper::new(); - - if tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_event_listener()); - } - - (stats_event_sender, stats_tracker.repository) -} - -#[cfg(test)] -mod test { - use super::factory; - - #[tokio::test] - async fn should_not_send_any_event_when_statistics_are_disabled() { - let tracker_usage_statistics = false; - - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); - - assert!(stats_event_sender.is_none()); - } - - #[tokio::test] - async fn should_send_events_when_statistics_are_enabled() { - let tracker_usage_statistics = true; - - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); - - assert!(stats_event_sender.is_some()); - } -} diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 29b0dfb2c..232a6113f 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to provide error decorator with the location and the source of the original error." -keywords = ["errors", "helper", "library"] +keywords = [ "errors", "helper", "library" ] name = "torrust-tracker-located-error" readme = "README.md" diff --git a/packages/metrics/.gitignore b/packages/metrics/.gitignore new file mode 100644 index 000000000..6350e9868 --- /dev/null +++ b/packages/metrics/.gitignore @@ -0,0 +1 @@ +.coverage diff --git a/packages/metrics/Cargo.toml b/packages/metrics/Cargo.toml new file mode 100644 index 000000000..b6d327d70 --- /dev/null +++ b/packages/metrics/Cargo.toml @@ -0,0 +1,30 @@ +[package] +description = "A library with the primitive types shared by the Torrust tracker packages." 
+keywords = [ "api", "library", "metrics" ] +name = "torrust-tracker-metrics" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +chrono = { version = "0", default-features = false, features = [ "clock" ] } +derive_more = { version = "2", features = [ "constructor" ] } +serde = { version = "1", features = [ "derive" ] } +serde_json = "1.0.140" +thiserror = "2" +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tracing = "0.1.41" + +[dev-dependencies] +approx = "0.5.1" +formatjson = "0.3.1" +pretty_assertions = "1.4.1" +rstest = "0.25.0" diff --git a/packages/metrics/LICENSE b/packages/metrics/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/metrics/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. 
For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/packages/metrics/README.md b/packages/metrics/README.md new file mode 100644 index 000000000..3d1d94c5f --- /dev/null +++ b/packages/metrics/README.md @@ -0,0 +1,210 @@ +# Torrust Tracker Metrics + +A comprehensive metrics library providing type-safe metric collection, aggregation, and Prometheus export functionality for the [Torrust Tracker](https://github.com/torrust/torrust-tracker) ecosystem. + +## Overview + +This library offers a robust metrics system designed specifically for tracking and monitoring BitTorrent tracker performance. It provides type-safe metric collection with support for labels, time-series data, and multiple export formats including Prometheus. 
+ +## Key Features + +- **Type-Safe Metrics**: Strongly typed `Counter` and `Gauge` metrics with compile-time guarantees +- **Label Support**: Rich labeling system for multi-dimensional metrics +- **Time-Series Data**: Built-in support for timestamped samples +- **Prometheus Export**: Native Prometheus format serialization +- **Aggregation Functions**: Sum operations with mathematically appropriate return types +- **JSON Serialization**: Full serde support for all metric types +- **Memory Efficient**: Optimized data structures for high-performance scenarios + +## Quick Start + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +torrust-tracker-metrics = "3.0.0" +``` + +### Basic Usage + +```rust +use torrust_tracker_metrics::{ + metric_collection::MetricCollection, + label::{LabelSet, LabelValue}, + metric_name, label_name, +}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +// Create a metric collection +let mut metrics = MetricCollection::default(); + +// Define labels +let labels: LabelSet = [ + (label_name!("server"), LabelValue::new("tracker-01")), + (label_name!("protocol"), LabelValue::new("http")), +].into(); + +// Record metrics +let time = DurationSinceUnixEpoch::from_secs(1234567890); +metrics.increment_counter( + &metric_name!("requests_total"), + &labels, + time, +)?; + +metrics.set_gauge( + &metric_name!("active_connections"), + &labels, + 42.0, + time, +)?; + +// Export to Prometheus format +let prometheus_output = metrics.to_prometheus(); +println!("{}", prometheus_output); +``` + +### Metric Aggregation + +```rust +use torrust_tracker_metrics::metric_collection::aggregate::{Sum, Avg}; + +// Sum all counter values matching specific labels +let total_requests = metrics.sum( + &metric_name!("requests_total"), + &[("server", "tracker-01")].into(), +); + +println!("Total requests: {:?}", total_requests); + +// Calculate average of gauge values matching specific labels +let avg_response_time = metrics.avg( + 
&metric_name!("response_time_seconds"), + &[("endpoint", "/announce")].into(), +); + +println!("Average response time: {:?}", avg_response_time); +``` + +## Architecture + +### Core Components + +- **`Counter`**: Monotonically increasing integer values (u64) +- **`Gauge`**: Arbitrary floating-point values that can increase or decrease (f64) +- **`Metric<T>`**: Generic metric container with metadata (name, description, unit) +- **`MetricCollection`**: Type-safe collection managing both counters and gauges +- **`LabelSet`**: Key-value pairs for metric dimensionality +- **`Sample`**: Timestamped metric values with associated labels + +### Type System + +The library uses Rust's type system to ensure metric safety: + +```rust +// Counter operations return u64 +let counter_sum: Option<u64> = counter_collection.sum(&name, &labels); + +// Gauge operations return f64 +let gauge_sum: Option<f64> = gauge_collection.sum(&name, &labels); + +// Mixed collections convert to f64 for compatibility +let mixed_sum: Option<f64> = metric_collection.sum(&name, &labels); +``` + +### Module Structure + +```output +src/ +├── counter.rs # Counter metric type +├── gauge.rs # Gauge metric type +├── metric/ # Generic metric container +│ ├── mod.rs +│ ├── name.rs # Metric naming +│ ├── description.rs # Metric descriptions +│ └── aggregate/ # Metric-level aggregations +├── metric_collection/ # Collection management +│ ├── mod.rs +│ └── aggregate/ # Collection-level aggregations +├── label/ # Label system +│ ├── name.rs # Label names +│ ├── value.rs # Label values +│ └── set.rs # Label collections +├── sample.rs # Timestamped values +├── sample_collection.rs # Sample management +├── prometheus.rs # Prometheus export +└── unit.rs # Measurement units +``` + +## Documentation + +- [Crate documentation](https://docs.rs/torrust-tracker-metrics) +- [API Reference](https://docs.rs/torrust-tracker-metrics/latest/torrust_tracker_metrics/) + +## Development + +### Code Coverage + +Run basic coverage 
report:
+
+```console
+cargo llvm-cov --package torrust-tracker-metrics
+```
+
+Generate LCOV report (for IDE integration):
+
+```console
+mkdir -p ./.coverage
+cargo llvm-cov --package torrust-tracker-metrics --lcov --output-path=./.coverage/lcov.info
+```
+
+Generate detailed HTML coverage report:
+
+```console
+mkdir -p ./.coverage
+cargo llvm-cov --package torrust-tracker-metrics --html --output-dir ./.coverage
+```
+
+Open the coverage report in your browser:
+
+```console
+open ./.coverage/index.html # macOS
+xdg-open ./.coverage/index.html # Linux
+```
+
+## Performance Considerations
+
+- **Memory Usage**: Metrics are stored in-memory with efficient HashMap-based collections
+- **Label Cardinality**: Be mindful of label combinations as they create separate time series
+- **Aggregation**: Sum operations are optimized for both single-type and mixed collections
+
+## Compatibility
+
+This library is designed to be compatible with the standard Rust [metrics](https://crates.io/crates/metrics) crate ecosystem where possible.
+
+## Contributing
+
+We welcome contributions! Please see the main [Torrust Tracker repository](https://github.com/torrust/torrust-tracker) for contribution guidelines.
+
+### Reporting Issues
+
+- [Bug Reports](https://github.com/torrust/torrust-tracker/issues/new?template=bug_report.md)
+- [Feature Requests](https://github.com/torrust/torrust-tracker/issues/new?template=feature_request.md)
+
+## Acknowledgements
+
+This library draws inspiration from the Rust [metrics](https://crates.io/crates/metrics) crate, incorporating compatible APIs and naming conventions where possible. We may consider migrating to the standard metrics crate in future versions while maintaining our specialized functionality.
+
+Special thanks to the Rust metrics ecosystem contributors for establishing excellent patterns for metrics collection and export.
+ +## License + +This project is licensed under the [GNU AFFERO GENERAL PUBLIC LICENSE v3.0](./LICENSE). + +## Related Projects + +- [Torrust Tracker](https://github.com/torrust/torrust-tracker) - The main BitTorrent tracker +- [metrics](https://crates.io/crates/metrics) - Standard Rust metrics facade +- [prometheus](https://crates.io/crates/prometheus) - Prometheus client library diff --git a/packages/metrics/cSpell.json b/packages/metrics/cSpell.json new file mode 100644 index 000000000..8f5002833 --- /dev/null +++ b/packages/metrics/cSpell.json @@ -0,0 +1,21 @@ +{ + "$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json", + "version": "0.2", + "dictionaryDefinitions": [ + { + "name": "project-words", + "path": "../../project-words.txt", + "addWords": true + } + ], + "dictionaries": ["project-words"], + "enableFiletypes": [ + "dockerfile", + "shellscript", + "toml" + ], + "ignorePaths": [ + "target", + "/project-words.txt" + ] +} diff --git a/packages/metrics/src/counter.rs b/packages/metrics/src/counter.rs new file mode 100644 index 000000000..0e2002181 --- /dev/null +++ b/packages/metrics/src/counter.rs @@ -0,0 +1,266 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + +use super::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Default, PartialEq, Serialize, Deserialize)] +pub struct Counter(u64); + +impl Counter { + #[must_use] + pub fn new(value: u64) -> Self { + Self(value) + } + + #[must_use] + pub fn value(&self) -> u64 { + self.0 + } + + #[must_use] + pub fn primitive(&self) -> u64 { + self.value() + } + + pub fn increment(&mut self, value: u64) { + self.0 += value; + } + + pub fn absolute(&mut self, value: u64) { + self.0 = value; + } +} + +impl From<u32> for Counter { + fn from(value: u32) -> Self { + Self(u64::from(value)) + } +} + +impl From<u64> for Counter { + fn from(value: u64) -> Self { + Self(value) + } +} + +impl From<i32> for Counter { + fn from(value: i32) -> 
Self { + #[allow(clippy::cast_sign_loss)] + Self(value as u64) + } +} + +impl From<Counter> for u64 { + fn from(counter: Counter) -> Self { + counter.value() + } +} + +impl PrometheusSerializable for Counter { + fn to_prometheus(&self) -> String { + format!("{}", self.value()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_should_be_created_from_integer_values() { + let counter = Counter::new(0); + assert_eq!(counter.value(), 0); + } + + #[test] + fn it_could_be_converted_from_u64() { + let counter: Counter = 42.into(); + assert_eq!(counter.value(), 42); + } + + #[test] + fn it_could_be_converted_into_u64() { + let counter = Counter::new(42); + let value: u64 = counter.into(); + assert_eq!(value, 42); + } + + #[test] + fn it_could_be_incremented() { + let mut counter = Counter::new(0); + counter.increment(1); + assert_eq!(counter.value(), 1); + + counter.increment(2); + assert_eq!(counter.value(), 3); + } + + #[test] + fn it_could_set_to_an_absolute_value() { + let mut counter = Counter::new(0); + counter.absolute(1); + assert_eq!(counter.value(), 1); + } + + #[test] + fn it_serializes_to_prometheus() { + let counter = Counter::new(42); + assert_eq!(counter.to_prometheus(), "42"); + } + + #[test] + fn it_could_be_converted_from_u32() { + let counter: Counter = 42u32.into(); + assert_eq!(counter.value(), 42); + } + + #[test] + fn it_could_be_converted_from_i32() { + let counter: Counter = 42i32.into(); + assert_eq!(counter.value(), 42); + } + + #[test] + fn it_should_return_primitive_value() { + let counter = Counter::new(123); + assert_eq!(counter.primitive(), 123); + } + + #[test] + fn it_should_handle_zero_value() { + let counter = Counter::new(0); + assert_eq!(counter.value(), 0); + assert_eq!(counter.primitive(), 0); + } + + #[test] + fn it_should_handle_large_values() { + let counter = Counter::new(u64::MAX); + assert_eq!(counter.value(), u64::MAX); + } + + #[test] + fn it_should_handle_u32_max_conversion() { + let counter: Counter = 
u32::MAX.into(); + assert_eq!(counter.value(), u64::from(u32::MAX)); + } + + #[test] + fn it_should_handle_i32_max_conversion() { + let counter: Counter = i32::MAX.into(); + assert_eq!(counter.value(), i32::MAX as u64); + } + + #[test] + fn it_should_handle_negative_i32_conversion() { + let counter: Counter = (-42i32).into(); + #[allow(clippy::cast_sign_loss)] + let expected = (-42i32) as u64; + assert_eq!(counter.value(), expected); + } + + #[test] + fn it_should_handle_i32_min_conversion() { + let counter: Counter = i32::MIN.into(); + #[allow(clippy::cast_sign_loss)] + let expected = i32::MIN as u64; + assert_eq!(counter.value(), expected); + } + + #[test] + fn it_should_handle_large_increments() { + let mut counter = Counter::new(100); + counter.increment(1000); + assert_eq!(counter.value(), 1100); + + counter.increment(u64::MAX - 1100); + assert_eq!(counter.value(), u64::MAX); + } + + #[test] + fn it_should_support_multiple_absolute_operations() { + let mut counter = Counter::new(0); + + counter.absolute(100); + assert_eq!(counter.value(), 100); + + counter.absolute(50); + assert_eq!(counter.value(), 50); + + counter.absolute(0); + assert_eq!(counter.value(), 0); + } + + #[test] + fn it_should_be_displayable() { + let counter = Counter::new(42); + assert_eq!(counter.to_string(), "42"); + + let counter = Counter::new(0); + assert_eq!(counter.to_string(), "0"); + } + + #[test] + fn it_should_be_debuggable() { + let counter = Counter::new(42); + let debug_string = format!("{counter:?}"); + assert_eq!(debug_string, "Counter(42)"); + } + + #[test] + fn it_should_be_cloneable() { + let counter = Counter::new(42); + let cloned_counter = counter.clone(); + assert_eq!(counter, cloned_counter); + assert_eq!(counter.value(), cloned_counter.value()); + } + + #[test] + fn it_should_support_equality_comparison() { + let counter1 = Counter::new(42); + let counter2 = Counter::new(42); + let counter3 = Counter::new(43); + + assert_eq!(counter1, counter2); + assert_ne!(counter1, 
counter3); + } + + #[test] + fn it_should_have_default_value() { + let counter = Counter::default(); + assert_eq!(counter.value(), 0); + } + + #[test] + fn it_should_handle_conversion_roundtrip() { + let original_value = 12345u64; + let counter = Counter::from(original_value); + let converted_back: u64 = counter.into(); + assert_eq!(original_value, converted_back); + } + + #[test] + fn it_should_handle_u32_conversion_roundtrip() { + let original_value = 12345u32; + let counter = Counter::from(original_value); + assert_eq!(counter.value(), u64::from(original_value)); + } + + #[test] + fn it_should_handle_i32_conversion_roundtrip() { + let original_value = 12345i32; + let counter = Counter::from(original_value); + #[allow(clippy::cast_sign_loss)] + let expected = original_value as u64; + assert_eq!(counter.value(), expected); + } + + #[test] + fn it_should_serialize_large_values_to_prometheus() { + let counter = Counter::new(u64::MAX); + assert_eq!(counter.to_prometheus(), u64::MAX.to_string()); + + let counter = Counter::new(0); + assert_eq!(counter.to_prometheus(), "0"); + } +} diff --git a/packages/metrics/src/gauge.rs b/packages/metrics/src/gauge.rs new file mode 100644 index 000000000..d0883715b --- /dev/null +++ b/packages/metrics/src/gauge.rs @@ -0,0 +1,240 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + +use super::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Default, PartialEq, Serialize, Deserialize)] +pub struct Gauge(f64); + +impl Gauge { + #[must_use] + pub fn new(value: f64) -> Self { + Self(value) + } + + #[must_use] + pub fn value(&self) -> f64 { + self.0 + } + + #[must_use] + pub fn primitive(&self) -> f64 { + self.value() + } + + pub fn set(&mut self, value: f64) { + self.0 = value; + } + + pub fn increment(&mut self, value: f64) { + self.0 += value; + } + + pub fn decrement(&mut self, value: f64) { + self.0 -= value; + } +} + +impl From<f32> for Gauge { + fn from(value: f32) -> Self { + 
Self(f64::from(value)) + } +} + +impl From<f64> for Gauge { + fn from(value: f64) -> Self { + Self(value) + } +} + +impl From<Gauge> for f64 { + fn from(counter: Gauge) -> Self { + counter.value() + } +} + +impl PrometheusSerializable for Gauge { + fn to_prometheus(&self) -> String { + format!("{}", self.value()) + } +} + +#[cfg(test)] +mod tests { + use approx::assert_relative_eq; + + use super::*; + + #[test] + fn it_should_be_created_from_integer_values() { + let gauge = Gauge::new(0.0); + assert_relative_eq!(gauge.value(), 0.0); + } + + #[test] + fn it_could_be_converted_from_u64() { + let gauge: Gauge = 42.0.into(); + assert_relative_eq!(gauge.value(), 42.0); + } + + #[test] + fn it_could_be_converted_into_i64() { + let gauge = Gauge::new(42.0); + let value: f64 = gauge.into(); + assert_relative_eq!(value, 42.0); + } + + #[test] + fn it_could_be_set() { + let mut gauge = Gauge::new(0.0); + gauge.set(1.0); + assert_relative_eq!(gauge.value(), 1.0); + } + + #[test] + fn it_could_be_incremented() { + let mut gauge = Gauge::new(0.0); + gauge.increment(1.0); + assert_relative_eq!(gauge.value(), 1.0); + } + + #[test] + fn it_could_be_decremented() { + let mut gauge = Gauge::new(1.0); + gauge.decrement(1.0); + assert_relative_eq!(gauge.value(), 0.0); + } + + #[test] + fn it_serializes_to_prometheus() { + let counter = Gauge::new(42.0); + assert_eq!(counter.to_prometheus(), "42"); + + let counter = Gauge::new(42.1); + assert_eq!(counter.to_prometheus(), "42.1"); + } + + #[test] + fn it_could_be_converted_from_f32() { + let gauge: Gauge = 42.5f32.into(); + assert_relative_eq!(gauge.value(), 42.5); + } + + #[test] + fn it_should_return_primitive_value() { + let gauge = Gauge::new(123.456); + assert_relative_eq!(gauge.primitive(), 123.456); + } + + #[test] + fn it_should_handle_zero_value() { + let gauge = Gauge::new(0.0); + assert_relative_eq!(gauge.value(), 0.0); + assert_relative_eq!(gauge.primitive(), 0.0); + } + + #[test] + fn it_should_handle_negative_values() { + 
let gauge = Gauge::new(-42.5); + assert_relative_eq!(gauge.value(), -42.5); + } + + #[test] + fn it_should_handle_large_values() { + let gauge = Gauge::new(f64::MAX); + assert_relative_eq!(gauge.value(), f64::MAX); + } + + #[test] + fn it_should_handle_infinity() { + let gauge = Gauge::new(f64::INFINITY); + assert_relative_eq!(gauge.value(), f64::INFINITY); + } + + #[test] + fn it_should_handle_nan() { + let gauge = Gauge::new(f64::NAN); + assert!(gauge.value().is_nan()); + } + + #[test] + fn it_should_be_displayable() { + let gauge = Gauge::new(42.5); + assert_eq!(gauge.to_string(), "42.5"); + + let gauge = Gauge::new(0.0); + assert_eq!(gauge.to_string(), "0"); + } + + #[test] + fn it_should_be_debuggable() { + let gauge = Gauge::new(42.5); + let debug_string = format!("{gauge:?}"); + assert_eq!(debug_string, "Gauge(42.5)"); + } + + #[test] + fn it_should_be_cloneable() { + let gauge = Gauge::new(42.5); + let cloned_gauge = gauge.clone(); + assert_eq!(gauge, cloned_gauge); + assert_relative_eq!(gauge.value(), cloned_gauge.value()); + } + + #[test] + fn it_should_support_equality_comparison() { + let gauge1 = Gauge::new(42.5); + let gauge2 = Gauge::new(42.5); + let gauge3 = Gauge::new(43.0); + + assert_eq!(gauge1, gauge2); + assert_ne!(gauge1, gauge3); + } + + #[test] + fn it_should_have_default_value() { + let gauge = Gauge::default(); + assert_relative_eq!(gauge.value(), 0.0); + } + + #[test] + fn it_should_handle_conversion_roundtrip() { + let original_value = 12345.678; + let gauge = Gauge::from(original_value); + let converted_back: f64 = gauge.into(); + assert_relative_eq!(original_value, converted_back); + } + + #[test] + fn it_should_handle_f32_conversion_roundtrip() { + let original_value = 12345.5f32; + let gauge = Gauge::from(original_value); + assert_relative_eq!(gauge.value(), f64::from(original_value)); + } + + #[test] + fn it_should_handle_multiple_operations() { + let mut gauge = Gauge::new(100.0); + + gauge.increment(50.0); + 
assert_relative_eq!(gauge.value(), 150.0); + + gauge.decrement(25.0); + assert_relative_eq!(gauge.value(), 125.0); + + gauge.set(200.0); + assert_relative_eq!(gauge.value(), 200.0); + } + + #[test] + fn it_should_serialize_special_values_to_prometheus() { + let gauge = Gauge::new(f64::INFINITY); + assert_eq!(gauge.to_prometheus(), "inf"); + + let gauge = Gauge::new(f64::NEG_INFINITY); + assert_eq!(gauge.to_prometheus(), "-inf"); + + let gauge = Gauge::new(f64::NAN); + assert_eq!(gauge.to_prometheus(), "NaN"); + } +} diff --git a/packages/metrics/src/label/mod.rs b/packages/metrics/src/label/mod.rs new file mode 100644 index 000000000..880fdbbb1 --- /dev/null +++ b/packages/metrics/src/label/mod.rs @@ -0,0 +1,9 @@ +pub mod name; +mod pair; +mod set; +pub mod value; + +pub type LabelName = name::LabelName; +pub type LabelValue = value::LabelValue; +pub type LabelPair = pair::LabelPair; +pub type LabelSet = set::LabelSet; diff --git a/packages/metrics/src/label/name.rs b/packages/metrics/src/label/name.rs new file mode 100644 index 000000000..194aeb2b3 --- /dev/null +++ b/packages/metrics/src/label/name.rs @@ -0,0 +1,126 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + +use crate::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Eq, PartialEq, Default, Deserialize, Serialize, Hash, Ord, PartialOrd)] +pub struct LabelName(String); + +impl LabelName { + /// Creates a new `LabelName` instance. + /// + /// # Panics + /// + /// Panics if the provided name is empty. + #[must_use] + pub fn new(name: &str) -> Self { + assert!(!name.is_empty(), "Label name cannot be empty."); + Self(name.to_owned()) + } +} + +impl PrometheusSerializable for LabelName { + /// In Prometheus: + /// + /// - Labels may contain ASCII letters, numbers, as well as underscores. + /// They must match the regex [a-zA-Z_][a-zA-Z0-9_]*. + /// - Label names beginning with __ (two "_") are reserved for internal + /// use. 
+ /// - Label values may contain any Unicode characters. + /// - Labels with an empty label value are considered equivalent to + /// labels that do not exist. + /// + /// The label name is changed: + /// + /// - If a label name starts with, or contains, an invalid character: + /// replace character with underscore. + /// - If the label name starts with two underscores: + /// add additional underscore (three underscores total) + fn to_prometheus(&self) -> String { + // Replace invalid characters with underscore + let processed: String = self + .0 + .chars() + .enumerate() + .map(|(i, c)| { + if i == 0 { + if c.is_ascii_alphabetic() || c == '_' { + c + } else { + '_' + } + } else if c.is_ascii_alphanumeric() || c == '_' { + c + } else { + '_' + } + }) + .collect(); + + // If the label name starts with two underscores, add an additional + if processed.starts_with("__") && !processed.starts_with("___") { + format!("_{processed}") + } else { + processed + } + } +} + +#[macro_export] +macro_rules! 
label_name { + ("") => { + compile_error!("Label name cannot be empty"); + }; + ($name:literal) => { + $crate::label::name::LabelName::new($name) + }; + ($name:ident) => { + $crate::label::name::LabelName::new($name) + }; +} +#[cfg(test)] +mod tests { + mod serialization_of_label_name_to_prometheus { + use rstest::rstest; + + use crate::label::LabelName; + use crate::prometheus::PrometheusSerializable; + + #[rstest] + #[case("1 valid name", "valid_name", "valid_name")] + #[case("2 leading underscore", "_leading_underscore", "_leading_underscore")] + #[case("3 leading lowercase", "v123", "v123")] + #[case("4 leading uppercase", "V123", "V123")] + fn valid_names_in_prometheus(#[case] case: &str, #[case] input: &str, #[case] output: &str) { + assert_eq!(label_name!(input).to_prometheus(), output, "{case} failed: {input:?}"); + } + + #[rstest] + #[case("1 invalid start 1", "9invalid_start", "_invalid_start")] + #[case("2 invalid start 2", "@test", "_test")] + #[case("3 invalid dash", "invalid-char", "invalid_char")] + #[case("4 invalid spaces", "spaces are bad", "spaces_are_bad")] + #[case("5 invalid special chars", "a!b@c#d$e%f^g&h*i(j)", "a_b_c_d_e_f_g_h_i_j_")] + #[case("6 invalid colon", "my:metric/version", "my_metric_version")] + #[case("7 all invalid characters", "!@#$%^&*()", "__________")] + #[case("8 non_ascii_characters", "ñaca©", "_aca_")] + fn names_that_need_changes_in_prometheus(#[case] case: &str, #[case] input: &str, #[case] output: &str) { + assert_eq!(label_name!(input).to_prometheus(), output, "{case} failed: {input:?}"); + } + + #[rstest] + #[case("1 double underscore start", "__private", "___private")] + #[case("2 double underscore only", "__", "___")] + #[case("3 processed to double underscore", "^^name", "___name")] + #[case("4 processed to double underscore after first char", "0__name", "___name")] + fn names_starting_with_double_underscore(#[case] case: &str, #[case] input: &str, #[case] output: &str) { + 
assert_eq!(label_name!(input).to_prometheus(), output, "{case} failed: {input:?}"); + } + + #[test] + #[should_panic(expected = "Label name cannot be empty.")] + fn empty_name() { + let _name = LabelName::new(""); + } + } +} diff --git a/packages/metrics/src/label/pair.rs b/packages/metrics/src/label/pair.rs new file mode 100644 index 000000000..858902451 --- /dev/null +++ b/packages/metrics/src/label/pair.rs @@ -0,0 +1,29 @@ +use super::{LabelName, LabelValue}; +use crate::prometheus::PrometheusSerializable; + +pub type LabelPair = (LabelName, LabelValue); + +// Generic implementation for any tuple (A, B) where A and B implement PrometheusSerializable +impl<A: PrometheusSerializable, B: PrometheusSerializable> PrometheusSerializable for (A, B) { + fn to_prometheus(&self) -> String { + format!("{}=\"{}\"", self.0.to_prometheus(), self.1.to_prometheus()) + } +} + +#[cfg(test)] +mod tests { + mod serialization_of_label_pair_to_prometheus { + use crate::label::LabelValue; + use crate::label_name; + use crate::prometheus::PrometheusSerializable; + + #[test] + fn test_label_pair_serialization_to_prometheus() { + let label_pair = (label_name!("label_name"), LabelValue::new("value")); + assert_eq!(label_pair.to_prometheus(), r#"label_name="value""#); + + let label_pair = (&label_name!("label_name"), &LabelValue::new("value")); + assert_eq!(label_pair.to_prometheus(), r#"label_name="value""#); + } + } +} diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs new file mode 100644 index 000000000..46256e4d5 --- /dev/null +++ b/packages/metrics/src/label/set.rs @@ -0,0 +1,584 @@ +use std::collections::btree_map::Iter; +use std::collections::BTreeMap; +use std::fmt::Display; + +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +use super::{LabelName, LabelPair, LabelValue}; +use crate::prometheus::PrometheusSerializable; + +#[derive(Debug, Clone, Eq, PartialEq, Default, Ord, PartialOrd, Hash)] +pub struct LabelSet { + items: 
BTreeMap<LabelName, LabelValue>, +} + +impl LabelSet { + #[must_use] + pub fn empty() -> Self { + Self { items: BTreeMap::new() } + } + + /// Insert a new label pair or update the value of an existing label. + pub fn upsert(&mut self, name: LabelName, value: LabelValue) { + self.items.insert(name, value); + } + + pub fn is_empty(&self) -> bool { + self.items.is_empty() + } + + pub fn contains_pair(&self, name: &LabelName, value: &LabelValue) -> bool { + match self.items.get(name) { + Some(existing_value) => existing_value == value, + None => false, + } + } + + pub fn matches(&self, criteria: &LabelSet) -> bool { + criteria.iter().all(|(name, value)| self.contains_pair(name, value)) + } + + pub fn iter(&self) -> Iter<'_, LabelName, LabelValue> { + self.items.iter() + } +} + +impl Display for LabelSet { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let items = self + .items + .iter() + .map(|(name, value)| format!("{name}=\"{value}\"")) + .collect::<Vec<_>>() + .join(","); + + write!(f, "{{{items}}}") + } +} + +impl From<BTreeMap<LabelName, LabelValue>> for LabelSet { + fn from(values: BTreeMap<LabelName, LabelValue>) -> Self { + Self { items: values } + } +} + +impl From<Vec<(&str, &str)>> for LabelSet { + fn from(vec: Vec<(&str, &str)>) -> Self { + let mut items = BTreeMap::new(); + + for (name, value) in vec { + items.insert(LabelName::new(name), LabelValue::new(value)); + } + + Self { items } + } +} + +impl From<Vec<(String, String)>> for LabelSet { + fn from(vec: Vec<(String, String)>) -> Self { + let mut items = BTreeMap::new(); + + for (name, value) in vec { + items.insert(LabelName::new(&name), LabelValue::new(&value)); + } + + Self { items } + } +} + +impl From<Vec<LabelPair>> for LabelSet { + fn from(vec: Vec<LabelPair>) -> Self { + let mut items = BTreeMap::new(); + + for (name, value) in vec { + items.insert(name, value); + } + + Self { items } + } +} + +impl From<Vec<SerializedLabel>> for LabelSet { + fn from(vec: 
Vec<SerializedLabel>) -> Self { + let mut items = BTreeMap::new(); + + for serialized_label in vec { + items.insert(serialized_label.name, serialized_label.value); + } + + Self { items } + } +} + +impl<const N: usize> From<[LabelPair; N]> for LabelSet { + fn from(arr: [LabelPair; N]) -> Self { + let values = BTreeMap::from(arr); + Self { items: values } + } +} + +impl<const N: usize> From<[(String, String); N]> for LabelSet { + fn from(arr: [(String, String); N]) -> Self { + let values = arr + .iter() + .map(|(name, value)| (LabelName::new(name), LabelValue::new(value))) + .collect::<BTreeMap<_, _>>(); + Self { items: values } + } +} + +impl<const N: usize> From<[(&str, &str); N]> for LabelSet { + fn from(arr: [(&str, &str); N]) -> Self { + let values = arr + .iter() + .map(|(name, value)| (LabelName::new(name), LabelValue::new(value))) + .collect::<BTreeMap<_, _>>(); + Self { items: values } + } +} + +impl From<LabelPair> for LabelSet { + fn from(label_pair: LabelPair) -> Self { + let mut set = BTreeMap::new(); + + set.insert(label_pair.0, label_pair.1); + + Self { items: set } + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Default, Deserialize, Serialize)] +struct SerializedLabel { + name: LabelName, + value: LabelValue, +} + +impl Serialize for LabelSet { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + self.items + .iter() + .map(|(name, value)| SerializedLabel { + name: name.clone(), + value: value.clone(), + }) + .collect::<Vec<_>>() + .serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for LabelSet { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let serialized_labels = Vec::<SerializedLabel>::deserialize(deserializer)?; + + Ok(LabelSet::from(serialized_labels)) + } +} + +impl PrometheusSerializable for LabelSet { + fn to_prometheus(&self) -> String { + if self.is_empty() { + return String::new(); + } + + let items = 
self.items.iter().fold(String::new(), |mut output, label_pair| { + if !output.is_empty() { + output.push(','); + } + + output.push_str(&label_pair.to_prometheus()); + + output + }); + + format!("{{{items}}}") + } +} + +#[cfg(test)] +mod tests { + + use std::collections::BTreeMap; + use std::hash::{DefaultHasher, Hash}; + + use pretty_assertions::assert_eq; + + use super::{LabelName, LabelValue}; + use crate::label::LabelSet; + use crate::label_name; + use crate::prometheus::PrometheusSerializable; + + fn sample_vec_of_label_pairs() -> Vec<(LabelName, LabelValue)> { + sample_array_of_label_pairs().into() + } + + fn sample_array_of_label_pairs() -> [(LabelName, LabelValue); 3] { + [ + (label_name!("server_service_binding_protocol"), LabelValue::new("http")), + (label_name!("server_service_binding_ip"), LabelValue::new("0.0.0.0")), + (label_name!("server_service_binding_port"), LabelValue::new("7070")), + ] + } + + #[test] + fn it_should_allow_inserting_a_new_label_pair() { + let mut label_set = LabelSet::default(); + + label_set.upsert(label_name!("label_name"), LabelValue::new("value")); + + assert_eq!( + label_set.items.get(&label_name!("label_name")).unwrap(), + &LabelValue::new("value") + ); + } + + #[test] + fn it_should_allow_updating_a_label_value() { + let mut label_set = LabelSet::default(); + + label_set.upsert(label_name!("label_name"), LabelValue::new("old value")); + label_set.upsert(label_name!("label_name"), LabelValue::new("new value")); + + assert_eq!( + label_set.items.get(&label_name!("label_name")).unwrap(), + &LabelValue::new("new value") + ); + } + + #[test] + fn it_should_allow_serializing_to_json_as_an_array_of_label_objects() { + let label_set = LabelSet::from((label_name!("label_name"), LabelValue::new("label value"))); + + let json = serde_json::to_string(&label_set).unwrap(); + + assert_eq!( + formatjson::format_json(&json).unwrap(), + formatjson::format_json( + r#" + [ + { + "name": "label_name", + "value": "label value" + } + ] + "# + ) 
+ .unwrap() + ); + } + + #[test] + fn it_should_allow_deserializing_from_json_as_an_array_of_label_objects() { + let json = formatjson::format_json( + r#" + [ + { + "name": "label_name", + "value": "label value" + } + ] + "#, + ) + .unwrap(); + + let label_set: LabelSet = serde_json::from_str(&json).unwrap(); + + assert_eq!( + label_set, + LabelSet::from((label_name!("label_name"), LabelValue::new("label value"))) + ); + } + + #[test] + fn it_should_allow_serializing_to_prometheus_format() { + let label_set = LabelSet::from((label_name!("label_name"), LabelValue::new("label value"))); + assert_eq!(label_set.to_prometheus(), r#"{label_name="label value"}"#); + } + + #[test] + fn it_should_handle_prometheus_format_with_special_characters() { + let label_set: LabelSet = vec![("label_with_underscores", "value_with_underscores")].into(); + assert_eq!( + label_set.to_prometheus(), + r#"{label_with_underscores="value_with_underscores"}"# + ); + } + + #[test] + fn it_should_alphabetically_order_labels_in_prometheus_format() { + let label_set = LabelSet::from([ + (label_name!("b_label_name"), LabelValue::new("b label value")), + (label_name!("a_label_name"), LabelValue::new("a label value")), + ]); + + assert_eq!( + label_set.to_prometheus(), + r#"{a_label_name="a label value",b_label_name="b label value"}"# + ); + } + + #[test] + fn it_should_allow_displaying() { + let label_set = LabelSet::from((label_name!("label_name"), LabelValue::new("label value"))); + + assert_eq!(label_set.to_string(), r#"{label_name="label value"}"#); + } + + #[test] + fn it_should_allow_instantiation_from_an_array_of_label_pairs() { + let label_set: LabelSet = sample_array_of_label_pairs().into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_vec_of_label_pairs() { + let label_set: LabelSet = sample_vec_of_label_pairs().into(); + + assert_eq!( + label_set, + LabelSet { + items: 
BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_b_tree_map() { + let label_set: LabelSet = BTreeMap::from(sample_array_of_label_pairs()).into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_label_pair() { + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from([(label_name!("label_name"), LabelValue::new("value"))]) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_vec_of_str_tuples() { + let label_set: LabelSet = vec![("foo", "bar"), ("baz", "qux")].into(); + + let mut expected = BTreeMap::new(); + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_vec_of_string_tuples() { + let label_set: LabelSet = vec![("foo".to_string(), "bar".to_string()), ("baz".to_string(), "qux".to_string())].into(); + + let mut expected = BTreeMap::new(); + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_vec_of_serialized_label() { + use super::SerializedLabel; + let label_set: LabelSet = vec![ + SerializedLabel { + name: LabelName::new("foo"), + value: LabelValue::new("bar"), + }, + SerializedLabel { + name: LabelName::new("baz"), + value: LabelValue::new("qux"), + }, + ] + .into(); + + let mut expected = BTreeMap::new(); + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + 
#[test] + fn it_should_allow_instantiation_from_array_of_string_tuples() { + let arr: [(String, String); 2] = [("foo".to_string(), "bar".to_string()), ("baz".to_string(), "qux".to_string())]; + let label_set: LabelSet = arr.into(); + + let mut expected = BTreeMap::new(); + + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_array_of_str_tuples() { + let arr: [(&str, &str); 2] = [("foo", "bar"), ("baz", "qux")]; + let label_set: LabelSet = arr.into(); + + let mut expected = BTreeMap::new(); + + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_be_comparable() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let b: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let c: LabelSet = (label_name!("y"), LabelValue::new("2")).into(); + + assert_eq!(a, b); + assert_ne!(a, c); + } + + #[test] + fn it_should_be_allow_ordering() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let b: LabelSet = (label_name!("y"), LabelValue::new("2")).into(); + + assert!(a < b); + } + + #[test] + fn it_should_be_hashable() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + + let mut hasher = DefaultHasher::new(); + + a.hash(&mut hasher); + } + + #[test] + fn it_should_implement_clone() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let _unused = a.clone(); + } + + #[test] + fn it_should_check_if_empty() { + let empty_set = LabelSet::empty(); + assert!(empty_set.is_empty()); + } + + #[test] + fn it_should_check_if_non_empty() { + let non_empty_set: LabelSet = (label_name!("label"), LabelValue::new("value")).into(); + 
assert!(!non_empty_set.is_empty()); + } + + #[test] + fn it_should_create_an_empty_label_set() { + let empty_set = LabelSet::empty(); + assert!(empty_set.is_empty()); + } + + #[test] + fn it_should_check_if_contains_specific_label_pair() { + let label_set: LabelSet = vec![("service", "tracker"), ("protocol", "http")].into(); + + // Test existing pair + assert!(label_set.contains_pair(&LabelName::new("service"), &LabelValue::new("tracker"))); + assert!(label_set.contains_pair(&LabelName::new("protocol"), &LabelValue::new("http"))); + + // Test non-existing name + assert!(!label_set.contains_pair(&LabelName::new("missing"), &LabelValue::new("value"))); + + // Test existing name with wrong value + assert!(!label_set.contains_pair(&LabelName::new("service"), &LabelValue::new("wrong"))); + } + + #[test] + fn it_should_match_against_criteria() { + let label_set: LabelSet = vec![("service", "tracker"), ("protocol", "http"), ("version", "v1")].into(); + + // Empty criteria should match any label set + assert!(label_set.matches(&LabelSet::empty())); + + // Single matching criterion + let single_criteria: LabelSet = vec![("service", "tracker")].into(); + assert!(label_set.matches(&single_criteria)); + + // Multiple matching criteria + let multiple_criteria: LabelSet = vec![("service", "tracker"), ("protocol", "http")].into(); + assert!(label_set.matches(&multiple_criteria)); + + // Non-matching criterion + let non_matching: LabelSet = vec![("service", "wrong")].into(); + assert!(!label_set.matches(&non_matching)); + + // Partially matching criteria (one matches, one doesn't) + let partial_matching: LabelSet = vec![("service", "tracker"), ("missing", "value")].into(); + assert!(!label_set.matches(&partial_matching)); + + // Criteria with label not in original set + let missing_label: LabelSet = vec![("missing_label", "value")].into(); + assert!(!label_set.matches(&missing_label)); + } + + #[test] + fn it_should_allow_iteration_over_label_pairs() { + let label_set: LabelSet = 
vec![("service", "tracker"), ("protocol", "http")].into(); + + let mut count = 0; + + for (name, value) in label_set.iter() { + count += 1; + // Verify we can access name and value + assert!(!name.to_string().is_empty()); + assert!(!value.to_string().is_empty()); + } + + assert_eq!(count, 2); + } + + #[test] + fn it_should_display_empty_label_set() { + let empty_set = LabelSet::empty(); + assert_eq!(empty_set.to_string(), "{}"); + } + + #[test] + fn it_should_serialize_empty_label_set_to_prometheus_format() { + let empty_set = LabelSet::empty(); + assert_eq!(empty_set.to_prometheus(), ""); + } + + #[test] + fn it_should_maintain_order_in_iteration() { + let label_set: LabelSet = vec![("z_label", "z_value"), ("a_label", "a_value"), ("m_label", "m_value")].into(); + + let mut labels: Vec<String> = vec![]; + for (name, _) in label_set.iter() { + labels.push(name.to_string()); + } + + // Should be in alphabetical order + assert_eq!(labels, vec!["a_label", "m_label", "z_label"]); + } +} diff --git a/packages/metrics/src/label/value.rs b/packages/metrics/src/label/value.rs new file mode 100644 index 000000000..4f25844a8 --- /dev/null +++ b/packages/metrics/src/label/value.rs @@ -0,0 +1,103 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + +use crate::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Eq, PartialEq, Default, Deserialize, Serialize, Hash, Ord, PartialOrd)] +pub struct LabelValue(String); + +impl LabelValue { + #[must_use] + pub fn new(value: &str) -> Self { + Self(value.to_owned()) + } + + /// Empty label values are ignored in Prometheus. 
+ #[must_use] + pub fn ignore() -> Self { + Self(String::default()) + } +} + +impl PrometheusSerializable for LabelValue { + fn to_prometheus(&self) -> String { + self.0.clone() + } +} + +impl From<String> for LabelValue { + fn from(value: String) -> Self { + Self(value) + } +} + +#[cfg(test)] +mod tests { + use std::collections::hash_map::DefaultHasher; + use std::hash::Hash; + + use crate::label::value::LabelValue; + use crate::prometheus::PrometheusSerializable; + + #[test] + fn it_serializes_to_prometheus() { + let label_value = LabelValue::new("value"); + assert_eq!(label_value.to_prometheus(), "value"); + } + + #[test] + fn it_could_be_initialized_from_str() { + let lv = LabelValue::new("abc"); + assert_eq!(lv.0, "abc"); + } + + #[test] + fn it_should_allow_to_create_an_ignored_label_value() { + let lv = LabelValue::ignore(); + assert_eq!(lv.0, ""); + } + + #[test] + fn it_should_be_converted_from_string() { + let s = String::from("foo"); + let lv: LabelValue = s.clone().into(); + assert_eq!(lv.0, s); + } + + #[test] + fn it_should_be_comparable() { + let a = LabelValue::new("x"); + let b = LabelValue::new("x"); + let c = LabelValue::new("y"); + + assert_eq!(a, b); + assert_ne!(a, c); + } + + #[test] + fn it_should_be_allow_ordering() { + let a = LabelValue::new("x"); + let b = LabelValue::new("y"); + + assert!(a < b); + } + + #[test] + fn it_should_be_hashable() { + let a = LabelValue::new("x"); + let mut hasher = DefaultHasher::new(); + a.hash(&mut hasher); + } + + #[test] + fn it_should_implement_clone() { + let a = LabelValue::new("x"); + let _unused = a.clone(); + } + + #[test] + fn it_should_implement_display() { + let a = LabelValue::new("x"); + assert_eq!(format!("{a}"), "x"); + } +} diff --git a/packages/metrics/src/lib.rs b/packages/metrics/src/lib.rs new file mode 100644 index 000000000..997cd3c8c --- /dev/null +++ b/packages/metrics/src/lib.rs @@ -0,0 +1,30 @@ +pub mod counter; +pub mod gauge; +pub mod label; +pub mod metric; +pub mod 
metric_collection; +pub mod prometheus; +pub mod sample; +pub mod sample_collection; +pub mod unit; + +pub const METRICS_TARGET: &str = "METRICS"; + +#[cfg(test)] +mod tests { + /// It removes leading and trailing whitespace from each line. + pub fn format_prometheus_output(output: &str) -> String { + output + .lines() + .map(str::trim_start) + .map(str::trim_end) + .collect::<Vec<_>>() + .join("\n") + } + + pub fn sort_lines(s: &str) -> String { + let mut lines: Vec<&str> = s.split('\n').collect(); + lines.sort_unstable(); + lines.join("\n") + } +} diff --git a/packages/metrics/src/metric/aggregate/avg.rs b/packages/metrics/src/metric/aggregate/avg.rs new file mode 100644 index 000000000..95628450b --- /dev/null +++ b/packages/metrics/src/metric/aggregate/avg.rs @@ -0,0 +1,294 @@ +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::aggregate::sum::Sum; +use crate::metric::Metric; + +pub trait Avg { + type Output; + fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output; +} + +impl Avg for Metric<Counter> { + type Output = f64; + + fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output { + let matching_samples = self.collect_matching_samples(label_set_criteria); + + if matching_samples.is_empty() { + return 0.0; + } + + let sum = self.sum(label_set_criteria); + + #[allow(clippy::cast_precision_loss)] + (sum as f64 / matching_samples.len() as f64) + } +} + +impl Avg for Metric<Gauge> { + type Output = f64; + + fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output { + let matching_samples = self.collect_matching_samples(label_set_criteria); + + if matching_samples.is_empty() { + return 0.0; + } + + let sum = self.sum(label_set_criteria); + + #[allow(clippy::cast_precision_loss)] + (sum / matching_samples.len() as f64) + } +} + +#[cfg(test)] +mod tests { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::counter::Counter; + use crate::gauge::Gauge; + use 
crate::label::LabelSet; + use crate::metric::aggregate::avg::Avg; + use crate::metric::{Metric, MetricName}; + use crate::metric_name; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + + struct MetricBuilder<T> { + sample_time: DurationSinceUnixEpoch, + name: MetricName, + samples: Vec<Sample<T>>, + } + + impl<T> Default for MetricBuilder<T> { + fn default() -> Self { + Self { + sample_time: DurationSinceUnixEpoch::from_secs(1_743_552_000), + name: metric_name!("test_metric"), + samples: vec![], + } + } + } + + impl<T> MetricBuilder<T> { + fn with_sample(mut self, value: T, label_set: &LabelSet) -> Self { + let sample = Sample::new(value, self.sample_time, label_set.clone()); + self.samples.push(sample); + self + } + + fn build(self) -> Metric<T> { + Metric::new( + self.name, + None, + None, + SampleCollection::new(self.samples).expect("invalid samples"), + ) + } + } + + fn counter_cases() -> Vec<(Metric<Counter>, LabelSet, f64)> { + // (metric, label set criteria, expected_average_value) + vec![ + // Metric with one sample without label set + ( + MetricBuilder::default().with_sample(1.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0, + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with two samples, different label sets, average all + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(3.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 2.0, // (1 + 3) / 2 = 2.0 + ), + // Metric with two samples, different label sets, average one + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(2.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with three samples, same label key, different label values, 
average by key + ( + MetricBuilder::default() + .with_sample(2.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(4.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .with_sample(6.into(), &[("l1", "l1_value"), ("lc", "lc_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 4.0, // (2 + 4 + 6) / 3 = 4.0 + ), + // Metric with two samples, different label values, average by subkey + ( + MetricBuilder::default() + .with_sample(5.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(7.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 5.0, + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0, + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() + .with_sample(0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0, + ), + // Edge: Metric with a very large value + ( + MetricBuilder::default() + .with_sample((u64::MAX / 2).into(), &[("edge", "large1")].into()) + .with_sample((u64::MAX / 2).into(), &[("edge", "large2")].into()) + .build(), + LabelSet::empty(), + #[allow(clippy::cast_precision_loss)] + (u64::MAX as f64 / 2.0), // Average of (max/2) and (max/2) + ), + ] + } + + fn gauge_cases() -> Vec<(Metric<Gauge>, LabelSet, f64)> { + // (metric, label set criteria, expected_average_value) + vec![ + // Metric with one sample without label set + ( + MetricBuilder::default().with_sample(1.0.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0, + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with two samples, 
different label sets, average all + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(3.0.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 2.0, // (1.0 + 3.0) / 2 = 2.0 + ), + // Metric with two samples, different label sets, average one + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(2.0.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with three samples, same label key, different label values, average by key + ( + MetricBuilder::default() + .with_sample(2.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(4.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .with_sample(6.0.into(), &[("l1", "l1_value"), ("lc", "lc_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 4.0, // (2.0 + 4.0 + 6.0) / 3 = 4.0 + ), + // Metric with two samples, different label values, average by subkey + ( + MetricBuilder::default() + .with_sample(5.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(7.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 5.0, + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.0.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0, + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() + .with_sample(0.0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0, + ), + // Edge: Metric with negative values + ( + MetricBuilder::default() + .with_sample((-2.0).into(), &[("l4", "l4_value")].into()) + .with_sample(4.0.into(), &[("l5", "l5_value")].into()) + .build(), + LabelSet::empty(), + 1.0, // (-2.0 + 4.0) / 2 = 1.0 + ), + // 
Edge: Metric with decimal values + ( + MetricBuilder::default() + .with_sample(1.5.into(), &[("l6", "l6_value")].into()) + .with_sample(2.5.into(), &[("l7", "l7_value")].into()) + .build(), + LabelSet::empty(), + 2.0, // (1.5 + 2.5) / 2 = 2.0 + ), + ] + } + + #[test] + fn test_counter_cases() { + for (idx, (metric, criteria, expected_value)) in counter_cases().iter().enumerate() { + let avg = metric.avg(criteria); + + assert!( + (avg - expected_value).abs() <= f64::EPSILON, + "at case {idx}, expected avg to be {expected_value}, got {avg}" + ); + } + } + + #[test] + fn test_gauge_cases() { + for (idx, (metric, criteria, expected_value)) in gauge_cases().iter().enumerate() { + let avg = metric.avg(criteria); + + assert!( + (avg - expected_value).abs() <= f64::EPSILON, + "at case {idx}, expected avg to be {expected_value}, got {avg}" + ); + } + } +} diff --git a/packages/metrics/src/metric/aggregate/mod.rs b/packages/metrics/src/metric/aggregate/mod.rs new file mode 100644 index 000000000..1224a1f52 --- /dev/null +++ b/packages/metrics/src/metric/aggregate/mod.rs @@ -0,0 +1,2 @@ +pub mod avg; +pub mod sum; diff --git a/packages/metrics/src/metric/aggregate/sum.rs b/packages/metrics/src/metric/aggregate/sum.rs new file mode 100644 index 000000000..30c2819b7 --- /dev/null +++ b/packages/metrics/src/metric/aggregate/sum.rs @@ -0,0 +1,278 @@ +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::Metric; + +pub trait Sum { + type Output; + fn sum(&self, label_set_criteria: &LabelSet) -> Self::Output; +} + +impl Sum for Metric<Counter> { + type Output = u64; + + fn sum(&self, label_set_criteria: &LabelSet) -> Self::Output { + self.sample_collection + .iter() + .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) + .map(|(_label_set, measurement)| measurement.value().primitive()) + .sum() + } +} + +impl Sum for Metric<Gauge> { + type Output = f64; + + fn sum(&self, label_set_criteria: &LabelSet) -> 
Self::Output { + self.sample_collection + .iter() + .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) + .map(|(_label_set, measurement)| measurement.value().primitive()) + .sum() + } +} + +#[cfg(test)] +mod tests { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::counter::Counter; + use crate::gauge::Gauge; + use crate::label::LabelSet; + use crate::metric::aggregate::sum::Sum; + use crate::metric::{Metric, MetricName}; + use crate::metric_name; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + + struct MetricBuilder<T> { + sample_time: DurationSinceUnixEpoch, + name: MetricName, + samples: Vec<Sample<T>>, + } + + impl<T> Default for MetricBuilder<T> { + fn default() -> Self { + Self { + sample_time: DurationSinceUnixEpoch::from_secs(1_743_552_000), + name: metric_name!("test_metric"), + samples: vec![], + } + } + } + + impl<T> MetricBuilder<T> { + fn with_sample(mut self, value: T, label_set: &LabelSet) -> Self { + let sample = Sample::new(value, self.sample_time, label_set.clone()); + self.samples.push(sample); + self + } + + fn build(self) -> Metric<T> { + Metric::new( + self.name, + None, + None, + SampleCollection::new(self.samples).expect("invalid samples"), + ) + } + } + + fn counter_cases() -> Vec<(Metric<Counter>, LabelSet, u64)> { + // (metric, label set criteria, expected_aggregate_value) + vec![ + // Metric with one sample without label set + ( + MetricBuilder::default().with_sample(1.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1, + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1, + ), + // Metric with two samples, different label sets, sum all + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(2.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 3, + ), + 
// Metric with two samples, different label sets, sum one + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(2.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1, + ), + // Metric with two samples, same label key, different label values, sum by key + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 3, + ), + // Metric with two samples, different label values, sum by subkey + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 1, + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0, + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() + .with_sample(0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0, + ), + // Edge: Metric with a very large value + ( + MetricBuilder::default() + .with_sample(u64::MAX.into(), &LabelSet::empty()) + .build(), + LabelSet::empty(), + u64::MAX, + ), + ] + } + + fn gauge_cases() -> Vec<(Metric<Gauge>, LabelSet, f64)> { + // (metric, label set criteria, expected_aggregate_value) + vec![ + // Metric with one sample without label set + ( + MetricBuilder::default().with_sample(1.0.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0, + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + 
// Metric with two samples, different label sets, sum all + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(2.0.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 3.0, + ), + // Metric with two samples, different label sets, sum one + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(2.0.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with two samples, same label key, different label values, sum by key + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 3.0, + ), + // Metric with two samples, different label values, sum by subkey + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 1.0, + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.0.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0, + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() + .with_sample(0.0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0, + ), + // Edge: Metric with negative values + ( + MetricBuilder::default() + .with_sample((-2.0).into(), &[("l4", "l4_value")].into()) + .with_sample(3.0.into(), &[("l5", "l5_value")].into()) + .build(), + LabelSet::empty(), + 1.0, + ), + // Edge: Metric with a very large value + ( + MetricBuilder::default() + .with_sample(f64::MAX.into(), &LabelSet::empty()) + .build(), + LabelSet::empty(), 
+ f64::MAX, + ), + ] + } + + #[test] + fn test_counter_cases() { + for (idx, (metric, criteria, expected_value)) in counter_cases().iter().enumerate() { + let sum = metric.sum(criteria); + + assert_eq!( + sum, *expected_value, + "at case {idx}, expected sum to be {expected_value}, got {sum}" + ); + } + } + + #[test] + fn test_gauge_cases() { + for (idx, (metric, criteria, expected_value)) in gauge_cases().iter().enumerate() { + let sum = metric.sum(criteria); + + assert!( + (sum - expected_value).abs() <= f64::EPSILON, + "at case {idx}, expected sum to be {expected_value}, got {sum}" + ); + } + } +} diff --git a/packages/metrics/src/metric/description.rs b/packages/metrics/src/metric/description.rs new file mode 100644 index 000000000..6a0ca3432 --- /dev/null +++ b/packages/metrics/src/metric/description.rs @@ -0,0 +1,42 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + +use crate::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Eq, PartialEq, Default, Deserialize, Serialize, Hash, Ord, PartialOrd)] +pub struct MetricDescription(String); + +impl MetricDescription { + #[must_use] + pub fn new(name: &str) -> Self { + Self(name.to_owned()) + } +} + +impl PrometheusSerializable for MetricDescription { + fn to_prometheus(&self) -> String { + self.0.clone() + } +} +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_should_be_created_from_a_string_reference() { + let metric = MetricDescription::new("Metric description"); + assert_eq!(metric.0, "Metric description"); + } + + #[test] + fn it_serializes_to_prometheus() { + let label_value = MetricDescription::new("name"); + assert_eq!(label_value.to_prometheus(), "name"); + } + + #[test] + fn it_should_be_displayed() { + let metric = MetricDescription::new("Metric description"); + assert_eq!(metric.to_string(), "Metric description"); + } +} diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs new file mode 100644 index 000000000..6bc1a6075 
// ---- packages/metrics/src/metric/mod.rs (new file in this patch) ----

pub mod aggregate;
pub mod description;
pub mod name;

use serde::{Deserialize, Serialize};
use torrust_tracker_primitives::DurationSinceUnixEpoch;

use super::counter::Counter;
use super::label::LabelSet;
use super::prometheus::PrometheusSerializable;
use super::sample_collection::SampleCollection;
use crate::gauge::Gauge;
use crate::metric::description::MetricDescription;
use crate::sample::Measurement;
use crate::unit::Unit;

// Re-export so callers can refer to `metric::MetricName` directly.
pub type MetricName = name::MetricName;

/// A named metric holding a collection of samples, one measurement per
/// label set, with an optional unit and description.
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct Metric<T> {
    name: MetricName,

    // Optional measurement unit (serialized as `unit`).
    #[serde(rename = "unit")]
    opt_unit: Option<Unit>,

    // Optional human-readable description (serialized as `description`).
    #[serde(rename = "description")]
    opt_description: Option<MetricDescription>,

    // Samples keyed by label set (serialized as `samples`).
    #[serde(rename = "samples")]
    sample_collection: SampleCollection<T>,
}

impl<T> Metric<T> {
    /// Builds a metric from its name, optional unit/description and samples.
    #[must_use]
    pub fn new(
        name: MetricName,
        opt_unit: Option<Unit>,
        opt_description: Option<MetricDescription>,
        samples: SampleCollection<T>,
    ) -> Self {
        Self {
            name,
            opt_unit,
            opt_description,
            sample_collection: samples,
        }
    }

    /// Builds a metric with the given name and no samples, unit or
    /// description.
    ///
    /// # Panics
    ///
    /// This function will panic if the empty sample collection cannot be created.
    #[must_use]
    pub fn new_empty_with_name(name: MetricName) -> Self {
        Self {
            name,
            opt_unit: None,
            opt_description: None,
            sample_collection: SampleCollection::new(vec![]).expect("Empty sample collection creation should not fail"),
        }
    }

    /// Returns the metric name.
    #[must_use]
    pub fn name(&self) -> &MetricName {
        &self.name
    }

    /// Returns the measurement stored for exactly this label set, if any.
    #[must_use]
    pub fn get_sample_data(&self, label_set: &LabelSet) -> Option<&Measurement<T>> {
        self.sample_collection.get(label_set)
    }

    /// Returns how many samples this metric currently holds.
    #[must_use]
    pub fn number_of_samples(&self) -> usize {
        self.sample_collection.len()
    }

    /// Returns `true` when the metric holds no samples.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.sample_collection.is_empty()
    }

    /// Returns references to all samples whose label set matches the given
    /// criteria (matching is delegated to `LabelSet::matches`).
    #[must_use]
    pub fn collect_matching_samples(
        &self,
        label_set_criteria: &LabelSet,
    ) -> Vec<(&crate::label::LabelSet, &crate::sample::Measurement<T>)> {
        self.sample_collection
            .iter()
            .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria))
            .collect()
    }
}

// Counter-specific mutations: delegate to the sample collection.
impl Metric<Counter> {
    /// Increments the counter sample for `label_set` by one.
    pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) {
        self.sample_collection.increment(label_set, time);
    }

    /// Sets the counter sample for `label_set` to an absolute value.
    pub fn absolute(&mut self, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) {
        self.sample_collection.absolute(label_set, value, time);
    }
}

// Gauge-specific mutations: delegate to the sample collection.
impl Metric<Gauge> {
    /// Sets the gauge sample for `label_set` to the given value.
    pub fn set(&mut self, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) {
        self.sample_collection.set(label_set, value, time);
    }

    /// Increments the gauge sample for `label_set`.
    pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) {
        self.sample_collection.increment(label_set, time);
    }

    /// Decrements the gauge sample for `label_set`.
    pub fn decrement(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) {
        self.sample_collection.decrement(label_set, time);
    }
}

// The Prometheus metric type written on the `# TYPE` line.
enum PrometheusType {
    Counter,
    Gauge,
}

impl PrometheusSerializable for PrometheusType {
    fn to_prometheus(&self) -> String {
        match self {
            PrometheusType::Counter => "counter".to_string(),
            PrometheusType::Gauge => "gauge".to_string(),
        }
    }
}

impl<T: PrometheusSerializable> Metric<T> {
    // Builds the `# HELP <name> <description>` line, or an empty string when
    // there is no description.
    #[must_use]
    fn prometheus_help_line(&self) -> String {
        if let Some(description) = &self.opt_description {
            format!("# HELP {} {}", self.name.to_prometheus(), description.to_prometheus())
        } else {
            String::new()
        }
    }

    // Builds the `# TYPE <name> <type>` line.
    #[must_use]
    fn prometheus_type_line(&self, prometheus_type: &PrometheusType) -> String {
        format!("# TYPE {} {}", self.name.to_prometheus(), prometheus_type.to_prometheus())
    }

    // Builds one sample line: `<name><labels> <value>`.
    #[must_use]
    fn prometheus_sample_line(&self, label_set: &LabelSet, measurement: &Measurement<T>) -> String {
        format!(
            "{}{} {}",
            self.name.to_prometheus(),
            label_set.to_prometheus(),
            measurement.to_prometheus()
        )
    }

    // Joins all sample lines with newlines.
    #[must_use]
    fn prometheus_samples(&self) -> String {
        self.sample_collection
            .iter()
            .map(|(label_set, measurement)| self.prometheus_sample_line(label_set, measurement))
            .collect::<Vec<_>>()
            .join("\n")
    }

    // NOTE(review): this inherent method shares the trait method's name but
    // takes an extra argument; the trait impls below resolve to it. When
    // `opt_description` is `None` the help line is empty, so the output
    // starts with a blank line — confirm consumers trim it.
    fn to_prometheus(&self, prometheus_type: &PrometheusType) -> String {
        let help_line = self.prometheus_help_line();
        let type_line = self.prometheus_type_line(prometheus_type);
        let samples = self.prometheus_samples();

        format!("{help_line}\n{type_line}\n{samples}")
    }
}

impl PrometheusSerializable for Metric<Counter> {
    fn to_prometheus(&self) -> String {
        self.to_prometheus(&PrometheusType::Counter)
    }
}

impl PrometheusSerializable for Metric<Gauge> {
    fn to_prometheus(&self) -> String {
        self.to_prometheus(&PrometheusType::Gauge)
    }
}

#[cfg(test)]
mod tests {
    mod for_generic_metrics {
        use super::super::*;
        use crate::gauge::Gauge;
        use crate::label::LabelValue;
        use crate::sample::Sample;
        use crate::{label_name, metric_name};

        #[test]
        fn it_should_be_empty_when_it_does_not_have_any_sample() {
            let name = metric_name!("test_metric");

            let samples = SampleCollection::<Gauge>::default();

            let metric = Metric::<Gauge>::new(name.clone(), None, None, samples);

            assert!(metric.is_empty());
        }

        fn
counter_metric_with_one_sample() -> Metric<Counter> { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + let name = metric_name!("test_metric"); + + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + + let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]).unwrap(); + + Metric::<Counter>::new(name.clone(), None, None, samples) + } + + #[test] + fn it_should_return_the_number_of_samples() { + assert_eq!(counter_metric_with_one_sample().number_of_samples(), 1); + } + + #[test] + fn it_should_return_zero_number_of_samples_for_an_empty_metric() { + let name = metric_name!("test_metric"); + + let samples = SampleCollection::<Gauge>::default(); + + let metric = Metric::<Gauge>::new(name.clone(), None, None, samples); + + assert_eq!(metric.number_of_samples(), 0); + } + } + + mod for_counter_metrics { + use super::super::*; + use crate::counter::Counter; + use crate::label::LabelValue; + use crate::sample::Sample; + use crate::{label_name, metric_name}; + + #[test] + fn it_should_be_created_from_its_name_and_a_collection_of_samples() { + let name = metric_name!("test_metric"); + + let samples = SampleCollection::<Counter>::default(); + + let _metric = Metric::<Counter>::new(name, None, None, samples); + } + + #[test] + fn it_should_allow_incrementing_a_sample() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::<Counter>::new(name.clone(), None, None, samples); + + metric.increment(&label_set, time); + + assert_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1); + } + + #[test] + fn it_should_allow_setting_to_an_absolute_value() { + let time = 
DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::<Counter>::new(name.clone(), None, None, samples); + + metric.absolute(&label_set, 1, time); + + assert_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1); + } + } + + mod for_gauge_metrics { + use approx::assert_relative_eq; + + use super::super::*; + use crate::gauge::Gauge; + use crate::label::LabelValue; + use crate::sample::Sample; + use crate::{label_name, metric_name}; + + #[test] + fn it_should_be_created_from_its_name_and_a_collection_of_samples() { + let name = metric_name!("test_metric"); + + let samples = SampleCollection::<Gauge>::default(); + + let _metric = Metric::<Gauge>::new(name, None, None, samples); + } + + #[test] + fn it_should_allow_incrementing_a_sample() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::<Gauge>::new(name.clone(), None, None, samples); + + metric.increment(&label_set, time); + + assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); + } + + #[test] + fn it_should_allow_decrement_a_sample() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::<Gauge>::new(name.clone(), 
None, None, samples); + + metric.decrement(&label_set, time); + + assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 0.0); + } + + #[test] + fn it_should_allow_setting_a_sample() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::<Gauge>::new(name.clone(), None, None, samples); + + metric.set(&label_set, 1.0, time); + + assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); + } + } + + mod for_prometheus_serialization { + use super::super::*; + use crate::counter::Counter; + use crate::metric_name; + + #[test] + fn it_should_return_empty_string_for_prometheus_help_line_when_description_is_none() { + let name = metric_name!("test_metric"); + let samples = SampleCollection::<Counter>::default(); + let metric = Metric::<Counter>::new(name, None, None, samples); + + let help_line = metric.prometheus_help_line(); + + assert_eq!(help_line, String::new()); + } + + #[test] + fn it_should_return_formatted_help_line_for_prometheus_when_description_is_some() { + let name = metric_name!("test_metric"); + let description = MetricDescription::new("This is a test metric description"); + let samples = SampleCollection::<Counter>::default(); + let metric = Metric::<Counter>::new(name, None, Some(description), samples); + + let help_line = metric.prometheus_help_line(); + + assert_eq!(help_line, "# HELP test_metric This is a test metric description"); + } + } +} diff --git a/packages/metrics/src/metric/name.rs b/packages/metrics/src/metric/name.rs new file mode 100644 index 000000000..09c8c9e6d --- /dev/null +++ b/packages/metrics/src/metric/name.rs @@ -0,0 +1,97 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + 
+use crate::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Eq, PartialEq, Default, Deserialize, Serialize, Hash, Ord, PartialOrd)] +pub struct MetricName(String); + +impl MetricName { + /// Creates a new `MetricName` instance. + /// + /// # Panics + /// + /// Panics if the provided name is empty. + #[must_use] + pub fn new(name: &str) -> Self { + assert!(!name.is_empty(), "Metric name cannot be empty."); + Self(name.to_owned()) + } +} + +impl PrometheusSerializable for MetricName { + fn to_prometheus(&self) -> String { + // Metric names may contain ASCII letters, digits, underscores, and + // colons. It must match the regex [a-zA-Z_:][a-zA-Z0-9_:]*. + // If the metric name starts with, or contains, an invalid character: + // replace character with underscore. + + self.0 + .chars() + .enumerate() + .map(|(i, c)| { + if i == 0 { + if c.is_ascii_alphabetic() || c == '_' || c == ':' { + c + } else { + '_' + } + } else if c.is_ascii_alphanumeric() || c == '_' || c == ':' { + c + } else { + '_' + } + }) + .collect() + } +} + +#[macro_export] +macro_rules! 
metric_name { + ("") => { + compile_error!("Metric name cannot be empty"); + }; + ($name:literal) => { + $crate::metric::name::MetricName::new($name) + }; + ($name:ident) => { + $crate::metric::name::MetricName::new($name) + }; +} + +#[cfg(test)] +mod tests { + + mod serialization_of_metric_name_to_prometheus { + + use crate::metric::name::MetricName; + use crate::prometheus::PrometheusSerializable; + + #[test] + fn valid_names_in_prometheus() { + assert_eq!(metric_name!("valid_name").to_prometheus(), "valid_name"); + assert_eq!(metric_name!("_leading_underscore").to_prometheus(), "_leading_underscore"); + assert_eq!(metric_name!(":leading_colon").to_prometheus(), ":leading_colon"); + assert_eq!(metric_name!("v123").to_prometheus(), "v123"); // leading lowercase + assert_eq!(metric_name!("V123").to_prometheus(), "V123"); // leading lowercase + } + + #[test] + fn names_that_need_changes_in_prometheus() { + assert_eq!(metric_name!("9invalid_start").to_prometheus(), "_invalid_start"); + assert_eq!(metric_name!("@test").to_prometheus(), "_test"); + assert_eq!(metric_name!("invalid-char").to_prometheus(), "invalid_char"); + assert_eq!(metric_name!("spaces are bad").to_prometheus(), "spaces_are_bad"); + assert_eq!(metric_name!("a!b@c#d$e%f^g&h*i(j)").to_prometheus(), "a_b_c_d_e_f_g_h_i_j_"); + assert_eq!(metric_name!("my:metric/version").to_prometheus(), "my:metric_version"); + assert_eq!(metric_name!("!@#$%^&*()").to_prometheus(), "__________"); + assert_eq!(metric_name!("ñaca©").to_prometheus(), "_aca_"); + } + + #[test] + #[should_panic(expected = "Metric name cannot be empty.")] + fn empty_name() { + let _name = MetricName::new(""); + } + } +} diff --git a/packages/metrics/src/metric_collection/aggregate/avg.rs b/packages/metrics/src/metric_collection/aggregate/avg.rs new file mode 100644 index 000000000..0aef4e325 --- /dev/null +++ b/packages/metrics/src/metric_collection/aggregate/avg.rs @@ -0,0 +1,212 @@ +use crate::counter::Counter; +use crate::gauge::Gauge; +use 
crate::label::LabelSet;
use crate::metric::aggregate::avg::Avg as MetricAvgTrait;
use crate::metric::MetricName;
use crate::metric_collection::{MetricCollection, MetricKindCollection};

/// Average aggregation across a collection of metrics.
pub trait Avg {
    /// Averages the matching samples of the metric called `metric_name`,
    /// or returns `None` when no metric with that name exists.
    fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option<f64>;
}

impl Avg for MetricCollection {
    fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option<f64> {
        // Counters take precedence; gauges are only consulted when no
        // counter with that name produced a value.
        self.counters
            .avg(metric_name, label_set_criteria)
            .or_else(|| self.gauges.avg(metric_name, label_set_criteria))
    }
}

impl Avg for MetricKindCollection<Counter> {
    fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option<f64> {
        let metric = self.metrics.get(metric_name)?;
        Some(metric.avg(label_set_criteria))
    }
}

impl Avg for MetricKindCollection<Gauge> {
    fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option<f64> {
        let metric = self.metrics.get(metric_name)?;
        Some(metric.avg(label_set_criteria))
    }
}

#[cfg(test)]
mod tests {

    mod it_should_allow_averaging_all_metric_samples_containing_some_given_labels {

        use torrust_tracker_primitives::DurationSinceUnixEpoch;

        use crate::label::LabelValue;
        use crate::label_name;
        use crate::metric_collection::aggregate::avg::Avg;

        #[test]
        fn type_counter_with_two_samples() {
            use crate::label::LabelSet;
            use crate::metric_collection::MetricCollection;
            use crate::metric_name;

            let metric_name = metric_name!("test_counter");

            let mut collection = MetricCollection::default();

            collection
                .increment_counter(
                    &metric_name!("test_counter"),
                    &(label_name!("label_1"), LabelValue::new("value_1")).into(),
                    DurationSinceUnixEpoch::from_secs(1),
                )
                .unwrap();

            collection
                .increment_counter(
                    &metric_name!("test_counter"),
                    &(label_name!("label_2"), LabelValue::new("value_2")).into(),
                    DurationSinceUnixEpoch::from_secs(1),
+ ) + .unwrap(); + + // Two samples with value 1 each, average should be 1.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(1.0)); + assert_eq!( + collection.avg(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(1.0) + ); + } + + #[test] + fn type_counter_with_different_values() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_counter"); + + let mut collection = MetricCollection::default(); + + // First increment: value goes from 0 to 1 + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Second increment on the same label: value goes from 1 to 2 + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(2), + ) + .unwrap(); + + // Create another counter with a different value + collection + .set_counter( + &metric_name!("test_counter"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + 4, + DurationSinceUnixEpoch::from_secs(3), + ) + .unwrap(); + + // Average of 2 and 4 should be 3.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(3.0)); + assert_eq!( + collection.avg(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(2.0) + ); + assert_eq!( + collection.avg(&metric_name, &(label_name!("label_2"), LabelValue::new("value_2")).into()), + Some(4.0) + ); + } + + #[test] + fn type_gauge_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_gauge"); + + let mut collection = MetricCollection::default(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_1"), 
LabelValue::new("value_1")).into(), + 2.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + 4.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Average of 2.0 and 4.0 should be 3.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(3.0)); + assert_eq!( + collection.avg(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(2.0) + ); + } + + #[test] + fn type_gauge_with_negative_values() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_gauge"); + + let mut collection = MetricCollection::default(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + -2.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + 6.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Average of -2.0 and 6.0 should be 2.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(2.0)); + } + + #[test] + fn nonexistent_metric() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let collection = MetricCollection::default(); + + assert_eq!(collection.avg(&metric_name!("nonexistent"), &LabelSet::empty()), None); + } + } +} diff --git a/packages/metrics/src/metric_collection/aggregate/mod.rs b/packages/metrics/src/metric_collection/aggregate/mod.rs new file mode 100644 index 000000000..1224a1f52 --- /dev/null +++ b/packages/metrics/src/metric_collection/aggregate/mod.rs @@ -0,0 +1,2 @@ +pub mod avg; +pub mod sum; diff --git a/packages/metrics/src/metric_collection/aggregate/sum.rs 
b/packages/metrics/src/metric_collection/aggregate/sum.rs new file mode 100644 index 000000000..3285fa8f1 --- /dev/null +++ b/packages/metrics/src/metric_collection/aggregate/sum.rs @@ -0,0 +1,118 @@ +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::aggregate::sum::Sum as MetricSumTrait; +use crate::metric::MetricName; +use crate::metric_collection::{MetricCollection, MetricKindCollection}; + +pub trait Sum { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option<f64>; +} + +impl Sum for MetricCollection { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option<f64> { + if let Some(value) = self.counters.sum(metric_name, label_set_criteria) { + return Some(value); + } + + if let Some(value) = self.gauges.sum(metric_name, label_set_criteria) { + return Some(value); + } + + None + } +} + +impl Sum for MetricKindCollection<Counter> { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option<f64> { + #[allow(clippy::cast_precision_loss)] + self.metrics + .get(metric_name) + .map(|metric| metric.sum(label_set_criteria) as f64) + } +} + +impl Sum for MetricKindCollection<Gauge> { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option<f64> { + self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + } +} + +#[cfg(test)] +mod tests { + + mod it_should_allow_summing_all_metric_samples_containing_some_given_labels { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelValue; + use crate::label_name; + use crate::metric_collection::aggregate::sum::Sum; + + #[test] + fn type_counter_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_counter"); + + let mut collection = MetricCollection::default(); + + collection + .increment_counter( + 
&metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0)); + assert_eq!( + collection.sum(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(1.0) + ); + } + + #[test] + fn type_gauge_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_gauge"); + + let mut collection = MetricCollection::default(); + + collection + .increment_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .increment_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0)); + assert_eq!( + collection.sum(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(1.0) + ); + } + } +} diff --git a/packages/metrics/src/metric_collection/mod.rs b/packages/metrics/src/metric_collection/mod.rs new file mode 100644 index 000000000..e183236aa --- /dev/null +++ b/packages/metrics/src/metric_collection/mod.rs @@ -0,0 +1,1196 @@ +pub mod aggregate; + +use std::collections::{HashMap, HashSet}; + +use serde::ser::{SerializeSeq, Serializer}; +use serde::{Deserialize, Deserializer, Serialize}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::counter::Counter; +use super::gauge::Gauge; +use super::label::LabelSet; +use super::metric::{Metric, MetricName}; +use 
super::prometheus::PrometheusSerializable; +use crate::metric::description::MetricDescription; +use crate::sample_collection::SampleCollection; +use crate::unit::Unit; +use crate::METRICS_TARGET; + +// code-review: serialize in a deterministic order? For example: +// - First the counter metrics ordered by name. +// - Then the gauge metrics ordered by name. + +#[derive(Debug, Clone, Default, PartialEq)] +pub struct MetricCollection { + counters: MetricKindCollection<Counter>, + gauges: MetricKindCollection<Gauge>, +} + +impl MetricCollection { + /// # Errors + /// + /// Returns an error if there are duplicate metric names across counters and + /// gauges. + pub fn new(counters: MetricKindCollection<Counter>, gauges: MetricKindCollection<Gauge>) -> Result<Self, Error> { + // Check for name collisions across metric types + let counter_names: HashSet<_> = counters.names().collect(); + let gauge_names: HashSet<_> = gauges.names().collect(); + + if !counter_names.is_disjoint(&gauge_names) { + return Err(Error::MetricNameCollisionInConstructor { + counter_names: counter_names.iter().map(std::string::ToString::to_string).collect(), + gauge_names: gauge_names.iter().map(std::string::ToString::to_string).collect(), + }); + } + + Ok(Self { counters, gauges }) + } + + /// Merges another `MetricCollection` into this one. + /// + /// # Errors + /// + /// Returns an error if a metric name already exists in the current collection. + pub fn merge(&mut self, other: &Self) -> Result<(), Error> { + self.check_cross_type_collision(other)?; + self.counters.merge(&other.counters)?; + self.gauges.merge(&other.gauges)?; + Ok(()) + } + + /// Returns a set of all metric names in this collection. + fn collect_names(&self) -> HashSet<MetricName> { + self.counters.names().chain(self.gauges.names()).cloned().collect() + } + + /// Checks for name collisions between this collection and another one. 
+ fn check_cross_type_collision(&self, other: &Self) -> Result<(), Error> { + let self_names: HashSet<_> = self.collect_names(); + let other_names: HashSet<_> = other.collect_names(); + + let cross_type_collisions = self_names.intersection(&other_names).next(); + + if let Some(name) = cross_type_collisions { + return Err(Error::MetricNameCollisionInMerge { + metric_name: (*name).clone(), + }); + } + + Ok(()) + } + + // Counter-specific methods + + pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option<Unit>, opt_description: Option<MetricDescription>) { + tracing::info!(target: METRICS_TARGET, type = "counter", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); + + let metric = Metric::<Counter>::new(name.clone(), opt_unit, opt_description, SampleCollection::default()); + + self.counters.insert(metric); + } + + #[must_use] + pub fn contains_counter(&self, name: &MetricName) -> bool { + self.counters.metrics.contains_key(name) + } + + #[must_use] + pub fn get_counter_value(&self, name: &MetricName, label_set: &LabelSet) -> Option<Counter> { + self.counters.get_value(name, label_set) + } + + /// Increases the counter for the given metric name and labels. + /// + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. + pub fn increment_counter( + &mut self, + name: &MetricName, + label_set: &LabelSet, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + if self.gauges.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.counters.increment(name, label_set, time); + + Ok(()) + } + + /// Sets the counter for the given metric name and labels. + /// + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. 
+ pub fn set_counter( + &mut self, + name: &MetricName, + label_set: &LabelSet, + value: u64, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + if self.gauges.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.counters.absolute(name, label_set, value, time); + + Ok(()) + } + + // Gauge-specific methods + + pub fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option<Unit>, opt_description: Option<MetricDescription>) { + tracing::info!(target: METRICS_TARGET, type = "gauge", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); + + let metric = Metric::<Gauge>::new(name.clone(), opt_unit, opt_description, SampleCollection::default()); + + self.gauges.insert(metric); + } + + #[must_use] + pub fn contains_gauge(&self, name: &MetricName) -> bool { + self.gauges.metrics.contains_key(name) + } + + #[must_use] + pub fn get_gauge_value(&self, name: &MetricName, label_set: &LabelSet) -> Option<Gauge> { + self.gauges.get_value(name, label_set) + } + + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. + pub fn set_gauge( + &mut self, + name: &MetricName, + label_set: &LabelSet, + value: f64, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + if self.counters.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.gauges.set(name, label_set, value, time); + + Ok(()) + } + + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. 
+ pub fn increment_gauge( + &mut self, + name: &MetricName, + label_set: &LabelSet, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + if self.counters.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.gauges.increment(name, label_set, time); + + Ok(()) + } + + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. + pub fn decrement_gauge( + &mut self, + name: &MetricName, + label_set: &LabelSet, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + if self.counters.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.gauges.decrement(name, label_set, time); + + Ok(()) + } +} + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("Metric names must be unique across all metrics types.")] + MetricNameCollisionInConstructor { + counter_names: Vec<String>, + gauge_names: Vec<String>, + }, + + #[error("Found duplicate metric name in list. Metric names must be unique across all metrics types.")] + DuplicateMetricNameInList { metric_name: MetricName }, + + #[error("Cannot merge metric '{metric_name}': it already exists in the current collection")] + MetricNameCollisionInMerge { metric_name: MetricName }, + + #[error("Cannot create metric with name '{metric_name}': another metric with this name already exists")] + MetricNameCollisionAdding { metric_name: MetricName }, +} + +/// Implements serialization for `MetricCollection`. 
+impl Serialize for MetricCollection { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + #[derive(Serialize)] + #[serde(tag = "type", rename_all = "lowercase")] + enum SerializableMetric<'a> { + Counter(&'a Metric<Counter>), + Gauge(&'a Metric<Gauge>), + } + + let mut seq = serializer.serialize_seq(Some(self.counters.metrics.len() + self.gauges.metrics.len()))?; + + for metric in self.counters.metrics.values() { + seq.serialize_element(&SerializableMetric::Counter(metric))?; + } + + for metric in self.gauges.metrics.values() { + seq.serialize_element(&SerializableMetric::Gauge(metric))?; + } + + seq.end() + } +} + +impl<'de> Deserialize<'de> for MetricCollection { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(tag = "type", rename_all = "lowercase")] + enum MetricPayload { + Counter(Metric<Counter>), + Gauge(Metric<Gauge>), + } + + let payload = Vec::<MetricPayload>::deserialize(deserializer)?; + + let mut counters = Vec::new(); + let mut gauges = Vec::new(); + + for metric in payload { + match metric { + MetricPayload::Counter(counter) => counters.push(counter), + MetricPayload::Gauge(gauge) => gauges.push(gauge), + } + } + + let counters = MetricKindCollection::new(counters).map_err(serde::de::Error::custom)?; + let gauges = MetricKindCollection::new(gauges).map_err(serde::de::Error::custom)?; + + let metric_collection = MetricCollection::new(counters, gauges).map_err(serde::de::Error::custom)?; + + Ok(metric_collection) + } +} + +impl PrometheusSerializable for MetricCollection { + fn to_prometheus(&self) -> String { + self.counters + .metrics + .values() + .filter(|metric| !metric.is_empty()) + .map(Metric::<Counter>::to_prometheus) + .chain( + self.gauges + .metrics + .values() + .filter(|metric| !metric.is_empty()) + .map(Metric::<Gauge>::to_prometheus), + ) + .collect::<Vec<String>>() + .join("\n\n") + } +} + +#[derive(Debug, 
Clone, Default, PartialEq)] +pub struct MetricKindCollection<T> { + metrics: HashMap<MetricName, Metric<T>>, +} + +impl<T> MetricKindCollection<T> { + /// Creates a new `MetricKindCollection` from a vector of metrics + /// + /// # Errors + /// + /// Returns an error if duplicate metric names are passed. + pub fn new(metrics: Vec<Metric<T>>) -> Result<Self, Error> { + let mut map = HashMap::with_capacity(metrics.len()); + + for metric in metrics { + let metric_name = metric.name().clone(); + + if let Some(_old_metric) = map.insert(metric.name().clone(), metric) { + return Err(Error::DuplicateMetricNameInList { metric_name }); + } + } + + Ok(Self { metrics: map }) + } + + /// Returns an iterator over all metric names in this collection. + pub fn names(&self) -> impl Iterator<Item = &MetricName> { + self.metrics.keys() + } + + pub fn insert_if_absent(&mut self, metric: Metric<T>) { + if !self.metrics.contains_key(metric.name()) { + self.insert(metric); + } + } + + pub fn insert(&mut self, metric: Metric<T>) { + self.metrics.insert(metric.name().clone(), metric); + } +} + +impl<T: Clone> MetricKindCollection<T> { + /// Merges another `MetricKindCollection` into this one. + /// + /// # Errors + /// + /// Returns an error if a metric name already exists in the current collection. + pub fn merge(&mut self, other: &Self) -> Result<(), Error> { + self.check_for_name_collision(other)?; + + for (metric_name, metric) in &other.metrics { + self.metrics.insert(metric_name.clone(), metric.clone()); + } + + Ok(()) + } + + fn check_for_name_collision(&self, other: &Self) -> Result<(), Error> { + for metric_name in other.metrics.keys() { + if self.metrics.contains_key(metric_name) { + return Err(Error::MetricNameCollisionInMerge { + metric_name: metric_name.clone(), + }); + } + } + + Ok(()) + } +} + +impl MetricKindCollection<Counter> { + /// Increments the counter for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. 
+ /// + /// # Panics + /// + /// Panics if the metric does not exist. + pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let metric = Metric::<Counter>::new_empty_with_name(name.clone()); + + self.insert_if_absent(metric); + + let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); + + metric.increment(label_set, time); + } + + /// Sets the counter to an absolute value for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist. + pub fn absolute(&mut self, name: &MetricName, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { + let metric = Metric::<Counter>::new_empty_with_name(name.clone()); + + self.insert_if_absent(metric); + + let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); + + metric.absolute(label_set, value, time); + } + + #[must_use] + pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Option<Counter> { + self.metrics + .get(name) + .and_then(|metric| metric.get_sample_data(label_set)) + .map(|sample| sample.value().clone()) + } +} + +impl MetricKindCollection<Gauge> { + /// Sets the gauge for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist and it could not be created. + pub fn set(&mut self, name: &MetricName, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { + let metric = Metric::<Gauge>::new_empty_with_name(name.clone()); + + self.insert_if_absent(metric); + + let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); + + metric.set(label_set, value, time); + } + + /// Increments the gauge for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. 
+ /// + /// # Panics + /// + /// Panics if the metric does not exist and it could not be created. + pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let metric = Metric::<Gauge>::new_empty_with_name(name.clone()); + + self.insert_if_absent(metric); + + let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); + + metric.increment(label_set, time); + } + + /// Decrements the gauge for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist and it could not be created. + pub fn decrement(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let metric = Metric::<Gauge>::new_empty_with_name(name.clone()); + + self.insert_if_absent(metric); + + let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); + + metric.decrement(label_set, time); + } + + #[must_use] + pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Option<Gauge> { + self.metrics + .get(name) + .and_then(|metric| metric.get_sample_data(label_set)) + .map(|sample| sample.value().clone()) + } +} + +#[cfg(test)] +mod tests { + + use pretty_assertions::assert_eq; + + use super::*; + use crate::label::LabelValue; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + use crate::tests::{format_prometheus_output, sort_lines}; + use crate::{label_name, metric_name}; + + /// Fixture for testing serialization and deserialization of `MetricCollection`. + /// + /// It contains a default `MetricCollection` object, its JSON representation, + /// and its Prometheus format representation. 
+ struct MetricCollectionFixture { + pub object: MetricCollection, + pub json: String, + pub prometheus: String, + } + + impl Default for MetricCollectionFixture { + fn default() -> Self { + Self { + object: Self::object(), + json: Self::json(), + prometheus: Self::prometheus(), + } + } + } + + impl MetricCollectionFixture { + fn deconstruct(&self) -> (MetricCollection, String, String) { + (self.object.clone(), self.json.clone(), self.prometheus.clone()) + } + + fn object() -> MetricCollection { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + let label_set_1: LabelSet = [ + (label_name!("server_binding_protocol"), LabelValue::new("http")), + (label_name!("server_binding_ip"), LabelValue::new("0.0.0.0")), + (label_name!("server_binding_port"), LabelValue::new("7070")), + ] + .into(); + + MetricCollection::new( + MetricKindCollection::new(vec![Metric::new( + metric_name!("http_tracker_core_announce_requests_received_total"), + None, + Some(MetricDescription::new("The number of announce requests received.")), + SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set_1.clone())]).unwrap(), + )]) + .unwrap(), + MetricKindCollection::new(vec![Metric::new( + metric_name!("udp_tracker_server_performance_avg_announce_processing_time_ns"), + None, + Some(MetricDescription::new("The average announce processing time in nanoseconds.")), + SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set_1.clone())]).unwrap(), + )]) + .unwrap(), + ) + .unwrap() + } + + fn json() -> String { + r#" + [ + { + "type":"counter", + "name":"http_tracker_core_announce_requests_received_total", + "unit": null, + "description": "The number of announce requests received.", + "samples":[ + { + "value":1, + "recorded_at":"2025-04-02T00:00:00+00:00", + "labels":[ + { + "name":"server_binding_ip", + "value":"0.0.0.0" + }, + { + "name":"server_binding_port", + "value":"7070" + }, + { + "name":"server_binding_protocol", + "value":"http" + } + ] + } + ] + 
}, + { + "type":"gauge", + "name":"udp_tracker_server_performance_avg_announce_processing_time_ns", + "unit": null, + "description": "The average announce processing time in nanoseconds.", + "samples":[ + { + "value":1.0, + "recorded_at":"2025-04-02T00:00:00+00:00", + "labels":[ + { + "name":"server_binding_ip", + "value":"0.0.0.0" + }, + { + "name":"server_binding_port", + "value":"7070" + }, + { + "name":"server_binding_protocol", + "value":"http" + } + ] + } + ] + } + ] + "# + .to_owned() + } + + fn prometheus() -> String { + format_prometheus_output( + r#"# HELP http_tracker_core_announce_requests_received_total The number of announce requests received. +# TYPE http_tracker_core_announce_requests_received_total counter +http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 + +# HELP udp_tracker_server_performance_avg_announce_processing_time_ns The average announce processing time in nanoseconds. +# TYPE udp_tracker_server_performance_avg_announce_processing_time_ns gauge +udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 +"#, + ) + } + } + + #[test] + fn it_should_not_allow_duplicate_names_across_types() { + let counters = MetricKindCollection::new(vec![Metric::new( + metric_name!("test_metric"), + None, + None, + SampleCollection::default(), + )]) + .unwrap(); + let gauges = MetricKindCollection::new(vec![Metric::new( + metric_name!("test_metric"), + None, + None, + SampleCollection::default(), + )]) + .unwrap(); + + assert!(MetricCollection::new(counters, gauges).is_err()); + } + + #[test] + fn it_should_not_allow_creating_a_gauge_with_the_same_name_as_a_counter() { + let mut collection = MetricCollection::default(); + let label_set = LabelSet::default(); + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + // First create a counter + collection + 
.increment_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + + // Then try to create a gauge with the same name + let result = collection.set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time); + + assert!(result.is_err()); + } + + #[test] + fn it_should_not_allow_creating_a_counter_with_the_same_name_as_a_gauge() { + let mut collection = MetricCollection::default(); + let label_set = LabelSet::default(); + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + // First set the gauge + collection + .set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time) + .unwrap(); + + // Then try to create a counter with the same name + let result = collection.increment_counter(&metric_name!("test_metric"), &label_set, time); + + assert!(result.is_err()); + } + + #[test] + fn it_should_allow_serializing_to_json() { + // todo: this test does work with metric with multiple samples because + // samples are not serialized in the same order as they are created. 
+ let (metric_collection, expected_json, _expected_prometheus) = MetricCollectionFixture::default().deconstruct(); + + let json = serde_json::to_string_pretty(&metric_collection).unwrap(); + + assert_eq!( + serde_json::from_str::<serde_json::Value>(&json).unwrap(), + serde_json::from_str::<serde_json::Value>(&expected_json).unwrap() + ); + } + + #[test] + fn it_should_allow_deserializing_from_json() { + let (expected_metric_collection, metric_collection_json, _expected_prometheus) = + MetricCollectionFixture::default().deconstruct(); + + let metric_collection: MetricCollection = serde_json::from_str(&metric_collection_json).unwrap(); + + assert_eq!(metric_collection, expected_metric_collection); + } + + #[test] + fn it_should_allow_serializing_to_prometheus_format() { + let (metric_collection, _expected_json, expected_prometheus) = MetricCollectionFixture::default().deconstruct(); + + let prometheus_output = metric_collection.to_prometheus(); + + assert_eq!(prometheus_output, expected_prometheus); + } + + #[test] + fn it_should_allow_serializing_to_prometheus_format_with_multiple_samples_per_metric() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + let label_set_1: LabelSet = [ + (label_name!("server_binding_protocol"), LabelValue::new("http")), + (label_name!("server_binding_ip"), LabelValue::new("0.0.0.0")), + (label_name!("server_binding_port"), LabelValue::new("7070")), + ] + .into(); + + let label_set_2: LabelSet = [ + (label_name!("server_binding_protocol"), LabelValue::new("http")), + (label_name!("server_binding_ip"), LabelValue::new("0.0.0.0")), + (label_name!("server_binding_port"), LabelValue::new("7171")), + ] + .into(); + + let metric_collection = MetricCollection::new( + MetricKindCollection::new(vec![Metric::new( + metric_name!("http_tracker_core_announce_requests_received_total"), + None, + Some(MetricDescription::new("The number of announce requests received.")), + SampleCollection::new(vec![ + Sample::new(Counter::new(1), time, 
label_set_1.clone()), + Sample::new(Counter::new(2), time, label_set_2.clone()), + ]) + .unwrap(), + )]) + .unwrap(), + MetricKindCollection::default(), + ) + .unwrap(); + + let prometheus_output = metric_collection.to_prometheus(); + + let expected_prometheus_output = format_prometheus_output( + r#"# HELP http_tracker_core_announce_requests_received_total The number of announce requests received. +# TYPE http_tracker_core_announce_requests_received_total counter +http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 +http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7171",server_binding_protocol="http"} 2 +"#, + ); + + // code-review: samples are not serialized in the same order as they are created. + // Should we use a deterministic order? + + assert_eq!(sort_lines(&prometheus_output), sort_lines(&expected_prometheus_output)); + } + + #[test] + fn it_should_exclude_metrics_without_samples_from_prometheus_format() { + let mut counters = MetricKindCollection::default(); + let mut gauges = MetricKindCollection::default(); + + let counter = Metric::<Counter>::new_empty_with_name(metric_name!("test_counter")); + counters.insert_if_absent(counter); + + let gauge = Metric::<Gauge>::new_empty_with_name(metric_name!("test_gauge")); + gauges.insert_if_absent(gauge); + + let metric_collection = MetricCollection::new(counters, gauges).unwrap(); + + let prometheus_output = metric_collection.to_prometheus(); + + assert_eq!(prometheus_output, ""); + } + + #[test] + fn it_should_allow_merging_metric_collections() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection1 = MetricCollection::default(); + collection1 + .increment_counter(&metric_name!("test_counter"), &label_set, time) + .unwrap(); + + let mut collection2 = 
MetricCollection::default(); + collection2 + .set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time) + .unwrap(); + + collection1.merge(&collection2).unwrap(); + + assert!(collection1.contains_counter(&metric_name!("test_counter"))); + assert!(collection1.contains_gauge(&metric_name!("test_gauge"))); + } + + #[test] + fn it_should_not_allow_merging_metric_collections_with_name_collisions_for_the_same_metric_types() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection1 = MetricCollection::default(); + collection1 + .increment_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + + let mut collection2 = MetricCollection::default(); + collection2 + .increment_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + let result = collection1.merge(&collection2); + + assert!(result.is_err()); + } + + #[test] + fn it_should_not_allow_merging_metric_collections_with_name_collisions_for_different_metric_types() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection1 = MetricCollection::default(); + collection1 + .increment_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + + let mut collection2 = MetricCollection::default(); + collection2 + .set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time) + .unwrap(); + + let result = collection1.merge(&collection2); + + assert!(result.is_err()); + } + + fn collection_with_one_counter(metric_name: &MetricName, label_set: &LabelSet, counter: Counter) -> MetricCollection { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + MetricCollection::new( + MetricKindCollection::new(vec![Metric::new( + metric_name.clone(), + None, + None, + SampleCollection::new(vec![Sample::new(counter, time, 
label_set.clone())]).unwrap(), + )]) + .unwrap(), + MetricKindCollection::default(), + ) + .unwrap() + } + + fn collection_with_one_gauge(metric_name: &MetricName, label_set: &LabelSet, gauge: Gauge) -> MetricCollection { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + MetricCollection::new( + MetricKindCollection::default(), + MetricKindCollection::new(vec![Metric::new( + metric_name.clone(), + None, + None, + SampleCollection::new(vec![Sample::new(gauge, time, label_set.clone())]).unwrap(), + )]) + .unwrap(), + ) + .unwrap() + } + + mod for_counters { + + use pretty_assertions::assert_eq; + + use super::*; + use crate::label::LabelValue; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + + #[test] + fn it_should_allow_setting_to_an_absolute_value() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_counter"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); + + collection + .set_counter(&metric_name!("test_counter"), &label_set, 1, time) + .unwrap(); + + assert_eq!( + collection.get_counter_value(&metric_name!("test_counter"), &label_set), + Some(Counter::new(1)) + ); + } + + #[test] + fn it_should_fail_setting_to_an_absolute_value_if_a_gauge_with_the_same_name_exists() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_counter"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(0.0)); + + let result = collection.set_counter(&metric_name!("test_counter"), &label_set, 1, time); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionAdding { metric_name }) if metric_name == metric_name!("test_counter")) + ); + } + + #[test] 
+ fn it_should_increase_a_preexistent_counter() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_counter"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); + + collection + .increment_counter(&metric_name!("test_counter"), &label_set, time) + .unwrap(); + + assert_eq!( + collection.get_counter_value(&metric_name!("test_counter"), &label_set), + Some(Counter::new(1)) + ); + } + + #[test] + fn it_should_automatically_create_a_counter_when_increasing_if_it_does_not_exist() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut metric_collection = + MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); + + metric_collection + .increment_counter(&metric_name!("test_counter"), &label_set, time) + .unwrap(); + metric_collection + .increment_counter(&metric_name!("test_counter"), &label_set, time) + .unwrap(); + + assert_eq!( + metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), + Some(Counter::new(2)) + ); + } + + #[test] + fn it_should_allow_describing_a_counter_before_using_it() { + let mut metric_collection = + MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); + + metric_collection.describe_counter(&metric_name!("test_counter"), None, None); + + assert!(metric_collection.contains_counter(&metric_name!("test_counter"))); + } + + #[test] + fn it_should_not_allow_duplicate_metric_names_when_instantiating() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let result = MetricKindCollection::new(vec![ + Metric::new( + metric_name!("test_counter"), + 
None, + None, + SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), + ), + Metric::new( + metric_name!("test_counter"), + None, + None, + SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), + ), + ]); + + assert!(result.is_err()); + } + } + + mod for_gauges { + + use pretty_assertions::assert_eq; + + use super::*; + use crate::label::LabelValue; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + + #[test] + fn it_should_set_a_preexistent_gauge() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(0.0)); + + collection + .set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time) + .unwrap(); + + assert_eq!( + collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), + Some(Gauge::new(1.0)) + ); + } + + #[test] + fn it_should_allow_incrementing_a_gauge() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(0.0)); + + collection + .increment_gauge(&metric_name!("test_gauge"), &label_set, time) + .unwrap(); + + assert_eq!( + collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), + Some(Gauge::new(1.0)) + ); + } + + #[test] + fn it_should_fail_incrementing_a_gauge_if_it_exists_a_counter_with_the_same_name() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = 
collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); + + let result = collection.increment_gauge(&metric_name!("test_gauge"), &label_set, time); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionAdding { metric_name }) if metric_name == metric_name!("test_gauge")) + ); + } + + #[test] + fn it_should_allow_decrementing_a_gauge() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(1.0)); + + collection + .decrement_gauge(&metric_name!("test_gauge"), &label_set, time) + .unwrap(); + + assert_eq!( + collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), + Some(Gauge::new(0.0)) + ); + } + + #[test] + fn it_should_fail_decrementing_a_gauge_if_it_exists_a_counter_with_the_same_name() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); + + let result = collection.decrement_gauge(&metric_name!("test_gauge"), &label_set, time); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionAdding { metric_name }) if metric_name == metric_name!("test_gauge")) + ); + } + + #[test] + fn it_should_automatically_create_a_gauge_when_setting_if_it_does_not_exist() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut metric_collection = + MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); + + metric_collection + .set_gauge(&metric_name!("test_gauge"), &label_set, 
1.0, time) + .unwrap(); + + assert_eq!( + metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), + Some(Gauge::new(1.0)) + ); + } + + #[test] + fn it_should_allow_describing_a_gauge_before_using_it() { + let mut metric_collection = + MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); + + metric_collection.describe_gauge(&metric_name!("test_gauge"), None, None); + + assert!(metric_collection.contains_gauge(&metric_name!("test_gauge"))); + } + + #[test] + fn it_should_not_allow_duplicate_metric_names_when_instantiating() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let result = MetricKindCollection::new(vec![ + Metric::new( + metric_name!("test_gauge"), + None, + None, + SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), + ), + Metric::new( + metric_name!("test_gauge"), + None, + None, + SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), + ), + ]); + + assert!(result.is_err()); + } + } + + mod metric_kind_collection { + + use crate::counter::Counter; + use crate::gauge::Gauge; + use crate::metric::Metric; + use crate::metric_collection::{Error, MetricKindCollection}; + use crate::metric_name; + + #[test] + fn it_should_not_allow_merging_counter_metric_collections_with_name_collisions() { + let mut collection1 = MetricKindCollection::<Counter>::default(); + collection1.insert(Metric::<Counter>::new_empty_with_name(metric_name!("test_metric"))); + + let mut collection2 = MetricKindCollection::<Counter>::default(); + collection2.insert(Metric::<Counter>::new_empty_with_name(metric_name!("test_metric"))); + + let result = collection1.merge(&collection2); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionInMerge { metric_name }) if metric_name == metric_name!("test_metric")) + 
); + } + + #[test] + fn it_should_not_allow_merging_gauge_metric_collections_with_name_collisions() { + let mut collection1 = MetricKindCollection::<Gauge>::default(); + collection1.insert(Metric::<Gauge>::new_empty_with_name(metric_name!("test_metric"))); + + let mut collection2 = MetricKindCollection::<Gauge>::default(); + collection2.insert(Metric::<Gauge>::new_empty_with_name(metric_name!("test_metric"))); + + let result = collection1.merge(&collection2); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionInMerge { metric_name }) if metric_name == metric_name!("test_metric")) + ); + } + } +} diff --git a/packages/metrics/src/prometheus.rs b/packages/metrics/src/prometheus.rs new file mode 100644 index 000000000..bf058e442 --- /dev/null +++ b/packages/metrics/src/prometheus.rs @@ -0,0 +1,15 @@ +pub trait PrometheusSerializable { + /// Convert the implementing type into a Prometheus exposition format string. + /// + /// # Returns + /// + /// A `String` containing the serialized representation. 
+ fn to_prometheus(&self) -> String; +} + +// Blanket implementation for references +impl<T: PrometheusSerializable> PrometheusSerializable for &T { + fn to_prometheus(&self) -> String { + (*self).to_prometheus() + } +} diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs new file mode 100644 index 000000000..63f46b9b8 --- /dev/null +++ b/packages/metrics/src/sample.rs @@ -0,0 +1,469 @@ +use chrono::{DateTime, Utc}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::counter::Counter; +use super::gauge::Gauge; +use super::label::LabelSet; +use super::prometheus::PrometheusSerializable; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Sample<T> { + #[serde(flatten)] + measurement: Measurement<T>, + + #[serde(rename = "labels")] + label_set: LabelSet, +} + +impl<T> Sample<T> { + #[must_use] + pub fn new(value: T, recorded_at: DurationSinceUnixEpoch, label_set: LabelSet) -> Self { + let data = Measurement { value, recorded_at }; + + Self { + measurement: data, + label_set, + } + } + + #[must_use] + pub fn measurement(&self) -> &Measurement<T> { + &self.measurement + } + + #[must_use] + pub fn value(&self) -> &T { + &self.measurement.value + } + + #[must_use] + pub fn recorded_at(&self) -> DurationSinceUnixEpoch { + self.measurement.recorded_at + } + + #[must_use] + pub fn labels(&self) -> &LabelSet { + &self.label_set + } +} + +impl<T: PrometheusSerializable> PrometheusSerializable for Sample<T> { + fn to_prometheus(&self) -> String { + if self.label_set.is_empty() { + format!(" {}", self.measurement.to_prometheus()) + } else { + format!("{} {}", self.label_set.to_prometheus(), self.measurement.to_prometheus()) + } + } +} + +impl Sample<Counter> { + pub fn increment(&mut self, time: DurationSinceUnixEpoch) { + self.measurement.increment(time); + } +} + +impl Sample<Gauge> { + pub fn set(&mut self, value: f64, time: 
DurationSinceUnixEpoch) { + self.measurement.set(value, time); + } + + pub fn increment(&mut self, time: DurationSinceUnixEpoch) { + self.measurement.increment(time); + } + + pub fn decrement(&mut self, time: DurationSinceUnixEpoch) { + self.measurement.decrement(time); + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Measurement<T> { + /// The value of the sample. + value: T, + + /// The time when the sample was last updated. + #[serde(serialize_with = "serialize_duration", deserialize_with = "deserialize_duration")] + recorded_at: DurationSinceUnixEpoch, +} + +impl<T> Measurement<T> { + #[must_use] + pub fn new(value: T, recorded_at: DurationSinceUnixEpoch) -> Self { + Self { value, recorded_at } + } + + #[must_use] + pub fn value(&self) -> &T { + &self.value + } + + #[must_use] + pub fn recorded_at(&self) -> DurationSinceUnixEpoch { + self.recorded_at + } + + fn set_recorded_at(&mut self, time: DurationSinceUnixEpoch) { + self.recorded_at = time; + } +} + +impl<T> From<Sample<T>> for (LabelSet, Measurement<T>) { + fn from(sample: Sample<T>) -> Self { + (sample.label_set, sample.measurement) + } +} + +impl<T: PrometheusSerializable> PrometheusSerializable for Measurement<T> { + fn to_prometheus(&self) -> String { + self.value.to_prometheus() + } +} + +impl Measurement<Counter> { + pub fn increment(&mut self, time: DurationSinceUnixEpoch) { + self.value.increment(1); + self.set_recorded_at(time); + } + + pub fn absolute(&mut self, value: u64, time: DurationSinceUnixEpoch) { + self.value.absolute(value); + self.set_recorded_at(time); + } +} + +impl Measurement<Gauge> { + pub fn set(&mut self, value: f64, time: DurationSinceUnixEpoch) { + self.value.set(value); + self.set_recorded_at(time); + } + + pub fn increment(&mut self, time: DurationSinceUnixEpoch) { + self.value.increment(1.0); + self.set_recorded_at(time); + } + + pub fn decrement(&mut self, time: DurationSinceUnixEpoch) { + self.value.decrement(1.0); + 
self.set_recorded_at(time);
+    }
+}
+
+/// Serializes the `recorded_at` field as a string in ISO 8601 format (RFC 3339).
+///
+/// # Errors
+///
+/// Returns an error if:
+/// - The conversion from `u64` to `i64` fails.
+/// - The timestamp is invalid.
+fn serialize_duration<S>(duration: &DurationSinceUnixEpoch, serializer: S) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    let secs = i64::try_from(duration.as_secs()).map_err(|_| serde::ser::Error::custom("Timestamp too large"))?;
+    let nanos = duration.subsec_nanos();
+
+    let datetime = DateTime::from_timestamp(secs, nanos).ok_or_else(|| serde::ser::Error::custom("Invalid timestamp"))?;
+
+    serializer.serialize_str(&datetime.to_rfc3339()) // Serializes as ISO 8601 (RFC 3339)
+}
+
+fn deserialize_duration<'de, D>(deserializer: D) -> Result<DurationSinceUnixEpoch, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    // Deserialize the ISO 8601 (RFC 3339) formatted string
+    let datetime_str = String::deserialize(deserializer)?;
+
+    let datetime =
+        DateTime::parse_from_rfc3339(&datetime_str).map_err(|e| de::Error::custom(format!("Invalid datetime format: {e}")))?;
+
+    let datetime_utc = datetime.with_timezone(&Utc);
+
+    let secs = u64::try_from(datetime_utc.timestamp()).map_err(|_| de::Error::custom("Timestamp out of range"))?;
+
+    Ok(DurationSinceUnixEpoch::new(secs, datetime_utc.timestamp_subsec_nanos()))
+}
+
+#[cfg(test)]
+mod tests {
+    use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+    use super::*;
+
+    // Helper function to create a sample update time. 
+ fn updated_at_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(1_743_552_000) + } + + #[test] + fn it_should_have_a_value() { + let sample = Sample::new( + 42, + DurationSinceUnixEpoch::from_secs(1_743_552_000), + LabelSet::from(vec![("test", "label")]), + ); + + assert_eq!(sample.value(), &42); + } + + #[test] + fn it_should_record_the_latest_update_time() { + let sample = Sample::new( + 42, + DurationSinceUnixEpoch::from_secs(1_743_552_000), + LabelSet::from(vec![("test", "label")]), + ); + + assert_eq!(sample.recorded_at(), updated_at_time()); + } + + #[test] + fn it_should_include_a_label_set() { + let sample = Sample::new( + 42, + DurationSinceUnixEpoch::from_secs(1_743_552_000), + LabelSet::from(vec![("test", "label")]), + ); + + assert_eq!(sample.labels(), &LabelSet::from(vec![("test", "label")])); + } + + mod for_counter_type_sample { + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelSet; + use crate::prometheus::PrometheusSerializable; + use crate::sample::tests::updated_at_time; + use crate::sample::{Counter, Sample}; + + #[test] + fn it_should_allow_a_counter_type_value() { + let sample = Sample::new( + Counter::new(42), + DurationSinceUnixEpoch::from_secs(1_743_552_000), + LabelSet::from(vec![("label_name", "label vale")]), + ); + + assert_eq!(sample.value(), &Counter::new(42)); + } + + #[test] + fn it_should_allow_incrementing_the_counter() { + let mut sample = Sample::new(Counter::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); + + sample.increment(updated_at_time()); + + assert_eq!(sample.value(), &Counter::new(1)); + } + + #[test] + fn it_should_record_the_latest_update_time_when_the_counter_is_incremented() { + let mut sample = Sample::new(Counter::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); + + let time = updated_at_time(); + + sample.increment(time); + + assert_eq!(sample.recorded_at(), time); + } + + #[test] + fn 
it_should_allow_exporting_to_prometheus_format() { + let counter = Counter::new(42); + + let labels = LabelSet::from(vec![("label_name", "label_value"), ("method", "GET")]); + + let sample = Sample::new(counter, DurationSinceUnixEpoch::default(), labels); + + assert_eq!(sample.to_prometheus(), r#"{label_name="label_value",method="GET"} 42"#); + } + + #[test] + fn it_should_allow_exporting_to_prometheus_format_with_empty_label_set() { + let counter = Counter::new(42); + + let sample = Sample::new(counter, DurationSinceUnixEpoch::default(), LabelSet::default()); + + assert_eq!(sample.to_prometheus(), " 42"); + } + } + mod for_gauge_type_sample { + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelSet; + use crate::prometheus::PrometheusSerializable; + use crate::sample::tests::updated_at_time; + use crate::sample::{Gauge, Sample}; + + #[test] + fn it_should_allow_a_counter_type_value() { + let sample = Sample::new( + Gauge::new(42.0), + DurationSinceUnixEpoch::from_secs(1_743_552_000), + LabelSet::from(vec![("label_name", "label vale")]), + ); + + assert_eq!(sample.value(), &Gauge::new(42.0)); + } + + #[test] + fn it_should_allow_setting_a_value() { + let mut sample = Sample::new(Gauge::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); + + sample.set(1.0, updated_at_time()); + + assert_eq!(sample.value(), &Gauge::new(1.0)); + } + + #[test] + fn it_should_allow_incrementing_the_value() { + let mut sample = Sample::new(Gauge::new(0.0), DurationSinceUnixEpoch::default(), LabelSet::default()); + + sample.increment(updated_at_time()); + + assert_eq!(sample.value(), &Gauge::new(1.0)); + } + + #[test] + fn it_should_allow_decrementing_the_value() { + let mut sample = Sample::new(Gauge::new(1.0), DurationSinceUnixEpoch::default(), LabelSet::default()); + + sample.decrement(updated_at_time()); + + assert_eq!(sample.value(), &Gauge::new(0.0)); + } + + #[test] + fn 
it_should_record_the_latest_update_time_when_the_counter_is_incremented() { + let mut sample = Sample::new(Gauge::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); + + let time = updated_at_time(); + + sample.set(1.0, time); + + assert_eq!(sample.recorded_at(), time); + } + + #[test] + fn it_should_allow_exporting_to_prometheus_format() { + let counter = Gauge::new(42.0); + + let labels = LabelSet::from(vec![("label_name", "label_value"), ("method", "GET")]); + + let sample = Sample::new(counter, DurationSinceUnixEpoch::default(), labels); + + assert_eq!(sample.to_prometheus(), r#"{label_name="label_value",method="GET"} 42"#); + } + + #[test] + fn it_should_allow_exporting_to_prometheus_format_with_empty_label_set() { + let gauge = Gauge::new(42.0); + + let sample = Sample::new(gauge, DurationSinceUnixEpoch::default(), LabelSet::default()); + + assert_eq!(sample.to_prometheus(), " 42"); + } + } + + mod serialization_to_json { + use pretty_assertions::assert_eq; + use serde_json::json; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelSet; + use crate::sample::tests::updated_at_time; + use crate::sample::Sample; + + #[test] + fn test_serialization_round_trip() { + let original = Sample::new(42, updated_at_time(), LabelSet::from(vec![("test", "serialization")])); + + let json = serde_json::to_string(&original).unwrap(); + let deserialized: Sample<i32> = serde_json::from_str(&json).unwrap(); + + assert_eq!(original.measurement.value, deserialized.measurement.value); + assert_eq!(original.measurement.recorded_at, deserialized.measurement.recorded_at); + assert_eq!(original.label_set, deserialized.label_set); + } + + #[test] + fn test_rfc3339_serialization_format_for_update_time() { + let sample = Sample::new( + 42, + DurationSinceUnixEpoch::new(1_743_552_000, 100), + LabelSet::from(vec![("label_name", "label value")]), + ); + + let json = serde_json::to_string(&sample).unwrap(); + + let expected_json = r#" + { + 
"value": 42, + "recorded_at": "2025-04-02T00:00:00.000000100+00:00", + "labels": [ + { + "name": "label_name", + "value": "label value" + } + ] + } + "#; + + assert_eq!( + serde_json::from_str::<serde_json::Value>(&json).unwrap(), + serde_json::from_str::<serde_json::Value>(expected_json).unwrap() + ); + } + + #[test] + fn test_invalid_update_timestamp_serialization() { + let timestamp_too_large = DurationSinceUnixEpoch::new(i64::MAX as u64 + 1, 0); + + let sample = Sample::new(42, timestamp_too_large, LabelSet::from(vec![("label_name", "label value")])); + + let result = serde_json::to_string(&sample); + + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("Timestamp too large")); + } + + #[test] + fn test_invalid_update_datetime_deserialization() { + let invalid_json = json!( + r#" + { + "value": 42, + "recorded_at": "1-1-2023T25:00:00Z", + "labels": [ + { + "name": "label_name", + "value": "label value" + } + ] + } + "# + ); + + let result: Result<DurationSinceUnixEpoch, _> = serde_json::from_value(invalid_json); + + assert!(result.unwrap_err().to_string().contains("invalid type")); + } + + #[test] + fn test_update_datetime_high_precision_nanoseconds() { + let sample = Sample::new( + 42, + DurationSinceUnixEpoch::new(1_743_552_000, 100), + LabelSet::from(vec![("label_name", "label value")]), + ); + + let json = serde_json::to_string(&sample).unwrap(); + + let deserialized: Sample<i32> = serde_json::from_str(&json).unwrap(); + + assert_eq!(deserialized, sample); + } + } +} diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs new file mode 100644 index 000000000..e520d7310 --- /dev/null +++ b/packages/metrics/src/sample_collection.rs @@ -0,0 +1,543 @@ +use std::collections::hash_map::Iter; +use std::collections::HashMap; +use std::fmt::Write as _; + +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use 
super::counter::Counter;
+use super::gauge::Gauge;
+use super::label::LabelSet;
+use super::prometheus::PrometheusSerializable;
+use super::sample::Sample;
+use crate::sample::Measurement;
+
+#[derive(Debug, Clone, Default, PartialEq)]
+pub struct SampleCollection<T> {
+    samples: HashMap<LabelSet, Measurement<T>>,
+}
+
+impl<T> SampleCollection<T> {
+    /// Creates a new `SampleCollection` from a vector of samples.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if there are duplicate `LabelSets` in the provided
+    /// samples.
+    pub fn new(samples: Vec<Sample<T>>) -> Result<Self, Error> {
+        let mut map: HashMap<LabelSet, Measurement<T>> = HashMap::with_capacity(samples.len());
+
+        for sample in samples {
+            let (label_set, sample_data): (LabelSet, Measurement<T>) = sample.into();
+
+            let label_set_clone = label_set.clone();
+
+            if let Some(_old_measurement) = map.insert(label_set, sample_data) {
+                return Err(Error::DuplicateLabelSetInList {
+                    label_set: label_set_clone,
+                });
+            }
+        }
+
+        Ok(Self { samples: map })
+    }
+
+    #[must_use]
+    pub fn get(&self, label: &LabelSet) -> Option<&Measurement<T>> {
+        self.samples.get(label)
+    }
+
+    #[must_use]
+    pub fn len(&self) -> usize {
+        self.samples.len()
+    }
+
+    #[must_use]
+    pub fn is_empty(&self) -> bool {
+        self.samples.is_empty()
+    }
+
+    #[must_use]
+    #[allow(clippy::iter_without_into_iter)]
+    pub fn iter(&self) -> Iter<'_, LabelSet, Measurement<T>> {
+        self.samples.iter()
+    }
+}
+
+#[derive(thiserror::Error, Debug, Clone)]
+pub enum Error {
+    #[error("Found duplicate label set in list. 
Label set must be unique in a SampleCollection.")] + DuplicateLabelSetInList { label_set: LabelSet }, +} + +impl SampleCollection<Counter> { + pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Counter::default(), time)); + + sample.increment(time); + } + + pub fn absolute(&mut self, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Counter::default(), time)); + + sample.absolute(value, time); + } +} + +impl SampleCollection<Gauge> { + pub fn set(&mut self, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Gauge::default(), time)); + + sample.set(value, time); + } + + pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Gauge::default(), time)); + + sample.increment(time); + } + + pub fn decrement(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Gauge::default(), time)); + + sample.decrement(time); + } +} + +impl<T: Serialize> Serialize for SampleCollection<T> { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + let mut samples: Vec<Sample<&T>> = vec![]; + + for (label_set, sample_data) in &self.samples { + samples.push(Sample::new(sample_data.value(), sample_data.recorded_at(), label_set.clone())); + } + + samples.serialize(serializer) + } +} + +impl<'de, T> Deserialize<'de> for SampleCollection<T> +where + T: Deserialize<'de>, +{ + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let 
samples = Vec::<Sample<T>>::deserialize(deserializer)?; + + let sample_collection = SampleCollection::new(samples).map_err(serde::de::Error::custom)?; + + Ok(sample_collection) + } +} + +impl<T: PrometheusSerializable> PrometheusSerializable for SampleCollection<T> { + fn to_prometheus(&self) -> String { + let mut output = String::new(); + + for (label_set, sample_data) in &self.samples { + if label_set.is_empty() { + let _ = write!(output, "{}", sample_data.to_prometheus()); + } else { + let _ = write!(output, "{} {}", label_set.to_prometheus(), sample_data.to_prometheus()); + } + } + + output + } +} + +#[cfg(test)] +mod tests { + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::counter::Counter; + use crate::label::LabelSet; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + + fn sample_update_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(1_743_552_000) + } + + #[test] + fn it_should_fail_trying_to_create_a_sample_collection_with_duplicate_label_sets() { + let samples = vec![ + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + ]; + + let result = SampleCollection::new(samples); + + assert!(result.is_err()); + } + + #[test] + fn it_should_return_a_sample_searching_by_label_set_with_one_empty_label_set() { + let label_set = LabelSet::default(); + + let sample = Sample::new(Counter::default(), sample_update_time(), label_set.clone()); + + let collection = SampleCollection::new(vec![sample.clone()]).unwrap(); + + let retrieved = collection.get(&label_set); + + assert_eq!(retrieved.unwrap(), sample.measurement()); + } + + #[test] + fn it_should_return_a_sample_searching_by_label_set_with_two_label_sets() { + let label_set_1 = LabelSet::from(vec![("label_name_1", "label value 1")]); + let label_set_2 = LabelSet::from(vec![("label_name_2", "label value 2")]); + + let sample_1 = 
Sample::new(Counter::new(1), sample_update_time(), label_set_1.clone()); + let sample_2 = Sample::new(Counter::new(2), sample_update_time(), label_set_2.clone()); + + let collection = SampleCollection::new(vec![sample_1.clone(), sample_2.clone()]).unwrap(); + + let retrieved = collection.get(&label_set_1); + assert_eq!(retrieved.unwrap(), sample_1.measurement()); + + let retrieved = collection.get(&label_set_2); + assert_eq!(retrieved.unwrap(), sample_2.measurement()); + } + + #[test] + fn it_should_return_the_number_of_samples_in_the_collection() { + let samples = vec![Sample::new(Counter::default(), sample_update_time(), LabelSet::default())]; + let collection = SampleCollection::new(samples).unwrap(); + assert_eq!(collection.len(), 1); + } + + #[test] + fn it_should_return_zero_number_of_samples_when_empty() { + let empty = SampleCollection::<Counter>::default(); + assert_eq!(empty.len(), 0); + } + + #[test] + fn it_should_indicate_is_it_is_empty() { + let empty = SampleCollection::<Counter>::default(); + assert!(empty.is_empty()); + + let samples = vec![Sample::new(Counter::default(), sample_update_time(), LabelSet::default())]; + let collection = SampleCollection::new(samples).unwrap(); + assert!(!collection.is_empty()); + } + + mod json_serialization { + use crate::counter::Counter; + use crate::label::LabelSet; + use crate::sample::Sample; + use crate::sample_collection::tests::sample_update_time; + use crate::sample_collection::SampleCollection; + + #[test] + fn it_should_be_serializable_and_deserializable_for_json_format() { + let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); + let collection = SampleCollection::new(vec![sample]).unwrap(); + + let serialized = serde_json::to_string(&collection).unwrap(); + let deserialized: SampleCollection<Counter> = serde_json::from_str(&serialized).unwrap(); + + assert_eq!(deserialized, collection); + } + + #[test] + fn 
it_should_fail_deserializing_from_json_with_duplicate_label_sets() { + let samples = vec![ + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + ]; + + let serialized = serde_json::to_string(&samples).unwrap(); + + let result: Result<SampleCollection<Counter>, _> = serde_json::from_str(&serialized); + + assert!(result.is_err()); + } + } + + mod prometheus_serialization { + use crate::counter::Counter; + use crate::label::LabelSet; + use crate::prometheus::PrometheusSerializable; + use crate::sample::Sample; + use crate::sample_collection::tests::sample_update_time; + use crate::sample_collection::SampleCollection; + use crate::tests::format_prometheus_output; + + #[test] + fn it_should_be_exportable_to_prometheus_format_when_empty() { + let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); + let collection = SampleCollection::new(vec![sample]).unwrap(); + + let prometheus_output = collection.to_prometheus(); + + assert!(!prometheus_output.is_empty()); + } + + #[test] + fn it_should_be_exportable_to_prometheus_format() { + let sample = Sample::new( + Counter::new(1), + sample_update_time(), + LabelSet::from(vec![("labe_name_1", "label value value 1")]), + ); + + let collection = SampleCollection::new(vec![sample]).unwrap(); + + let prometheus_output = collection.to_prometheus(); + + let expected_prometheus_output = format_prometheus_output("{labe_name_1=\"label value value 1\"} 1"); + + assert_eq!(prometheus_output, expected_prometheus_output); + } + } + + #[cfg(test)] + mod for_counters { + + use std::ops::Add; + + use super::super::LabelSet; + use super::*; + + #[test] + fn it_should_increment_the_counter_for_a_preexisting_label_set() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::<Counter>::default(); + + // Initialize the sample + collection.increment(&label_set, sample_update_time()); + + 
// Verify initial state + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.value(), &Counter::new(1)); + + // Increment again + collection.increment(&label_set, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Counter::new(2)); + } + + #[test] + fn it_should_allow_increment_the_counter_for_a_non_existent_label_set() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::<Counter>::default(); + + // Increment a non-existent label + collection.increment(&label_set, sample_update_time()); + + // Verify the label exists + assert!(collection.get(&label_set).is_some()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Counter::new(1)); + } + + #[test] + fn it_should_update_the_latest_update_time_when_incremented() { + let label_set = LabelSet::default(); + let initial_time = sample_update_time(); + + let mut collection = SampleCollection::<Counter>::default(); + collection.increment(&label_set, initial_time); + + // Increment with a new time + let new_time = initial_time.add(DurationSinceUnixEpoch::from_secs(1)); + collection.increment(&label_set, new_time); + + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.recorded_at(), new_time); + assert_eq!(*sample.value(), Counter::new(2)); + } + + #[test] + fn it_should_increment_the_counter_for_multiple_labels() { + let label1 = LabelSet::from([("name", "value1")]); + let label2 = LabelSet::from([("name", "value2")]); + let now = sample_update_time(); + + let mut collection = SampleCollection::<Counter>::default(); + + collection.increment(&label1, now); + collection.increment(&label2, now); + + assert_eq!(collection.get(&label1).unwrap().value(), &Counter::new(1)); + assert_eq!(collection.get(&label2).unwrap().value(), &Counter::new(1)); + assert_eq!(collection.len(), 2); + } + + #[test] + fn it_should_allow_setting_absolute_value_for_a_counter() { + let label_set = 
LabelSet::default(); + let mut collection = SampleCollection::<Counter>::default(); + + // Set absolute value for a non-existent label + collection.absolute(&label_set, 42, sample_update_time()); + + // Verify the label exists and has the absolute value + assert!(collection.get(&label_set).is_some()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Counter::new(42)); + } + + #[test] + fn it_should_allow_setting_absolute_value_for_existing_counter() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::<Counter>::default(); + + // Initialize the sample with increment + collection.increment(&label_set, sample_update_time()); + + // Verify initial state + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.value(), &Counter::new(1)); + + // Set absolute value + collection.absolute(&label_set, 100, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Counter::new(100)); + } + + #[test] + fn it_should_update_time_when_setting_absolute_value() { + let label_set = LabelSet::default(); + let initial_time = sample_update_time(); + let mut collection = SampleCollection::<Counter>::default(); + + // Set absolute value with initial time + collection.absolute(&label_set, 50, initial_time); + + // Set absolute value with a new time + let new_time = initial_time.add(DurationSinceUnixEpoch::from_secs(1)); + collection.absolute(&label_set, 75, new_time); + + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.recorded_at(), new_time); + assert_eq!(*sample.value(), Counter::new(75)); + } + } + + #[cfg(test)] + mod for_gauges { + + use std::ops::Add; + + use super::super::LabelSet; + use super::*; + use crate::gauge::Gauge; + + #[test] + fn it_should_allow_setting_the_gauge_for_a_preexisting_label_set() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::<Gauge>::default(); + + // Initialize the sample + 
collection.set(&label_set, 1.0, sample_update_time()); + + // Verify initial state + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.value(), &Gauge::new(1.0)); + + // Set again + collection.set(&label_set, 2.0, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Gauge::new(2.0)); + } + + #[test] + fn it_should_allow_setting_the_gauge_for_a_non_existent_label_set() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::<Gauge>::default(); + + // Set a non-existent label + collection.set(&label_set, 1.0, sample_update_time()); + + // Verify the label exists + assert!(collection.get(&label_set).is_some()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Gauge::new(1.0)); + } + + #[test] + fn it_should_update_the_latest_update_time_when_setting() { + let label_set = LabelSet::default(); + let initial_time = sample_update_time(); + + let mut collection = SampleCollection::<Gauge>::default(); + collection.set(&label_set, 1.0, initial_time); + + // Set with a new time + let new_time = initial_time.add(DurationSinceUnixEpoch::from_secs(1)); + collection.set(&label_set, 2.0, new_time); + + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.recorded_at(), new_time); + assert_eq!(*sample.value(), Gauge::new(2.0)); + } + + #[test] + fn it_should_allow_setting_the_gauge_for_multiple_labels() { + let label1 = LabelSet::from([("name", "value1")]); + let label2 = LabelSet::from([("name", "value2")]); + let now = sample_update_time(); + + let mut collection = SampleCollection::<Gauge>::default(); + + collection.set(&label1, 1.0, now); + collection.set(&label2, 2.0, now); + + assert_eq!(collection.get(&label1).unwrap().value(), &Gauge::new(1.0)); + assert_eq!(collection.get(&label2).unwrap().value(), &Gauge::new(2.0)); + assert_eq!(collection.len(), 2); + } + + #[test] + fn it_should_allow_incrementing_the_gauge() { + let 
label_set = LabelSet::default(); + let mut collection = SampleCollection::<Gauge>::default(); + + // Initialize the sample + collection.set(&label_set, 1.0, sample_update_time()); + + // Increment + collection.increment(&label_set, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Gauge::new(2.0)); + } + + #[test] + fn it_should_allow_decrementing_the_gauge() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::<Gauge>::default(); + + // Initialize the sample + collection.set(&label_set, 1.0, sample_update_time()); + + // Increment + collection.decrement(&label_set, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Gauge::new(0.0)); + } + } +} diff --git a/packages/metrics/src/unit.rs b/packages/metrics/src/unit.rs new file mode 100644 index 000000000..43b42bf79 --- /dev/null +++ b/packages/metrics/src/unit.rs @@ -0,0 +1,30 @@ +//! This module defines the `Unit` enum, which represents various units of +//! measurement. +//! +//! The `Unit` enum is used to specify the unit of measurement for metrics. +//! +//! They were copied from the `metrics` crate, to allow future compatibility. + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum Unit { + Count, + Percent, + Seconds, + Milliseconds, + Microseconds, + Nanoseconds, + Tebibytes, + Gibibytes, + Mebibytes, + Kibibytes, + Bytes, + TerabitsPerSecond, + GigabitsPerSecond, + MegabitsPerSecond, + KilobitsPerSecond, + BitsPerSecond, + CountPerSecond, +} diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 1396d8bc8..c9ce64177 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types shared by the Torrust tracker packages." 
-keywords = ["api", "library", "primitives"] +keywords = [ "api", "library", "primitives" ] name = "torrust-tracker-primitives" readme = "README.md" @@ -18,10 +18,14 @@ version.workspace = true aquatic_udp_protocol = "0" binascii = "0" bittorrent-primitives = "0.1.0" -derive_more = { version = "2", features = ["constructor"] } -serde = { version = "1", features = ["derive"] } +derive_more = { version = "2", features = [ "constructor" ] } +serde = { version = "1", features = [ "derive" ] } tdyne-peer-id = "1" tdyne-peer-id-registry = "0" thiserror = "2" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +url = "2.5.4" zerocopy = "0.7" + +[dev-dependencies] +rstest = "0.25.0" diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index b50516893..ec2edda97 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -7,6 +7,7 @@ pub mod core; pub mod pagination; pub mod peer; +pub mod service_binding; pub mod swarm_metadata; use std::collections::BTreeMap; @@ -17,5 +18,5 @@ use bittorrent_primitives::info_hash::InfoHash; /// Duration since the Unix Epoch. pub type DurationSinceUnixEpoch = Duration; -pub type PersistentTorrent = u32; -pub type PersistentTorrents = BTreeMap<InfoHash, PersistentTorrent>; +pub type NumberOfDownloads = u32; +pub type NumberOfDownloadsBTreeMap = BTreeMap<InfoHash, NumberOfDownloads>; diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index c8ff1791d..ef47f28f8 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -22,8 +22,10 @@ //! }; //! 
``` +use std::fmt; use std::net::{IpAddr, SocketAddr}; use std::ops::{Deref, DerefMut}; +use std::str::FromStr; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; @@ -32,6 +34,59 @@ use zerocopy::FromBytes as _; use crate::DurationSinceUnixEpoch; +pub type PeerAnnouncement = Peer; + +#[derive(Debug, Serialize, Copy, Clone, PartialEq, Eq, Hash)] +#[serde(rename_all_fields = "lowercase")] +pub enum PeerRole { + Seeder, + Leecher, +} + +impl PeerRole { + /// Returns the opposite role: Seeder becomes Leecher, and vice versa. + #[must_use] + pub fn opposite(self) -> Self { + match self { + PeerRole::Seeder => PeerRole::Leecher, + PeerRole::Leecher => PeerRole::Seeder, + } + } +} + +impl fmt::Display for PeerRole { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PeerRole::Seeder => write!(f, "seeder"), + PeerRole::Leecher => write!(f, "leecher"), + } + } +} + +impl FromStr for PeerRole { + type Err = ParsePeerRoleError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s.to_lowercase().as_str() { + "seeder" => Ok(PeerRole::Seeder), + "leecher" => Ok(PeerRole::Leecher), + _ => Err(ParsePeerRoleError::InvalidPeerRole { + location: Location::caller(), + raw_param: s.to_string(), + }), + } + } +} + +#[derive(Error, Debug)] +pub enum ParsePeerRoleError { + #[error("invalid param {raw_param} in {location}")] + InvalidPeerRole { + location: &'static Location<'static>, + raw_param: String, + }, +} + /// Peer struct used by the core `Tracker`. 
/// /// A sample peer: @@ -139,12 +194,13 @@ impl Ord for Peer { impl PartialOrd for Peer { fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { - Some(self.peer_id.cmp(&other.peer_id)) + Some(self.cmp(other)) } } pub trait ReadInfo { fn is_seeder(&self) -> bool; + fn is_leecher(&self) -> bool; fn get_event(&self) -> AnnounceEvent; fn get_id(&self) -> PeerId; fn get_updated(&self) -> DurationSinceUnixEpoch; @@ -156,6 +212,10 @@ impl ReadInfo for Peer { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + fn is_leecher(&self) -> bool { + !self.is_seeder() + } + fn get_event(&self) -> AnnounceEvent { self.event } @@ -178,6 +238,10 @@ impl ReadInfo for Arc<Peer> { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + fn is_leecher(&self) -> bool { + !self.is_seeder() + } + fn get_event(&self) -> AnnounceEvent { self.event } @@ -201,6 +265,25 @@ impl Peer { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + #[must_use] + pub fn is_leecher(&self) -> bool { + !self.is_seeder() + } + + #[must_use] + pub fn is_completed(&self) -> bool { + self.event == AnnounceEvent::Completed + } + + #[must_use] + pub fn role(&self) -> PeerRole { + if self.is_seeder() { + PeerRole::Seeder + } else { + PeerRole::Leecher + } + } + pub fn ip(&mut self) -> IpAddr { self.peer_addr.ip() } @@ -208,6 +291,26 @@ impl Peer { pub fn change_ip(&mut self, new_ip: &IpAddr) { self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); } + + pub fn mark_as_completed(&mut self) { + self.event = AnnounceEvent::Completed; + } + + #[must_use] + pub fn into_completed(self) -> Self { + Self { + event: AnnounceEvent::Completed, + ..self + } + } + + #[must_use] + pub fn into_seeder(self) -> Self { + Self { + left: NumberOfBytes::new(0), + ..self + } + } } use std::panic::Location; @@ -414,7 +517,7 @@ pub mod fixture { pub fn seeder() -> Self { let peer = Peer { peer_id: PeerId(*b"-qB00000000000000001"), - peer_addr: 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), downloaded: NumberOfBytes::new(0), @@ -455,34 +558,59 @@ pub mod fixture { self } - #[allow(dead_code)] #[must_use] - pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + pub fn updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + #[must_use] + pub fn with_bytes_left_to_download(mut self, left: i64) -> Self { self.peer.left = NumberOfBytes::new(left); self } - #[allow(dead_code)] #[must_use] - pub fn with_no_bytes_pending_to_download(mut self) -> Self { + pub fn with_no_bytes_left_to_download(mut self) -> Self { self.peer.left = NumberOfBytes::new(0); self } - #[allow(dead_code)] #[must_use] pub fn last_updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { self.peer.updated = updated; self } - #[allow(dead_code)] + #[must_use] + pub fn with_event(mut self, event: AnnounceEvent) -> Self { + self.peer.event = event; + self + } + + #[must_use] + pub fn with_event_started(mut self) -> Self { + self.peer.event = AnnounceEvent::Started; + self + } + + #[must_use] + pub fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + #[must_use] pub fn build(self) -> Peer { self.into() } - #[allow(dead_code)] #[must_use] pub fn into(self) -> Peer { self.peer @@ -493,7 +621,7 @@ pub mod fixture { fn default() -> Self { Self { peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: 
NumberOfBytes::new(0), downloaded: NumberOfBytes::new(0), @@ -513,6 +641,22 @@ pub mod fixture { #[cfg(test)] pub mod test { + + mod peer { + use crate::peer::fixture::PeerBuilder; + + #[test] + fn should_be_comparable() { + let seeder1 = PeerBuilder::seeder().build(); + let seeder2 = PeerBuilder::seeder().build(); + + let leecher1 = PeerBuilder::leecher().build(); + + assert!(seeder1 == seeder2); + assert!(seeder1 != leecher1); + } + } + mod torrent_peer_id { use aquatic_udp_protocol::PeerId; diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs new file mode 100644 index 000000000..c1ec308c8 --- /dev/null +++ b/packages/primitives/src/service_binding.rs @@ -0,0 +1,297 @@ +use std::fmt; +use std::net::{IpAddr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use url::Url; + +const DUAL_STACK_IP_V4_MAPPED_V6_PREFIX: &str = "::ffff:"; + +/// Represents the supported network protocols. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub enum Protocol { + UDP, + HTTP, + HTTPS, +} + +impl fmt::Display for Protocol { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let proto_str = match self { + Protocol::UDP => "udp", + Protocol::HTTP => "http", + Protocol::HTTPS => "https", + }; + write!(f, "{proto_str}") + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub enum IpType { + /// Represents a plain IPv4 or IPv6 address. + Plain, + + /// Represents an IPv6 address that is a mapped IPv4 address. + /// + /// This is used for IPv6 addresses that represent an IPv4 address in a dual-stack network. 
+ /// + /// For example: `[::ffff:192.0.2.33]` + V4MappedV6, +} + +impl fmt::Display for IpType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ip_type_str = match self { + Self::Plain => "plain", + Self::V4MappedV6 => "v4_mapped_v6", + }; + write!(f, "{ip_type_str}") + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub enum IpFamily { + // IPv4 + Inet, + // IPv6 + Inet6, +} + +impl fmt::Display for IpFamily { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ip_family_str = match self { + Self::Inet => "inet", + Self::Inet6 => "inet6", + }; + write!(f, "{ip_family_str}") + } +} + +impl From<IpAddr> for IpFamily { + fn from(ip: IpAddr) -> Self { + if ip.is_ipv4() { + return IpFamily::Inet; + } + + if ip.is_ipv6() { + return IpFamily::Inet6; + } + + panic!("Unsupported IP address type: {ip}"); + } +} + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("The port number cannot be zero. It must be an assigned valid port.")] + PortZeroNotAllowed, +} + +/// Represents a network service binding, encapsulating protocol and socket +/// address. +/// +/// This struct is used to define how a service binds to a network interface and +/// port. +/// +/// It's an URL without path and some restrictions: +/// +/// - Only some schemes are accepted: `udp`, `http`, `https`. +/// - The port number must be greater than zero. The service should be already +/// listening on that port. +/// - The authority part of the URL must be a valid socket address (wildcard is +/// accepted). +/// +/// Besides it accepts some non well-formed URLs, like:<http://127.0.0.1:7070> +/// or <https://127.0.0.1:7070>. Those URLs are not valid because they use non +/// standard ports (80 and 443). +/// +/// NOTICE: It does not represent a public valid URL clients can connect to. It +/// represents the service's internal URL configuration after assigning a port. 
+/// If the port in the configuration is not zero, it's basically the same +/// information you get from the configuration (binding address + protocol). +/// +/// # Examples +/// +/// ``` +/// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +/// use torrust_tracker_primitives::service_binding::{ServiceBinding, Protocol}; +/// +/// let service_binding = ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(); +/// +/// assert_eq!(service_binding.url().to_string(), "http://127.0.0.1:7070/".to_string()); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub struct ServiceBinding { + /// The network protocol used by the service (UDP, HTTP, HTTPS). + protocol: Protocol, + + /// The socket address (IP and port) to which the service binds. + bind_address: SocketAddr, +} + +impl ServiceBinding { + /// # Errors + /// + /// This function will return an error if the port number is zero. + pub fn new(protocol: Protocol, bind_address: SocketAddr) -> Result<Self, Error> { + if bind_address.port() == 0 { + return Err(Error::PortZeroNotAllowed); + } + + Ok(Self { protocol, bind_address }) + } + + /// Returns the protocol used by the service. + #[must_use] + pub fn protocol(&self) -> Protocol { + self.protocol.clone() + } + + #[must_use] + pub fn bind_address(&self) -> SocketAddr { + self.bind_address + } + + #[must_use] + pub fn bind_address_ip_type(&self) -> IpType { + if self.is_v4_mapped_v6() { + return IpType::V4MappedV6; + } + + IpType::Plain + } + + #[must_use] + pub fn bind_address_ip_family(&self) -> IpFamily { + self.bind_address.ip().into() + } + + /// # Panics + /// + /// It never panics because the URL is always valid. 
+ #[must_use] + pub fn url(&self) -> Url { + Url::parse(&format!("{}://{}", self.protocol, self.bind_address)) + .expect("Service binding can always be parsed into a URL") + } + + fn is_v4_mapped_v6(&self) -> bool { + self.bind_address.ip().is_ipv6() + && self + .bind_address + .ip() + .to_string() + .starts_with(DUAL_STACK_IP_V4_MAPPED_V6_PREFIX) + } +} + +impl From<ServiceBinding> for Url { + fn from(service_binding: ServiceBinding) -> Self { + service_binding.url() + } +} + +impl fmt::Display for ServiceBinding { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.url()) + } +} + +#[cfg(test)] +mod tests { + + mod the_service_binding { + use std::net::SocketAddr; + use std::str::FromStr; + + use rstest::rstest; + use url::Url; + + use crate::service_binding::{Error, IpType, Protocol, ServiceBinding}; + + #[rstest] + #[case("wildcard_ip", Protocol::UDP, SocketAddr::from_str("0.0.0.0:6969").unwrap())] + #[case("udp_service", Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap())] + #[case("http_service", Protocol::HTTP, SocketAddr::from_str("127.0.0.1:7070").unwrap())] + #[case("https_service", Protocol::HTTPS, SocketAddr::from_str("127.0.0.1:7070").unwrap())] + fn should_allow_a_subset_of_urls(#[case] case: &str, #[case] protocol: Protocol, #[case] bind_address: SocketAddr) { + let service_binding = ServiceBinding::new(protocol.clone(), bind_address); + + assert!(service_binding.is_ok(), "{}", format!("{case} failed: {service_binding:?}")); + } + + #[test] + fn should_not_allow_undefined_port_zero() { + let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:0").unwrap()); + + assert!(matches!(service_binding, Err(Error::PortZeroNotAllowed))); + } + + #[test] + fn should_return_the_bind_address() { + let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); + + assert_eq!( + service_binding.bind_address(), + 
SocketAddr::from_str("127.0.0.1:6969").unwrap() + ); + } + + #[test] + fn should_return_the_bind_address_plain_type_for_ipv4_ips() { + let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.bind_address_ip_type(), IpType::Plain); + } + + #[test] + fn should_return_the_bind_address_plain_type_for_ipv6_ips() { + let service_binding = + ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("[0:0:0:0:0:0:0:1]:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.bind_address_ip_type(), IpType::Plain); + } + + #[test] + fn should_return_the_bind_address_v4_mapped_v6_type_for_ipv4_ips_mapped_to_ipv6() { + let service_binding = + ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("[::ffff:192.0.2.33]:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.bind_address_ip_type(), IpType::V4MappedV6); + } + + #[test] + fn should_return_the_corresponding_url() { + let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.url(), Url::parse("udp://127.0.0.1:6969").unwrap()); + } + + #[test] + fn should_be_converted_into_an_url() { + let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); + + let url: Url = service_binding.clone().into(); + + assert_eq!(url, Url::parse("udp://127.0.0.1:6969").unwrap()); + } + + #[rstest] + #[case("udp_service", Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap(), "udp://127.0.0.1:6969")] + #[case("http_service", Protocol::HTTP, SocketAddr::from_str("127.0.0.1:7070").unwrap(), "http://127.0.0.1:7070/")] + #[case("https_service", Protocol::HTTPS, SocketAddr::from_str("127.0.0.1:7070").unwrap(), "https://127.0.0.1:7070/")] + fn should_always_have_a_corresponding_unique_url( + #[case] case: &str, + #[case] protocol: Protocol, + #[case] bind_address: SocketAddr, + #[case] expected_url: 
String, + ) { + let service_binding = ServiceBinding::new(protocol.clone(), bind_address).unwrap(); + + assert_eq!( + service_binding.url().to_string(), + expected_url, + "{case} failed: {service_binding:?}", + ); + } + } +} diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs index 792eff632..57ba816d3 100644 --- a/packages/primitives/src/swarm_metadata.rs +++ b/packages/primitives/src/swarm_metadata.rs @@ -7,7 +7,7 @@ use derive_more::Constructor; /// Swarm metadata dictionary in the scrape response. /// /// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Constructor)] pub struct SwarmMetadata { /// (i.e `completed`): The number of peers that have ever completed /// downloading a given torrent. @@ -27,28 +27,42 @@ impl SwarmMetadata { pub fn zeroed() -> Self { Self::default() } + + #[must_use] + pub fn downloads(&self) -> u32 { + self.downloaded + } + + #[must_use] + pub fn seeders(&self) -> u32 { + self.complete + } + + #[must_use] + pub fn leechers(&self) -> u32 { + self.incomplete + } } /// Structure that holds aggregate swarm metadata. /// -/// Metrics are aggregate values for all torrents. +/// Metrics are aggregate values for all active torrents/swarms. #[derive(Copy, Clone, Debug, PartialEq, Default)] -pub struct AggregateSwarmMetadata { - /// Total number of peers that have ever completed downloading for all - /// torrents. +pub struct AggregateActiveSwarmMetadata { + /// Total number of peers that have ever completed downloading. pub total_downloaded: u64, - /// Total number of seeders for all torrents. + /// Total number of seeders. pub total_complete: u64, - /// Total number of leechers for all torrents. + /// Total number of leechers. pub total_incomplete: u64, /// Total number of torrents. 
pub total_torrents: u64, } -impl AddAssign for AggregateSwarmMetadata { +impl AddAssign for AggregateActiveSwarmMetadata { fn add_assign(&mut self, rhs: Self) { self.total_complete += rhs.total_complete; self.total_downloaded += rhs.total_downloaded; diff --git a/packages/rest-tracker-api-client/Cargo.toml b/packages/rest-tracker-api-client/Cargo.toml index cba580e18..47307df9a 100644 --- a/packages/rest-tracker-api-client/Cargo.toml +++ b/packages/rest-tracker-api-client/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to interact with the Torrust Tracker REST API." -keywords = ["bittorrent", "client", "tracker"] +keywords = [ "bittorrent", "client", "tracker" ] license = "LGPL-3.0" name = "torrust-rest-tracker-api-client" readme = "README.md" @@ -16,8 +16,8 @@ version.workspace = true [dependencies] hyper = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } +reqwest = { version = "0", features = [ "json", "query" ] } +serde = { version = "1", features = [ "derive" ] } thiserror = "2" -url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["v4"] } +url = { version = "2", features = [ "serde" ] } +uuid = { version = "1", features = [ "v4" ] } diff --git a/packages/rest-tracker-api-client/src/v1/client.rs b/packages/rest-tracker-api-client/src/v1/client.rs index da1b709da..02a5b0d9c 100644 --- a/packages/rest-tracker-api-client/src/v1/client.rs +++ b/packages/rest-tracker-api-client/src/v1/client.rs @@ -16,10 +16,11 @@ const API_PATH: &str = "api/v1/"; const DEFAULT_REQUEST_TIMEOUT_IN_SECS: u64 = 5; /// API Client +#[allow(clippy::struct_field_names)] pub struct Client { connection_info: ConnectionInfo, base_path: String, - client: reqwest::Client, + http_client: reqwest::Client, } impl Client { @@ -34,7 +35,7 @@ impl Client { Ok(Self { connection_info, base_path: API_PATH.to_string(), - client, + http_client: client, }) } @@ -92,7 +93,7 @@ impl Client { /// /// Will panic 
if the request can't be sent pub async fn post_empty(&self, path: &str, headers: Option<HeaderMap>) -> Response { - let builder = self.client.post(self.base_url(path).clone()); + let builder = self.http_client.post(self.base_url(path).clone()); let builder = match headers { Some(headers) => builder.headers(headers), @@ -111,7 +112,7 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn post_form<T: Serialize + ?Sized>(&self, path: &str, form: &T, headers: Option<HeaderMap>) -> Response { - let builder = self.client.post(self.base_url(path).clone()).json(&form); + let builder = self.http_client.post(self.base_url(path).clone()).json(&form); let builder = match headers { Some(headers) => builder.headers(headers), @@ -130,7 +131,7 @@ impl Client { /// /// Will panic if the request can't be sent async fn delete(&self, path: &str, headers: Option<HeaderMap>) -> Response { - let builder = self.client.delete(self.base_url(path).clone()); + let builder = self.http_client.delete(self.base_url(path).clone()); let builder = match headers { Some(headers) => builder.headers(headers), @@ -203,22 +204,22 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn get(path: Url, query: Option<Query>, headers: Option<HeaderMap>) -> Response { - let builder = reqwest::Client::builder() + let client = reqwest::Client::builder() .timeout(Duration::from_secs(DEFAULT_REQUEST_TIMEOUT_IN_SECS)) .build() .unwrap(); - let builder = match query { - Some(params) => builder.get(path).query(&ReqwestQuery::from(params)), - None => builder.get(path), - }; + let mut request_builder = client.get(path); - let builder = match headers { - Some(headers) => builder.headers(headers), - None => builder, - }; + if let Some(params) = query { + request_builder = request_builder.query(&ReqwestQuery::from(params)); + } + + if let Some(headers) = headers { + request_builder = request_builder.headers(headers); + } - builder.send().await.unwrap() + 
request_builder.send().await.unwrap() } /// Returns a `HeaderMap` with a request id header. diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index d9ccb5d3f..0808c2dd6 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", "bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", "library", "tracker" ] license.workspace = true name = "torrust-rest-tracker-api-core" publish.workspace = true @@ -17,10 +17,14 @@ version.workspace = true bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } +tokio-util = "0.7.15" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } [dev-dependencies] +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index 
eb770c1c5..bcc5a0186 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -1,35 +1,31 @@ use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; -use bittorrent_tracker_core::authentication::handler::KeysHandler; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { - // todo: replace with TrackerCoreContainer - pub core_config: Arc<Core>, - pub in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, - pub keys_handler: Arc<KeysHandler>, - pub whitelist_manager: Arc<WhitelistManager>, + pub http_api_config: Arc<HttpApi>, + + // Swarm Coordination Registry Container + pub swarm_coordination_registry_container: Arc<SwarmCoordinationRegistryContainer>, - // todo: replace with HttpTrackerCoreContainer + // Tracker core + pub tracker_core_container: Arc<TrackerCoreContainer>, + + // HTTP tracker core pub http_stats_repository: Arc<bittorrent_http_tracker_core::statistics::repository::Repository>, - // todo: replace with UdpTrackerCoreContainer + // UDP tracker core pub ban_service: Arc<RwLock<BanService>>, pub udp_core_stats_repository: Arc<bittorrent_udp_tracker_core::statistics::repository::Repository>, - - // todo: replace with UdpTrackerServerContainer pub udp_server_stats_repository: 
Arc<torrust_udp_tracker_server::statistics::repository::Repository>, - - pub http_api_config: Arc<HttpApi>, } impl TrackerHttpApiCoreContainer { @@ -40,12 +36,25 @@ impl TrackerHttpApiCoreContainer { udp_tracker_config: &Arc<UdpTracker>, http_api_config: &Arc<HttpApi>, ) -> Arc<TrackerHttpApiCoreContainer> { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); - let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from(&tracker_core_container, http_tracker_config); - let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, udp_tracker_config); + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &swarm_coordination_registry_container, + )); + + let http_tracker_core_container = + HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, http_tracker_config); + + let udp_tracker_core_container = + UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); Self::initialize_from( + &swarm_coordination_registry_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, @@ -56,6 +65,7 @@ impl TrackerHttpApiCoreContainer { #[must_use] pub fn initialize_from( + swarm_coordination_registry_container: &Arc<SwarmCoordinationRegistryContainer>, tracker_core_container: &Arc<TrackerCoreContainer>, http_tracker_core_container: &Arc<HttpTrackerCoreContainer>, udp_tracker_core_container: &Arc<UdpTrackerCoreContainer>, @@ -63,19 +73,21 @@ impl TrackerHttpApiCoreContainer { http_api_config: &Arc<HttpApi>, ) -> Arc<TrackerHttpApiCoreContainer> { Arc::new(TrackerHttpApiCoreContainer { - core_config: 
tracker_core_container.core_config.clone(), - in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository.clone(), - keys_handler: tracker_core_container.keys_handler.clone(), - whitelist_manager: tracker_core_container.whitelist_manager.clone(), + http_api_config: http_api_config.clone(), - http_stats_repository: http_tracker_core_container.http_stats_repository.clone(), + // Swarm Coordination Registry Container + swarm_coordination_registry_container: swarm_coordination_registry_container.clone(), - ban_service: udp_tracker_core_container.ban_service.clone(), - udp_core_stats_repository: udp_tracker_core_container.udp_core_stats_repository.clone(), + // Tracker core + tracker_core_container: tracker_core_container.clone(), - udp_server_stats_repository: udp_tracker_server_container.udp_server_stats_repository.clone(), + // HTTP tracker core + http_stats_repository: http_tracker_core_container.stats_repository.clone(), - http_api_config: http_api_config.clone(), + // UDP tracker core + ban_service: udp_tracker_core_container.ban_service.clone(), + udp_core_stats_repository: udp_tracker_core_container.stats_repository.clone(), + udp_server_stats_repository: udp_tracker_server_container.stats_repository.clone(), }) } } diff --git a/packages/rest-tracker-api-core/src/statistics/metrics.rs b/packages/rest-tracker-api-core/src/statistics/metrics.rs index 7e41cf713..ecdecd130 100644 --- a/packages/rest-tracker-api-core/src/statistics/metrics.rs +++ b/packages/rest-tracker-api-core/src/statistics/metrics.rs @@ -1,4 +1,33 @@ -/// Metrics collected by the tracker. +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; + +/// Metrics collected by the tracker at the swarm layer. +#[derive(Copy, Clone, Debug, PartialEq, Default)] +pub struct TorrentsMetrics { + /// Total number of peers that have ever completed downloading. + pub total_downloaded: u64, + + /// Total number of seeders. 
+ pub total_complete: u64, + + /// Total number of leechers. + pub total_incomplete: u64, + + /// Total number of torrents. + pub total_torrents: u64, +} + +impl From<AggregateActiveSwarmMetadata> for TorrentsMetrics { + fn from(value: AggregateActiveSwarmMetadata) -> Self { + Self { + total_downloaded: value.total_downloaded, + total_complete: value.total_complete, + total_incomplete: value.total_incomplete, + total_torrents: value.total_torrents, + } + } +} + +/// Metrics collected by the tracker at the delivery layer. /// /// - Number of connections handled /// - Number of `announce` requests handled @@ -7,7 +36,7 @@ /// These metrics are collected for each connection type: UDP and HTTP /// and also for each IP version used by the peers: IPv4 and IPv6. #[derive(Debug, PartialEq, Default)] -pub struct Metrics { +pub struct ProtocolMetrics { /// Total number of TCP (HTTP tracker) connections from IPv4 peers. /// Since the HTTP tracker spec does not require a handshake, this metric /// increases for every HTTP request. 
diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 5d7629443..f87cb8c76 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -2,12 +2,13 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::services::banning::BanService; -use bittorrent_udp_tracker_core::{self, statistics as udp_core_statistics}; +use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; -use torrust_udp_tracker_server::statistics as udp_server_statistics; +use torrust_tracker_metrics::metric_collection::MetricCollection; +use torrust_udp_tracker_server::statistics::{self as udp_server_statistics}; -use crate::statistics::metrics::Metrics; +use super::metrics::TorrentsMetrics; +use crate::statistics::metrics::ProtocolMetrics; /// All the metrics collected by the tracker. #[derive(Debug, PartialEq)] @@ -15,84 +16,195 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: TorrentsMetrics, /// Application level metrics. Usage statistics/metrics. 
/// /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) - pub protocol_metrics: Metrics, + pub protocol_metrics: ProtocolMetrics, } /// It returns all the [`TrackerMetrics`] -#[allow(deprecated)] pub async fn get_metrics( in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, - ban_service: Arc<RwLock<BanService>>, + tracker_core_stats_repository: Arc<bittorrent_tracker_core::statistics::repository::Repository>, http_stats_repository: Arc<bittorrent_http_tracker_core::statistics::repository::Repository>, - udp_core_stats_repository: Arc<udp_core_statistics::repository::Repository>, udp_server_stats_repository: Arc<udp_server_statistics::repository::Repository>, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + TrackerMetrics { + torrents_metrics: get_torrents_metrics(in_memory_torrent_repository, tracker_core_stats_repository).await, + protocol_metrics: get_protocol_metrics(http_stats_repository.clone(), udp_server_stats_repository.clone()).await, + } +} + +async fn get_torrents_metrics( + in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, + + tracker_core_stats_repository: Arc<bittorrent_tracker_core::statistics::repository::Repository>, +) -> TorrentsMetrics { + let aggregate_active_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; + + let mut torrents_metrics: TorrentsMetrics = aggregate_active_swarm_metadata.into(); + torrents_metrics.total_downloaded = tracker_core_stats_repository.get_torrents_downloads_total().await; + + torrents_metrics +} + +#[allow(deprecated)] +#[allow(clippy::too_many_lines)] +async fn get_protocol_metrics( + http_stats_repository: Arc<bittorrent_http_tracker_core::statistics::repository::Repository>, + udp_server_stats_repository: Arc<udp_server_statistics::repository::Repository>, +) -> 
ProtocolMetrics { let http_stats = http_stats_repository.get_stats().await; - let udp_core_stats = udp_core_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; + // TCPv4 + + let tcp4_announces_handled = http_stats.tcp4_announces_handled(); + let tcp4_scrapes_handled = http_stats.tcp4_scrapes_handled(); + + // TCPv6 + + let tcp6_announces_handled = http_stats.tcp6_announces_handled(); + let tcp6_scrapes_handled = http_stats.tcp6_scrapes_handled(); + + // UDP + + let udp_requests_aborted = udp_server_stats.udp_requests_aborted_total(); + let udp_requests_banned = udp_server_stats.udp_requests_banned_total(); + let udp_banned_ips_total = udp_server_stats.udp_banned_ips_total(); + let udp_avg_connect_processing_time_ns = udp_server_stats.udp_avg_connect_processing_time_ns_averaged(); + let udp_avg_announce_processing_time_ns = udp_server_stats.udp_avg_announce_processing_time_ns_averaged(); + let udp_avg_scrape_processing_time_ns = udp_server_stats.udp_avg_scrape_processing_time_ns_averaged(); + + // UDPv4 + + let udp4_requests = udp_server_stats.udp4_requests_received_total(); + let udp4_connections_handled = udp_server_stats.udp4_connect_requests_accepted_total(); + let udp4_announces_handled = udp_server_stats.udp4_announce_requests_accepted_total(); + let udp4_scrapes_handled = udp_server_stats.udp4_scrape_requests_accepted_total(); + let udp4_responses = udp_server_stats.udp4_responses_sent_total(); + let udp4_errors_handled = udp_server_stats.udp4_errors_total(); + + // UDPv6 + + let udp6_requests = udp_server_stats.udp6_requests_received_total(); + let udp6_connections_handled = udp_server_stats.udp6_connect_requests_accepted_total(); + let udp6_announces_handled = udp_server_stats.udp6_announce_requests_accepted_total(); + let udp6_scrapes_handled = udp_server_stats.udp6_scrape_requests_accepted_total(); + let udp6_responses = udp_server_stats.udp6_responses_sent_total(); + let udp6_errors_handled = 
udp_server_stats.udp6_errors_total(); + // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. They don't make sense for the HTTP // tracker, but we keep them for now. In new major versions we should remove // them. - TrackerMetrics { - torrents_metrics, - protocol_metrics: Metrics { - // TCPv4 - tcp4_connections_handled: http_stats.tcp4_announces_handled + http_stats.tcp4_scrapes_handled, - tcp4_announces_handled: http_stats.tcp4_announces_handled, - tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, - // TCPv6 - tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, - tcp6_announces_handled: http_stats.tcp6_announces_handled, - tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, - // UDP - udp_requests_aborted: udp_server_stats.udp_requests_aborted, - udp_requests_banned: udp_server_stats.udp_requests_banned, - udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, - // UDPv4 - udp4_requests: udp_server_stats.udp4_requests, - udp4_connections_handled: udp_core_stats.udp4_connections_handled, - udp4_announces_handled: udp_core_stats.udp4_announces_handled, - udp4_scrapes_handled: udp_core_stats.udp4_scrapes_handled, - udp4_responses: udp_server_stats.udp4_responses, - udp4_errors_handled: udp_server_stats.udp4_errors_handled, - // UDPv6 - udp6_requests: udp_server_stats.udp6_requests, - udp6_connections_handled: udp_core_stats.udp6_connections_handled, - udp6_announces_handled: udp_core_stats.udp6_announces_handled, - udp6_scrapes_handled: udp_core_stats.udp6_scrapes_handled, - udp6_responses: udp_server_stats.udp6_responses, - udp6_errors_handled: udp_server_stats.udp6_errors_handled, - }, + 
ProtocolMetrics { + // TCPv4 + tcp4_connections_handled: tcp4_announces_handled + tcp4_scrapes_handled, + tcp4_announces_handled, + tcp4_scrapes_handled, + // TCPv6 + tcp6_connections_handled: tcp6_announces_handled + tcp6_scrapes_handled, + tcp6_announces_handled, + tcp6_scrapes_handled, + // UDP + udp_requests_aborted, + udp_requests_banned, + udp_banned_ips_total, + udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns, + // UDPv4 + udp4_requests, + udp4_connections_handled, + udp4_announces_handled, + udp4_scrapes_handled, + udp4_responses, + udp4_errors_handled, + // UDPv6 + udp6_requests, + udp6_connections_handled, + udp6_announces_handled, + udp6_scrapes_handled, + udp6_responses, + udp6_errors_handled, } } +#[derive(Debug, PartialEq)] +pub struct TrackerLabeledMetrics { + pub metrics: MetricCollection, +} + +/// It returns all the [`TrackerLabeledMetrics`] +/// +/// # Panics +/// +/// Will panic if the metrics cannot be merged. This could happen if the +/// packages are producing duplicate metric names, for example. 
+pub async fn get_labeled_metrics( + in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, + ban_service: Arc<RwLock<BanService>>, + swarms_stats_repository: Arc<torrust_tracker_swarm_coordination_registry::statistics::repository::Repository>, + tracker_core_stats_repository: Arc<bittorrent_tracker_core::statistics::repository::Repository>, + http_stats_repository: Arc<bittorrent_http_tracker_core::statistics::repository::Repository>, + udp_stats_repository: Arc<bittorrent_udp_tracker_core::statistics::repository::Repository>, + udp_server_stats_repository: Arc<udp_server_statistics::repository::Repository>, +) -> TrackerLabeledMetrics { + let _torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let _udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + + let swarms_stats = swarms_stats_repository.get_metrics().await; + let tracker_core_stats = tracker_core_stats_repository.get_metrics().await; + let http_stats = http_stats_repository.get_stats().await; + let udp_stats_repository = udp_stats_repository.get_stats().await; + let udp_server_stats = udp_server_stats_repository.get_stats().await; + + // Merge all the metrics into a single collection + let mut metrics = MetricCollection::default(); + + metrics + .merge(&swarms_stats.metric_collection) + .expect("msg: failed to merge torrent repository metrics"); + metrics + .merge(&tracker_core_stats.metric_collection) + .expect("msg: failed to merge tracker core metrics"); + metrics + .merge(&http_stats.metric_collection) + .expect("msg: failed to merge HTTP core metrics"); + metrics + .merge(&udp_stats_repository.metric_collection) + .expect("failed to merge UDP core metrics"); + metrics + .merge(&udp_server_stats.metric_collection) + .expect("failed to merge UDP server metrics"); + + TrackerLabeledMetrics { metrics } +} + #[cfg(test)] mod tests { use std::sync::Arc; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use 
bittorrent_http_tracker_core::event::bus::EventBus; + use bittorrent_http_tracker_core::event::sender::Broadcaster; + use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; + use bittorrent_http_tracker_core::statistics::repository::Repository; + use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_tracker_core::{self}; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_tracker_test_helpers::configuration; - use crate::statistics::metrics::Metrics; + use crate::statistics::metrics::{ProtocolMetrics, TorrentsMetrics}; use crate::statistics::services::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Configuration { @@ -101,31 +213,38 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { + let cancellation_token = CancellationToken::new(); + let config = tracker_configuration(); + let core_config = Arc::new(config.core.clone()); + + let swarm_coordination_registry_container = + Arc::new(SwarmCoordinationRegistryContainer::initialize(SenderStatus::Enabled)); + + let tracker_core_container = + TrackerCoreContainer::initialize_from(&core_config, &swarm_coordination_registry_container.clone()); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let _ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); // HTTP core stats - let (_http_stats_event_sender, http_stats_repository) = - 
bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_repository = Arc::new(http_stats_repository); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_event_bus = Arc::new(EventBus::new( + config.core.tracker_usage_statistics.into(), + http_core_broadcaster.clone(), + )); - // UDP core stats - let (_udp_stats_event_sender, udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_stats_repository = Arc::new(udp_stats_repository); + if config.core.tracker_usage_statistics { + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); + } // UDP server stats - let (_udp_server_stats_event_sender, udp_server_stats_repository) = - torrust_udp_tracker_server::statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_server_stats_repository = Arc::new(udp_server_stats_repository); + let udp_server_stats_repository = Arc::new(torrust_udp_tracker_server::statistics::repository::Repository::new()); let tracker_metrics = get_metrics( - in_memory_torrent_repository.clone(), - ban_service.clone(), + tracker_core_container.in_memory_torrent_repository.clone(), + tracker_core_container.stats_repository.clone(), http_stats_repository.clone(), - udp_stats_repository.clone(), udp_server_stats_repository.clone(), ) .await; @@ -133,8 +252,8 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), - protocol_metrics: Metrics::default(), + torrents_metrics: TorrentsMetrics::default(), + protocol_metrics: ProtocolMetrics::default(), } ); } diff --git a/packages/server-lib/Cargo.toml b/packages/server-lib/Cargo.toml index b8514fbf4..fbd7a7a7f 100644 --- a/packages/server-lib/Cargo.toml +++ b/packages/server-lib/Cargo.toml @@ -4,7 +4,7 @@ description = 
"Common functionality used in all Torrust HTTP servers." documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["lib", "server", "torrust"] +keywords = [ "lib", "server", "torrust" ] license.workspace = true name = "torrust-server-lib" publish.workspace = true @@ -14,9 +14,11 @@ rust-version.workspace = true version.workspace = true [dependencies] -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "display", "from" ] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" [dev-dependencies] +rstest = "0.25.0" diff --git a/packages/server-lib/src/logging.rs b/packages/server-lib/src/logging.rs index c503cfd35..c63ba3caf 100644 --- a/packages/server-lib/src/logging.rs +++ b/packages/server-lib/src/logging.rs @@ -10,7 +10,7 @@ use tower_http::LatencyUnit; /// ```text /// 2024-06-25T12:36:25.025312Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 /// 2024-06-25T12:36:25.025445Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 -/// 2024-06-25T12:36:25.025527Z INFO API: Started on http://0.0.0.0:1212 +/// 2024-06-25T12:36:25.025527Z INFO API: Started on: http://0.0.0.0:1212 /// 2024-06-25T12:36:25.025580Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 /// ``` pub const STARTED_ON: &str = "Started on"; diff --git a/packages/server-lib/src/registar.rs b/packages/server-lib/src/registar.rs index 6b67188dc..efa94034b 100644 --- a/packages/server-lib/src/registar.rs 
+++ b/packages/server-lib/src/registar.rs @@ -1,12 +1,12 @@ //! Registar. Registers Services for Health Check. use std::collections::HashMap; -use std::net::SocketAddr; use std::sync::Arc; use derive_more::Constructor; use tokio::sync::Mutex; use tokio::task::JoinHandle; +use torrust_tracker_primitives::service_binding::ServiceBinding; /// A [`ServiceHeathCheckResult`] is returned by a completed health check. pub type ServiceHeathCheckResult = Result<String, String>; @@ -16,29 +16,30 @@ pub type ServiceHeathCheckResult = Result<String, String>; /// The `job` awaits a [`ServiceHeathCheckResult`]. #[derive(Debug, Constructor)] pub struct ServiceHealthCheckJob { - pub binding: SocketAddr, + pub service_binding: ServiceBinding, pub info: String, + pub service_type: String, pub job: JoinHandle<ServiceHeathCheckResult>, } /// The function specification [`FnSpawnServiceHeathCheck`]. /// /// A function fulfilling this specification will spawn a new [`ServiceHealthCheckJob`]. -pub type FnSpawnServiceHeathCheck = fn(&SocketAddr) -> ServiceHealthCheckJob; +pub type FnSpawnServiceHeathCheck = fn(&ServiceBinding) -> ServiceHealthCheckJob; /// A [`ServiceRegistration`] is provided to the [`Registar`] for registration. /// /// Each registration includes a function that fulfils the [`FnSpawnServiceHeathCheck`] specification. #[derive(Clone, Debug, Constructor)] pub struct ServiceRegistration { - binding: SocketAddr, + service_binding: ServiceBinding, check_fn: FnSpawnServiceHeathCheck, } impl ServiceRegistration { #[must_use] pub fn spawn_check(&self) -> ServiceHealthCheckJob { - (self.check_fn)(&self.binding) + (self.check_fn)(&self.service_binding) } } @@ -46,7 +47,7 @@ impl ServiceRegistration { pub type ServiceRegistrationForm = tokio::sync::oneshot::Sender<ServiceRegistration>; /// The [`ServiceRegistry`] contains each unique [`ServiceRegistration`] by it's [`SocketAddr`]. 
-pub type ServiceRegistry = Arc<Mutex<HashMap<SocketAddr, ServiceRegistration>>>; +pub type ServiceRegistry = Arc<Mutex<HashMap<ServiceBinding, ServiceRegistration>>>; /// The [`Registar`] manages the [`ServiceRegistry`]. #[derive(Clone, Debug)] @@ -89,7 +90,7 @@ impl Registar { let mut mutex = self.registry.lock().await; - mutex.insert(service_registration.binding, service_registration); + mutex.insert(service_registration.service_binding.clone(), service_registration); } /// Returns the [`ServiceRegistry`] of services diff --git a/packages/server-lib/src/signals.rs b/packages/server-lib/src/signals.rs index 63f7554c8..581729e57 100644 --- a/packages/server-lib/src/signals.rs +++ b/packages/server-lib/src/signals.rs @@ -1,5 +1,6 @@ //! This module contains functions to handle signals. use derive_more::Display; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::instrument; /// This is the message that the "launcher" spawned task sends to the main @@ -7,6 +8,7 @@ use tracing::instrument; /// #[derive(Debug)] pub struct Started { + pub service_binding: ServiceBinding, pub address: std::net::SocketAddr, } diff --git a/packages/swarm-coordination-registry/.gitignore b/packages/swarm-coordination-registry/.gitignore new file mode 100644 index 000000000..c9907ae11 --- /dev/null +++ b/packages/swarm-coordination-registry/.gitignore @@ -0,0 +1 @@ +/.coverage/ diff --git a/packages/swarm-coordination-registry/Cargo.toml b/packages/swarm-coordination-registry/Cargo.toml new file mode 100644 index 000000000..f9513d3c4 --- /dev/null +++ b/packages/swarm-coordination-registry/Cargo.toml @@ -0,0 +1,41 @@ +[package] +description = "A library that provides a repository of torrents files and their peers." 
+keywords = [ "library", "repository", "torrents" ] +name = "torrust-tracker-swarm-coordination-registry" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +bittorrent-primitives = "0.1.0" +chrono = { version = "0", default-features = false, features = [ "clock" ] } +crossbeam-skiplist = "0" +futures = "0" +serde = { version = "1.0.219", features = [ "derive" ] } +thiserror = "2.0.12" +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } +tokio-util = "0.7.15" +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tracing = "0" + +[dev-dependencies] +async-std = { version = "1", features = [ "attributes", "tokio1" ] } +criterion = { version = "0", features = [ "async_tokio" ] } +mockall = "0" +rand = "0" +rstest = "0" +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/swarm-coordination-registry/README.md b/packages/swarm-coordination-registry/README.md new file mode 100644 index 000000000..a8c55746b --- /dev/null +++ b/packages/swarm-coordination-registry/README.md @@ -0,0 +1,22 @@ +# Torrust Tracker Torrent Repository + +A library to provide a torrent repository to the [Torrust Tracker](https://github.com/torrust/torrust-tracker). 
+ +Its main responsibilities include: + +- Managing Torrent Entries: It stores, retrieves, and manages torrent entries, which are torrents being tracked. +- Persistence: It supports loading tracked torrents from persistent storage, ensuring that torrent data can be restored across restarts. +- Pagination and sorting: It provides paginated and stable/sorted access to torrent entries. +- Peer management: It manages peers associated with torrents, including removing inactive peers and handling torrents with no peers (peerless torrents). +- Policy handling: It supports different policies for handling torrents, such as persisting, removing, or custom policies for torrents with no peers. +- Metrics: It can provide metrics about the torrents, such as counts or statuses, likely for monitoring or statistics. + +This repo is a core component for managing the state and lifecycle of torrents and their peers in a BitTorrent tracker, with peer management, and flexible policies. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-swarm-coordination-registry). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). 
diff --git a/packages/swarm-coordination-registry/src/container.rs b/packages/swarm-coordination-registry/src/container.rs new file mode 100644 index 000000000..718e3ee52 --- /dev/null +++ b/packages/swarm-coordination-registry/src/container.rs @@ -0,0 +1,38 @@ +use std::sync::Arc; + +use torrust_tracker_events::bus::SenderStatus; + +use crate::event::bus::EventBus; +use crate::event::sender::Broadcaster; +use crate::event::{self}; +use crate::statistics::repository::Repository; +use crate::{statistics, Registry}; + +pub struct SwarmCoordinationRegistryContainer { + pub swarms: Arc<Registry>, + pub event_bus: Arc<event::bus::EventBus>, + pub stats_event_sender: event::sender::Sender, + pub stats_repository: Arc<statistics::repository::Repository>, +} + +impl SwarmCoordinationRegistryContainer { + #[must_use] + pub fn initialize(sender_status: SenderStatus) -> Self { + // // Swarm Coordination Registry Container stats + let broadcaster = Broadcaster::default(); + let stats_repository = Arc::new(Repository::new()); + + let event_bus = Arc::new(EventBus::new(sender_status, broadcaster.clone())); + + let stats_event_sender = event_bus.sender(); + + let swarms = Arc::new(Registry::new(stats_event_sender.clone())); + + Self { + swarms, + event_bus, + stats_event_sender, + stats_repository, + } + } +} diff --git a/packages/swarm-coordination-registry/src/event.rs b/packages/swarm-coordination-registry/src/event.rs new file mode 100644 index 000000000..65a65ce8c --- /dev/null +++ b/packages/swarm-coordination-registry/src/event.rs @@ -0,0 +1,111 @@ +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::{Peer, PeerAnnouncement}; + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Event { + TorrentAdded { + info_hash: InfoHash, + announcement: PeerAnnouncement, + }, + TorrentRemoved { + info_hash: InfoHash, + }, + PeerAdded { + info_hash: InfoHash, + peer: Peer, + }, + PeerRemoved { + info_hash: InfoHash, + peer: Peer, + }, + PeerUpdated { + 
info_hash: InfoHash, + old_peer: Peer, + new_peer: Peer, + }, + PeerDownloadCompleted { + info_hash: InfoHash, + peer: Peer, + }, +} + +pub mod sender { + use std::sync::Arc; + + use super::Event; + + pub type Sender = Option<Arc<dyn torrust_tracker_events::sender::Sender<Event = Event>>>; + pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster<Event>; + + #[cfg(test)] + pub mod tests { + + use futures::future::{self, BoxFuture}; + use mockall::mock; + use mockall::predicate::eq; + use torrust_tracker_events::sender::{SendError, Sender}; + + use crate::event::Event; + + mock! { + pub EventSender {} + + impl Sender for EventSender { + type Event = Event; + + fn send(&self, event: Event) -> BoxFuture<'static,Option<Result<usize,SendError<Event> > > > ; + } + } + + pub fn expect_event(mock: &mut MockEventSender, event: Event) { + mock.expect_send() + .with(eq(event)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + } + + pub fn expect_event_sequence(mock: &mut MockEventSender, event: Vec<Event>) { + for e in event { + expect_event(mock, e); + } + } + } +} + +pub mod receiver { + use super::Event; + + pub type Receiver = Box<dyn torrust_tracker_events::receiver::Receiver<Event = Event>>; +} + +pub mod bus { + use crate::event::Event; + + pub type EventBus = torrust_tracker_events::bus::EventBus<Event>; +} + +#[cfg(test)] +pub mod test { + + use torrust_tracker_primitives::peer::Peer; + + use super::Event; + use crate::tests::sample_info_hash; + + #[test] + fn events_should_be_comparable() { + let info_hash = sample_info_hash(); + + let event1 = Event::TorrentAdded { + info_hash, + announcement: Peer::default(), + }; + + let event2 = Event::TorrentRemoved { info_hash }; + + let event1_clone = event1.clone(); + + assert!(event1 == event1_clone); + assert!(event1 != event2); + } +} diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs new file mode 100644 index 
000000000..eb2721a0c --- /dev/null +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -0,0 +1,145 @@ +pub mod container; +pub mod event; +pub mod statistics; +pub mod swarm; + +use std::sync::Arc; + +use tokio::sync::Mutex; +use torrust_tracker_clock::clock; + +pub type Registry = swarm::registry::Registry; +pub type CoordinatorHandle = Arc<Mutex<Coordinator>>; +pub type Coordinator = swarm::coordinator::Coordinator; + +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +pub const SWARM_COORDINATION_REGISTRY_LOG_TARGET: &str = "SWARM_COORDINATION_REGISTRY"; + +#[cfg(test)] +pub(crate) mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::<InfoHash>() + .expect("String should be a valid info hash") + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash_one() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::<InfoHash>() + .expect("String should be a valid info hash") + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. 
+ #[must_use] + pub fn sample_info_hash_alphabetically_ordered_after_sample_info_hash_one() -> InfoHash { + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 + .parse::<InfoHash>() + .expect("String should be a valid info hash") + } + + /// Sample peer whose state is not relevant for the tests. + #[must_use] + pub fn sample_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn sample_peer_one() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn sample_peer_two() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn seeder() -> Peer { + complete_peer() + } + + #[must_use] + pub fn leecher() -> Peer { + incomplete_peer() + } + + /// A peer that counts as `complete` in swarm metadata. + /// IMPORTANT!: it only counts if it has been announced at least once before + /// announcing the `AnnounceEvent::Completed` event. 
+ #[must_use] + pub fn complete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + /// A peer that counts as `incomplete` in swarm metadata + #[must_use] + pub fn incomplete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(1000), // Still bytes to download + event: AnnounceEvent::Started, + } + } +} diff --git a/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs new file mode 100644 index 000000000..cf814e810 --- /dev/null +++ b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs @@ -0,0 +1,104 @@ +//! Job that runs a task on intervals to update peers' activity metrics. 
+use std::sync::Arc; + +use chrono::Utc; +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use tracing::instrument; + +use super::repository::Repository; +use crate::statistics::{SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL}; +use crate::{CurrentClock, Registry}; + +#[must_use] +#[instrument(skip(swarms, stats_repository))] +pub fn start_job( + swarms: &Arc<Registry>, + stats_repository: &Arc<Repository>, + inactivity_cutoff: DurationSinceUnixEpoch, +) -> JoinHandle<()> { + let weak_swarms = std::sync::Arc::downgrade(swarms); + let weak_stats_repository = std::sync::Arc::downgrade(stats_repository); + + let interval_in_secs = 15; // todo: make this configurable + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval_in_secs); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + tracing::info!("Stopping peers activity metrics update job (ctrl-c signal received) ..."); + break; + } + _ = interval.tick() => { + if let (Some(swarms), Some(stats_repository)) = (weak_swarms.upgrade(), weak_stats_repository.upgrade()) { + update_activity_metrics(interval_in_secs, &swarms, &stats_repository, inactivity_cutoff).await; + } else { + tracing::info!("Stopping peers activity metrics update job (can't upgrade weak pointers) ..."); + break; + } + } + } + } + }) +} + +async fn update_activity_metrics( + interval_in_secs: u64, + swarms: &Arc<Registry>, + stats_repository: &Arc<Repository>, + inactivity_cutoff: DurationSinceUnixEpoch, +) { + let start_time = Utc::now().time(); + + tracing::debug!( + "Updating peers and torrents activity metrics (executed every {} secs) ...", + interval_in_secs + ); + + let activity_metadata = swarms.get_activity_metadata(inactivity_cutoff).await; + + activity_metadata.log(); + + update_inactive_peers_total(stats_repository, activity_metadata.inactive_peers_total).await; + update_inactive_torrents_total(stats_repository, activity_metadata.inactive_torrents_total).await; + + tracing::debug!( + "Peers and torrents activity metrics updated in {} ms", + (Utc::now().time() - start_time).num_milliseconds() + ); +} + +async fn update_inactive_peers_total(stats_repository: &Arc<Repository>, inactive_peers_total: usize) { + #[allow(clippy::cast_precision_loss)] + let inactive_peers_total = inactive_peers_total as f64; + + let _unused = stats_repository + .set_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL), + &LabelSet::default(), + inactive_peers_total, + CurrentClock::now(), + ) + .await; +} + +async fn update_inactive_torrents_total(stats_repository: &Arc<Repository>, inactive_torrents_total: usize) { + #[allow(clippy::cast_precision_loss)] + let inactive_torrents_total = inactive_torrents_total as f64; + + let _unused = stats_repository + .set_gauge( + 
&metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL), + &LabelSet::default(), + inactive_torrents_total, + CurrentClock::now(), + ) + .await; +} diff --git a/packages/swarm-coordination-registry/src/statistics/event/handler.rs b/packages/swarm-coordination-registry/src/statistics/event/handler.rs new file mode 100644 index 000000000..1d3f8f32c --- /dev/null +++ b/packages/swarm-coordination-registry/src/statistics/event/handler.rs @@ -0,0 +1,655 @@ +use std::sync::Arc; + +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::Event; +use crate::statistics::repository::Repository; +use crate::statistics::{ + SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL, +}; + +#[allow(clippy::too_many_lines)] +pub async fn handle_event(event: Event, stats_repository: &Arc<Repository>, now: DurationSinceUnixEpoch) { + match event { + // Torrent events + Event::TorrentAdded { info_hash, .. 
} => { + tracing::debug!(info_hash = ?info_hash, "Torrent added",); + + let _unused = stats_repository + .increment_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), + &LabelSet::default(), + now, + ) + .await; + + let _unused = stats_repository + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL), + &LabelSet::default(), + now, + ) + .await; + } + Event::TorrentRemoved { info_hash } => { + tracing::debug!(info_hash = ?info_hash, "Torrent removed",); + + let _unused = stats_repository + .decrement_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), + &LabelSet::default(), + now, + ) + .await; + + let _unused = stats_repository + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL), + &LabelSet::default(), + now, + ) + .await; + } + + // Peer events + Event::PeerAdded { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); + + let label_set = label_set_for_peer(&peer); + + let _unused = stats_repository + .increment_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), + &label_set, + now, + ) + .await; + + let _unused = stats_repository + .increment_counter(&metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL), &label_set, now) + .await; + } + Event::PeerRemoved { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); + + let label_set = label_set_for_peer(&peer); + + let _unused = stats_repository + .decrement_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), + &label_set, + now, + ) + .await; + + let _unused = stats_repository + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL), + &label_set, + now, + ) + .await; + } + Event::PeerUpdated { + info_hash, + old_peer, + new_peer, + } => { + tracing::debug!(info_hash = ?info_hash, old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); + 
+ // If the peer's role has changed, we need to adjust the number of + // connections + if old_peer.role() != new_peer.role() { + let _unused = stats_repository + .increment_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), + &label_set_for_peer(&new_peer), + now, + ) + .await; + + let _unused = stats_repository + .decrement_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), + &label_set_for_peer(&old_peer), + now, + ) + .await; + } + + // If the peer reverted from a completed state to any other state, + // we need to increment the counter for reverted completed. + if old_peer.is_completed() && !new_peer.is_completed() { + let _unused = stats_repository + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL), + &LabelSet::default(), + now, + ) + .await; + } + + // Regardless of the role change, we still need to increment the + // counter for updated peers. + let label_set = label_set_for_peer(&new_peer); + + let _unused = stats_repository + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL), + &label_set, + now, + ) + .await; + } + Event::PeerDownloadCompleted { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); + + let _unused: Result<(), torrust_tracker_metrics::metric_collection::Error> = stats_repository + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL), + &label_set_for_peer(&peer), + now, + ) + .await; + } + } +} + +/// Returns the label set to be included in the metrics for the given peer. 
+pub(crate) fn label_set_for_peer(peer: &Peer) -> LabelSet { + if peer.is_seeder() { + (label_name!("peer_role"), LabelValue::new("seeder")).into() + } else { + (label_name!("peer_role"), LabelValue::new("leecher")).into() + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use aquatic_udp_protocol::NumberOfBytes; + use torrust_tracker_metrics::label::LabelSet; + use torrust_tracker_metrics::metric::MetricName; + use torrust_tracker_primitives::peer::{Peer, PeerRole}; + + use crate::statistics::repository::Repository; + use crate::tests::{leecher, seeder}; + + fn make_peer(role: PeerRole) -> Peer { + match role { + PeerRole::Seeder => seeder(), + PeerRole::Leecher => leecher(), + } + } + + // It returns a peer with the opposite role of the given peer. + fn make_opposite_role_peer(peer: &Peer) -> Peer { + let mut opposite_role_peer = *peer; + + match peer.role() { + PeerRole::Seeder => { + opposite_role_peer.left = NumberOfBytes::new(1); + } + PeerRole::Leecher => { + opposite_role_peer.left = NumberOfBytes::new(0); + } + } + + opposite_role_peer + } + + pub async fn expect_counter_metric_to_be( + stats_repository: &Arc<Repository>, + metric_name: &MetricName, + label_set: &LabelSet, + expected_value: u64, + ) { + let value = get_counter_metric(stats_repository, metric_name, label_set).await; + assert_eq!(value.to_string(), expected_value.to_string()); + } + + async fn get_counter_metric(stats_repository: &Arc<Repository>, metric_name: &MetricName, label_set: &LabelSet) -> u64 { + stats_repository + .get_metrics() + .await + .metric_collection + .get_counter_value(metric_name, label_set) + .unwrap_or_else(|| panic!("Failed to get counter value for metric name '{metric_name}' and label set '{label_set}'")) + .value() + } + + async fn expect_gauge_metric_to_be( + stats_repository: &Arc<Repository>, + metric_name: &MetricName, + label_set: &LabelSet, + expected_value: f64, + ) { + let value = get_gauge_metric(stats_repository, metric_name, label_set).await; + 
assert_eq!(value.to_string(), expected_value.to_string()); + } + + async fn get_gauge_metric(stats_repository: &Arc<Repository>, metric_name: &MetricName, label_set: &LabelSet) -> f64 { + stats_repository + .get_metrics() + .await + .metric_collection + .get_gauge_value(metric_name, label_set) + .unwrap_or_else(|| panic!("Failed to get gauge value for metric name '{metric_name}' and label set '{label_set}'")) + .value() + } + + mod for_torrent_metrics { + + use std::sync::Arc; + + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelSet; + use torrust_tracker_metrics::metric_name; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, expect_gauge_metric_to_be}; + use crate::statistics::repository::Repository; + use crate::statistics::{ + SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL, + }; + use crate::tests::{sample_info_hash, sample_peer}; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_increment_the_number_of_torrents_when_a_torrent_added_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TorrentAdded { + info_hash: sample_info_hash(), + announcement: sample_peer(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be( + &stats_repository, + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), + &LabelSet::default(), + 1.0, + ) + .await; + } + + #[tokio::test] + async fn it_should_decrement_the_number_of_torrents_when_a_torrent_removed_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = 
metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL); + let label_set = LabelSet::default(); + + // Increment the gauge first to simulate a torrent being added. + stats_repository + .increment_gauge(&metric_name, &label_set, CurrentClock::now()) + .await + .unwrap(); + + handle_event( + Event::TorrentRemoved { + info_hash: sample_info_hash(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 0.0).await; + } + + #[tokio::test] + async fn it_should_increment_the_number_of_torrents_added_when_a_torrent_added_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TorrentAdded { + info_hash: sample_info_hash(), + announcement: sample_peer(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL), + &LabelSet::default(), + 1, + ) + .await; + } + + #[tokio::test] + async fn it_should_increment_the_number_of_torrents_removed_when_a_torrent_removed_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TorrentRemoved { + info_hash: sample_info_hash(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL), + &LabelSet::default(), + 1, + ) + .await; + } + } + + mod for_peer_metrics { + use std::sync::Arc; + + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::metric_name; + + use crate::event::Event; + use crate::statistics::event::handler::tests::expect_counter_metric_to_be; + use crate::statistics::event::handler::{handle_event, label_set_for_peer}; + use 
crate::statistics::repository::Repository; + use crate::statistics::{ + SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL, + }; + use crate::tests::{sample_info_hash, sample_peer}; + use crate::CurrentClock; + + mod peer_connections_total { + + use std::sync::Arc; + + use rstest::rstest; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelValue; + use torrust_tracker_metrics::{label_name, metric_name}; + use torrust_tracker_primitives::peer::PeerRole; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::handler::tests::{ + expect_gauge_metric_to_be, get_gauge_metric, make_opposite_role_peer, make_peer, + }; + use crate::statistics::repository::Repository; + use crate::statistics::SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL; + use crate::tests::sample_info_hash; + use crate::CurrentClock; + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_increment_the_number_of_peer_connections_when_a_peer_added_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 1.0).await; + } + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_decrement_the_number_of_peer_connections_when_a_peer_removed_event_is_received( + #[case] role: 
PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + // Increment the gauge first to simulate a peer being added. + stats_repository + .increment_gauge(&metric_name, &label_set, CurrentClock::now()) + .await + .unwrap(); + + handle_event( + Event::PeerRemoved { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 0.0).await; + } + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_adjust_the_number_of_seeders_and_leechers_when_a_peer_updated_event_is_received_and_the_peer_changed_its_role( + #[case] old_role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let old_peer = make_peer(old_role); + let new_peer = make_opposite_role_peer(&old_peer); + + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL); + let old_role_label_set = (label_name!("peer_role"), LabelValue::new(&old_peer.role().to_string())).into(); + let new_role_label_set = (label_name!("peer_role"), LabelValue::new(&new_peer.role().to_string())).into(); + + // Increment the gauge first by simulating a peer was added. + handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer: old_peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let old_role_total = get_gauge_metric(&stats_repository, &metric_name, &old_role_label_set).await; + let new_role_total = 0.0; + + // The peer's role has changed, so we need to increment the new + // role and decrement the old one. 
+ handle_event( + Event::PeerUpdated { + info_hash: sample_info_hash(), + old_peer, + new_peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + // The peer's role has changed, so the new role has incremented. + expect_gauge_metric_to_be(&stats_repository, &metric_name, &new_role_label_set, new_role_total + 1.0).await; + + // And the old role has decremented. + expect_gauge_metric_to_be(&stats_repository, &metric_name, &old_role_label_set, old_role_total - 1.0).await; + } + } + + #[tokio::test] + async fn it_should_increment_the_number_of_peers_added_when_a_peer_added_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let peer = sample_peer(); + + handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL), + &label_set_for_peer(&peer), + 1, + ) + .await; + } + + #[tokio::test] + async fn it_should_increment_the_number_of_peers_removed_when_a_peer_removed_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let peer = sample_peer(); + + handle_event( + Event::PeerRemoved { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL), + &label_set_for_peer(&peer), + 1, + ) + .await; + } + + #[tokio::test] + async fn it_should_increment_the_number_of_peers_updated_when_a_peer_updated_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let new_peer = sample_peer(); + + handle_event( + Event::PeerUpdated { + info_hash: sample_info_hash(), + old_peer: sample_peer(), + new_peer, + 
}, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL), + &label_set_for_peer(&new_peer), + 1, + ) + .await; + } + + mod torrent_downloads_total { + + use std::sync::Arc; + + use rstest::rstest; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelValue; + use torrust_tracker_metrics::{label_name, metric_name}; + use torrust_tracker_primitives::peer::PeerRole; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, make_peer}; + use crate::statistics::repository::Repository; + use crate::statistics::SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL; + use crate::tests::sample_info_hash; + use crate::CurrentClock; + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_increment_the_number_of_downloads_when_a_peer_downloaded_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + handle_event( + Event::PeerDownloadCompleted { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be(&stats_repository, &metric_name, &label_set, 1).await; + } + } + } +} diff --git a/packages/swarm-coordination-registry/src/statistics/event/listener.rs b/packages/swarm-coordination-registry/src/statistics/event/listener.rs new file mode 100644 index 000000000..b578d1284 --- /dev/null +++ 
b/packages/swarm-coordination-registry/src/statistics/event/listener.rs @@ -0,0 +1,58 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; + +use super::handler::handle_event; +use crate::event::receiver::Receiver; +use crate::statistics::repository::Repository; +use crate::{CurrentClock, SWARM_COORDINATION_REGISTRY_LOG_TARGET}; + +#[must_use] +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc<Repository>, +) -> JoinHandle<()> { + let stats_repository = repository.clone(); + + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Starting swarm coordination registry event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, cancellation_token, stats_repository).await; + + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Swarm coordination registry listener finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc<Repository>) { + loop { + tokio::select! 
{ + biased; + + () = cancellation_token.cancelled() => { + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Received cancellation request, shutting down swarm coordination registry event listener."); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Swarm coordination registry event receiver closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Swarm coordination registry event receiver lagged by {} events.", n); + } + } + } + } + } + } + } +} diff --git a/packages/swarm-coordination-registry/src/statistics/event/mod.rs b/packages/swarm-coordination-registry/src/statistics/event/mod.rs new file mode 100644 index 000000000..dae683398 --- /dev/null +++ b/packages/swarm-coordination-registry/src/statistics/event/mod.rs @@ -0,0 +1,2 @@ +pub mod handler; +pub mod listener; diff --git a/packages/swarm-coordination-registry/src/statistics/metrics.rs b/packages/swarm-coordination-registry/src/statistics/metrics.rs new file mode 100644 index 000000000..d62a1ba6e --- /dev/null +++ b/packages/swarm-coordination-registry/src/statistics/metrics.rs @@ -0,0 +1,63 @@ +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +/// Metrics collected by the torrent repository. +#[derive(Debug, Clone, PartialEq, Default, Serialize)] +pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn increment_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increment_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_gauge(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn decrement_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.decrement_gauge(metric_name, labels, now) + } +} diff --git a/packages/swarm-coordination-registry/src/statistics/mod.rs b/packages/swarm-coordination-registry/src/statistics/mod.rs new file mode 100644 index 000000000..a4bf4c018 --- /dev/null +++ b/packages/swarm-coordination-registry/src/statistics/mod.rs @@ -0,0 +1,117 @@ +pub mod activity_metrics_updater; +pub mod event; +pub mod metrics; +pub mod repository; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::unit::Unit; + +// Torrent metrics + +const SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL: &str = "swarm_coordination_registry_torrents_added_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL: &str = "swarm_coordination_registry_torrents_removed_total"; + 
+const SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL: &str = "swarm_coordination_registry_torrents_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL: &str = "swarm_coordination_registry_torrents_downloads_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL: &str = "swarm_coordination_registry_torrents_inactive_total"; + +// Peers metrics + +const SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL: &str = "swarm_coordination_registry_peers_added_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL: &str = "swarm_coordination_registry_peers_removed_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL: &str = "swarm_coordination_registry_peers_updated_total"; + +const SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL: &str = "swarm_coordination_registry_peer_connections_total"; +const SWARM_COORDINATION_REGISTRY_UNIQUE_PEERS_TOTAL: &str = "swarm_coordination_registry_unique_peers_total"; // todo: not implemented yet +const SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL: &str = "swarm_coordination_registry_peers_inactive_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL: &str = + "swarm_coordination_registry_peers_completed_state_reverted_total"; + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + // Torrent metrics + + metrics.metric_collection.describe_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of torrents added.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of torrents removed.")), + ); + + metrics.metric_collection.describe_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of torrents.")), + ); + + 
metrics.metric_collection.describe_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of torrent downloads.")), + ); + + metrics.metric_collection.describe_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of inactive torrents.")), + ); + + // Peers metrics + + metrics.metric_collection.describe_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of peers added.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of peers removed.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of peers updated.")), + ); + + metrics.metric_collection.describe_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new( + "The total number of peer connections (one connection per torrent).", + )), + ); + + metrics.metric_collection.describe_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_UNIQUE_PEERS_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of unique peers.")), + ); + + metrics.metric_collection.describe_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of inactive peers.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new( + "The total number of peers whose completed state was reverted.", + )), 
+ ); + + metrics +} diff --git a/packages/swarm-coordination-registry/src/statistics/repository.rs b/packages/swarm-coordination-registry/src/statistics/repository.rs new file mode 100644 index 000000000..fe1292d00 --- /dev/null +++ b/packages/swarm-coordination-registry/src/statistics/repository.rs @@ -0,0 +1,132 @@ +use std::sync::Arc; + +use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::describe_metrics; +use super::metrics::Metrics; + +/// A repository for the torrent repository metrics. +#[derive(Clone)] +pub struct Repository { + pub stats: Arc<RwLock<Metrics>>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + let stats = Arc::new(RwLock::new(describe_metrics())); + + Self { stats } + } + + pub async fn get_metrics(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the counter. + pub async fn increment_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_counter(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the counter: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// set the gauge. 
+ pub async fn set_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.set_gauge(metric_name, labels, value, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to set the gauge: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the gauge. + pub async fn increment_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_gauge(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the gauge: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// decrement the gauge. + pub async fn decrement_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.decrement_gauge(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to decrement the gauge: {}", err), + } + + result + } +} diff --git a/packages/swarm-coordination-registry/src/swarm/coordinator.rs b/packages/swarm-coordination-registry/src/swarm/coordinator.rs new file mode 100644 index 000000000..433ab9d32 --- /dev/null +++ b/packages/swarm-coordination-registry/src/swarm/coordinator.rs @@ -0,0 +1,1040 @@ +//! A swarm is a collection of peers that are all trying to download the same +//! torrent. 
+use std::collections::BTreeMap; +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::AnnounceEvent; +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::peer::{self, Peer, PeerAnnouncement}; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::sender::Sender; +use crate::event::Event; + +#[derive(Clone)] +pub struct Coordinator { + info_hash: InfoHash, + peers: BTreeMap<SocketAddr, Arc<PeerAnnouncement>>, + metadata: SwarmMetadata, + event_sender: Sender, +} + +impl Coordinator { + #[must_use] + pub fn new(info_hash: &InfoHash, downloaded: u32, event_sender: Sender) -> Self { + Self { + info_hash: *info_hash, + peers: BTreeMap::new(), + metadata: SwarmMetadata::new(downloaded, 0, 0), + event_sender, + } + } + + pub async fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) { + let _previous_peer = match peer::ReadInfo::get_event(incoming_announce) { + AnnounceEvent::Started | AnnounceEvent::None | AnnounceEvent::Completed => { + self.upsert_peer(Arc::new(*incoming_announce)).await + } + AnnounceEvent::Stopped => self.remove_peer(&incoming_announce.peer_addr).await, + }; + } + + pub async fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> usize { + let peers_to_remove = self.inactive_peers(current_cutoff); + + for peer_addr in &peers_to_remove { + self.remove_peer(peer_addr).await; + } + + peers_to_remove.len() + } + + #[must_use] + pub fn get(&self, peer_addr: &SocketAddr) -> Option<&Arc<Peer>> { + self.peers.get(peer_addr) + } + + #[must_use] + pub fn peers(&self, limit: Option<usize>) -> Vec<Arc<Peer>> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + #[must_use] + pub fn peers_excluding(&self, peer_addr: &SocketAddr, limit: 
Option<usize>) -> Vec<Arc<peer::Peer>> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *peer_addr) + // Limit the number of peers on the result + .take(limit) + .cloned() + .collect(), + None => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *peer_addr) + .cloned() + .collect(), + } + } + + #[must_use] + pub fn metadata(&self) -> SwarmMetadata { + self.metadata + } + + /// Returns the number of seeders and leechers in the swarm. + /// + /// # Panics + /// + /// This function will panic if the `complete` or `incomplete` fields in the + /// `metadata` field cannot be converted to `usize`. + #[must_use] + pub fn seeders_and_leechers(&self) -> (usize, usize) { + let seeders = self + .metadata + .complete + .try_into() + .expect("Failed to convert 'complete' (seeders) count to usize"); + let leechers = self + .metadata + .incomplete + .try_into() + .expect("Failed to convert 'incomplete' (leechers) count to usize"); + + (seeders, leechers) + } + + #[must_use] + pub fn count_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> usize { + self.peers + .iter() + .filter(|(_, peer)| peer::ReadInfo::get_updated(&**peer) <= current_cutoff) + .count() + } + + #[must_use] + pub fn get_activity_metadata(&self, current_cutoff: DurationSinceUnixEpoch) -> ActivityMetadata { + let inactive_peers_total = self.count_inactive_peers(current_cutoff); + + let active_peers_total = self.len() - inactive_peers_total; + + let is_active = active_peers_total > 0; + + ActivityMetadata::new(is_active, active_peers_total, inactive_peers_total) + } + + #[must_use] + pub fn len(&self) -> usize { + self.peers.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.peers.is_empty() + } + + #[must_use] + pub fn is_peerless(&self) -> bool { + self.is_empty() + } + + /// 
Returns true if the swarm meets the retention policy, meaning that + /// it should be kept in the list of swarms. + #[must_use] + pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + !self.should_be_removed(policy) + } + + async fn upsert_peer(&mut self, incoming_announce: Arc<PeerAnnouncement>) -> Option<Arc<Peer>> { + let announcement = incoming_announce.clone(); + + if let Some(previous_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { + let downloads_increased = self.update_metadata_on_update(&previous_announce, &announcement); + + self.trigger_peer_updated_event(&previous_announce, &announcement).await; + + if downloads_increased { + self.trigger_peer_download_completed_event(&announcement).await; + } + + Some(previous_announce) + } else { + self.update_metadata_on_insert(&announcement); + + self.trigger_peer_added_event(&announcement).await; + + None + } + } + + async fn remove_peer(&mut self, peer_addr: &SocketAddr) -> Option<Arc<Peer>> { + if let Some(old_peer) = self.peers.remove(peer_addr) { + self.update_metadata_on_removal(&old_peer); + + self.trigger_peer_removed_event(&old_peer).await; + + Some(old_peer) + } else { + None + } + } + + #[must_use] + fn inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Vec<SocketAddr> { + self.peers + .iter() + .filter(|(_, peer)| peer::ReadInfo::get_updated(&**peer) <= current_cutoff) + .map(|(addr, _)| *addr) + .collect() + } + + /// Returns true if the swarm should be removed according to the retention + /// policy. 
+ fn should_be_removed(&self, policy: &TrackerPolicy) -> bool { + policy.remove_peerless_torrents && self.is_empty() + } + + fn update_metadata_on_insert(&mut self, added_peer: &Arc<PeerAnnouncement>) { + if added_peer.is_seeder() { + self.metadata.complete += 1; + } else { + self.metadata.incomplete += 1; + } + } + + fn update_metadata_on_removal(&mut self, removed_peer: &Arc<Peer>) { + if removed_peer.is_seeder() { + self.metadata.complete -= 1; + } else { + self.metadata.incomplete -= 1; + } + } + + fn update_metadata_on_update( + &mut self, + previous_announce: &Arc<PeerAnnouncement>, + new_announce: &Arc<PeerAnnouncement>, + ) -> bool { + let mut downloads_increased = false; + + if previous_announce.role() != new_announce.role() { + if new_announce.is_seeder() { + self.metadata.complete += 1; + self.metadata.incomplete -= 1; + } else { + self.metadata.complete -= 1; + self.metadata.incomplete += 1; + } + } + + if new_announce.is_completed() && !previous_announce.is_completed() { + self.metadata.downloaded += 1; + downloads_increased = true; + } + + downloads_increased + } + + async fn trigger_peer_added_event(&self, announcement: &Arc<PeerAnnouncement>) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerAdded { + info_hash: self.info_hash, + peer: *announcement.clone(), + }) + .await; + } + } + + async fn trigger_peer_removed_event(&self, old_peer: &Arc<Peer>) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerRemoved { + info_hash: self.info_hash, + peer: *old_peer.clone(), + }) + .await; + } + } + + async fn trigger_peer_updated_event(&self, old_announce: &Arc<PeerAnnouncement>, new_announce: &Arc<PeerAnnouncement>) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerUpdated { + info_hash: self.info_hash, + old_peer: *old_announce.clone(), + new_peer: *new_announce.clone(), + }) + .await; + } + } + + async fn 
trigger_peer_download_completed_event(&self, new_announce: &Arc<PeerAnnouncement>) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerDownloadCompleted { + info_hash: self.info_hash, + peer: *new_announce.clone(), + }) + .await; + } + } +} + +#[derive(Clone)] +pub struct ActivityMetadata { + /// Indicates if the swarm is active. It's inactive if there are no active + /// peers. + pub is_active: bool, + + /// The number of active peers in the swarm. + pub active_peers_total: usize, + + /// The number of inactive peers in the swarm. + pub inactive_peers_total: usize, +} + +impl ActivityMetadata { + #[must_use] + pub fn new(is_active: bool, active_peers_total: usize, inactive_peers_total: usize) -> Self { + Self { + is_active, + active_peers_total, + inactive_peers_total, + } + } +} + +#[cfg(test)] +mod tests { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::PeerId; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::swarm::coordinator::Coordinator; + use crate::tests::sample_info_hash; + + #[test] + fn it_should_be_empty_when_no_peers_have_been_inserted() { + let swarm = Coordinator::new(&sample_info_hash(), 0, None); + + assert!(swarm.is_empty()); + } + + #[test] + fn it_should_have_zero_length_when_no_peers_have_been_inserted() { + let swarm = Coordinator::new(&sample_info_hash(), 0, None); + + assert_eq!(swarm.len(), 0); + } + + #[tokio::test] + async fn it_should_allow_inserting_a_new_peer() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let peer = PeerBuilder::default().build(); + + assert_eq!(swarm.upsert_peer(peer.into()).await, None); + } + + #[tokio::test] + async fn it_should_allow_updating_a_preexisting_peer() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let 
peer = PeerBuilder::default().build(); + + swarm.upsert_peer(peer.into()).await; + + assert_eq!(swarm.upsert_peer(peer.into()).await, Some(Arc::new(peer))); + } + + #[tokio::test] + async fn it_should_allow_getting_all_peers() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let peer = PeerBuilder::default().build(); + + swarm.upsert_peer(peer.into()).await; + + assert_eq!(swarm.peers(None), [Arc::new(peer)]); + } + + #[tokio::test] + async fn it_should_allow_getting_one_peer_by_id() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let peer = PeerBuilder::default().build(); + + swarm.upsert_peer(peer.into()).await; + + assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); + } + + #[tokio::test] + async fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let peer = PeerBuilder::default().build(); + + swarm.upsert_peer(peer.into()).await; + + assert_eq!(swarm.len(), 1); + } + + #[tokio::test] + async fn it_should_decrease_the_number_of_peers_after_removing_one() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let peer = PeerBuilder::default().build(); + + swarm.upsert_peer(peer.into()).await; + + swarm.remove_peer(&peer.peer_addr).await; + + assert!(swarm.is_empty()); + } + + #[tokio::test] + async fn it_should_allow_removing_an_existing_peer() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let peer = PeerBuilder::default().build(); + + swarm.upsert_peer(peer.into()).await; + + let old = swarm.remove_peer(&peer.peer_addr).await; + + assert_eq!(old, Some(Arc::new(peer))); + assert_eq!(swarm.get(&peer.peer_addr), None); + } + + #[tokio::test] + async fn it_should_allow_removing_a_non_existing_peer() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let peer = PeerBuilder::default().build(); + + assert_eq!(swarm.remove_peer(&peer.peer_addr).await, 
None); + } + + #[tokio::test] + async fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let peer1 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) + .build(); + swarm.upsert_peer(peer1.into()).await; + + let peer2 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); + swarm.upsert_peer(peer2.into()).await; + + assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); + } + + #[tokio::test] + async fn it_should_count_inactive_peers() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + swarm.upsert_peer(peer.into()).await; + + let inactive_peers_total = swarm.count_inactive_peers(last_update_time + one_second); + + assert_eq!(inactive_peers_total, 1); + } + + #[tokio::test] + async fn it_should_remove_inactive_peers() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + swarm.upsert_peer(peer.into()).await; + + // Remove peers not updated since one second after inserting the peer + swarm.remove_inactive(last_update_time + one_second).await; + + assert_eq!(swarm.len(), 0); + } + + #[tokio::test] + async fn it_should_not_remove_active_peers() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let one_second = 
DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + swarm.upsert_peer(peer.into()).await; + + // Remove peers not updated since one second before inserting the peer. + swarm.remove_inactive(last_update_time.checked_sub(one_second).unwrap()).await; + + assert_eq!(swarm.len(), 1); + } + + mod for_retaining_policy { + + use torrust_tracker_configuration::TrackerPolicy; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::tests::sample_info_hash; + use crate::Coordinator; + + fn empty_swarm() -> Coordinator { + Coordinator::new(&sample_info_hash(), 0, None) + } + + async fn not_empty_swarm() -> Coordinator { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + swarm.upsert_peer(PeerBuilder::default().build().into()).await; + swarm + } + + async fn not_empty_swarm_with_downloads() -> Coordinator { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let mut peer = PeerBuilder::leecher().build(); + + swarm.upsert_peer(peer.into()).await; + + peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; + + swarm.upsert_peer(peer.into()).await; + + assert!(swarm.metadata().downloads() > 0); + + swarm + } + + fn remove_peerless_torrents_policy() -> TrackerPolicy { + TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + } + } + + fn don_not_remove_peerless_torrents_policy() -> TrackerPolicy { + TrackerPolicy { + remove_peerless_torrents: false, + ..Default::default() + } + } + + mod when_removing_peerless_torrents_is_enabled { + + use torrust_tracker_configuration::TrackerPolicy; + + use crate::swarm::coordinator::tests::for_retaining_policy::{ + empty_swarm, not_empty_swarm, not_empty_swarm_with_downloads, remove_peerless_torrents_policy, + }; + + #[test] + fn it_should_be_removed_if_the_swarm_is_empty() { + 
assert!(empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); + } + + #[tokio::test] + async fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm().await.should_be_removed(&remove_peerless_torrents_policy())); + } + + #[tokio::test] + async fn it_should_not_be_removed_even_if_the_swarm_is_empty_if_we_need_to_track_stats_for_downloads_and_there_has_been_downloads( + ) { + let policy = TrackerPolicy { + remove_peerless_torrents: true, + persistent_torrent_completed_stat: true, + ..Default::default() + }; + + assert!(!not_empty_swarm_with_downloads().await.should_be_removed(&policy)); + } + } + + mod when_removing_peerless_torrents_is_disabled { + + use crate::swarm::coordinator::tests::for_retaining_policy::{ + don_not_remove_peerless_torrents_policy, empty_swarm, not_empty_swarm, + }; + + #[test] + fn it_should_not_be_removed_even_if_the_swarm_is_empty() { + assert!(!empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); + } + + #[tokio::test] + async fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm() + .await + .should_be_removed(&don_not_remove_peerless_torrents_policy())); + } + } + } + + #[tokio::test] + async fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let peer1 = PeerBuilder::default() + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) + .build(); + swarm.upsert_peer(peer1.into()).await; + + let peer2 = PeerBuilder::default() + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); + swarm.upsert_peer(peer2.into()).await; + + assert_eq!(swarm.len(), 2); + } + + #[tokio::test] + async fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + // When that happens the peer ID will 
be changed in the swarm. + // In practice, it's like if the peer had changed its ID. + + let peer1 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) + .build(); + swarm.upsert_peer(peer1.into()).await; + + let peer2 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) + .build(); + swarm.upsert_peer(peer2.into()).await; + + assert_eq!(swarm.len(), 1); + } + + #[tokio::test] + async fn it_should_return_the_swarm_metadata() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert_peer(seeder.into()).await; + swarm.upsert_peer(leecher.into()).await; + + assert_eq!( + swarm.metadata(), + SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 1, + } + ); + } + + #[tokio::test] + async fn it_should_return_the_number_of_seeders_in_the_list() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert_peer(seeder.into()).await; + swarm.upsert_peer(leecher.into()).await; + + let (seeders, _leechers) = swarm.seeders_and_leechers(); + + assert_eq!(seeders, 1); + } + + #[tokio::test] + async fn it_should_return_the_number_of_leechers_in_the_list() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert_peer(seeder.into()).await; + swarm.upsert_peer(leecher.into()).await; + + let (_seeders, leechers) = swarm.seeders_and_leechers(); + + assert_eq!(leechers, 1); + } + + #[tokio::test] + async fn it_should_be_a_peerless_swarm_when_it_does_not_contain_any_peers() { + let swarm = Coordinator::new(&sample_info_hash(), 
0, None); + assert!(swarm.is_peerless()); + } + + mod updating_the_swarm_metadata { + + mod when_a_new_peer_is_added { + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::swarm::coordinator::Coordinator; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let leechers = swarm.metadata().leechers(); + + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert_peer(leecher.into()).await; + + assert_eq!(swarm.metadata().leechers(), leechers + 1); + } + + #[tokio::test] + async fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let seeders = swarm.metadata().seeders(); + + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert_peer(seeder.into()).await; + + assert_eq!(swarm.metadata().seeders(), seeders + 1); + } + + #[tokio::test] + async fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( + ) { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let downloads = swarm.metadata().downloads(); + + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert_peer(seeder.into()).await; + + assert_eq!(swarm.metadata().downloads(), downloads); + } + } + + mod when_a_peer_is_removed { + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::swarm::coordinator::Coordinator; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert_peer(leecher.into()).await; + + let leechers = swarm.metadata().leechers(); + + swarm.remove_peer(&leecher.peer_addr).await; + + 
assert_eq!(swarm.metadata().leechers(), leechers - 1); + } + + #[tokio::test] + async fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert_peer(seeder.into()).await; + + let seeders = swarm.metadata().seeders(); + + swarm.remove_peer(&seeder.peer_addr).await; + + assert_eq!(swarm.metadata().seeders(), seeders - 1); + } + } + + mod when_a_peer_is_removed_due_to_inactivity { + use std::time::Duration; + + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::swarm::coordinator::Coordinator; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert_peer(leecher.into()).await; + + let leechers = swarm.metadata().leechers(); + + swarm.remove_inactive(leecher.updated + Duration::from_secs(1)).await; + + assert_eq!(swarm.metadata().leechers(), leechers - 1); + } + + #[tokio::test] + async fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert_peer(seeder.into()).await; + + let seeders = swarm.metadata().seeders(); + + swarm.remove_inactive(seeder.updated + Duration::from_secs(1)).await; + + assert_eq!(swarm.metadata().seeders(), seeders - 1); + } + } + + mod for_changes_in_existing_peers { + use aquatic_udp_protocol::NumberOfBytes; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::swarm::coordinator::Coordinator; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { + let mut swarm = 
Coordinator::new(&sample_info_hash(), 0, None); + + let mut peer = PeerBuilder::leecher().build(); + + swarm.upsert_peer(peer.into()).await; + + let leechers = swarm.metadata().leechers(); + let seeders = swarm.metadata().seeders(); + + peer.left = NumberOfBytes::new(0); // Convert to seeder + + swarm.upsert_peer(peer.into()).await; + + assert_eq!(swarm.metadata().seeders(), seeders + 1); + assert_eq!(swarm.metadata().leechers(), leechers - 1); + } + + #[tokio::test] + async fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let mut peer = PeerBuilder::seeder().build(); + + swarm.upsert_peer(peer.into()).await; + + let leechers = swarm.metadata().leechers(); + let seeders = swarm.metadata().seeders(); + + peer.left = NumberOfBytes::new(10); // Convert to leecher + + swarm.upsert_peer(peer.into()).await; + + assert_eq!(swarm.metadata().leechers(), leechers + 1); + assert_eq!(swarm.metadata().seeders(), seeders - 1); + } + + #[tokio::test] + async fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let mut peer = PeerBuilder::leecher().build(); + + swarm.upsert_peer(peer.into()).await; + + let downloads = swarm.metadata().downloads(); + + peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; + + swarm.upsert_peer(peer.into()).await; + + assert_eq!(swarm.metadata().downloads(), downloads + 1); + } + + #[tokio::test] + async fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); + + let mut peer = PeerBuilder::leecher().build(); + + swarm.upsert_peer(peer.into()).await; + + let downloads = swarm.metadata().downloads(); + + peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; + + 
swarm.upsert_peer(peer.into()).await; + + swarm.upsert_peer(peer.into()).await; + + assert_eq!(swarm.metadata().downloads(), downloads + 1); + } + } + } + + mod triggering_events { + + use std::sync::Arc; + + use aquatic_udp_protocol::AnnounceEvent::Started; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; + use crate::event::Event; + use crate::swarm::coordinator::Coordinator; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_new_peer_is_added() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence(&mut event_sender_mock, vec![Event::PeerAdded { info_hash, peer }]); + + let mut swarm = Coordinator::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); + + swarm.upsert_peer(peer.into()).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_directly_removed() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], + ); + + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + swarm.upsert_peer(peer.into()).await; + + swarm.remove_peer(&peer.peer_addr).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_removed_due_to_inactivity() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], + ); + + 
let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + swarm.upsert_peer(peer.into()).await; + + // Peers not updated after this time will be removed + let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + + swarm.remove_inactive(current_cutoff).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_updated() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().with_event(Started).build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::PeerAdded { info_hash, peer }, + Event::PeerUpdated { + info_hash, + old_peer: peer, + new_peer: peer, + }, + ], + ); + + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + swarm.upsert_peer(peer.into()).await; + + // Update the peer + swarm.upsert_peer(peer.into()).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_completes_a_download() { + let info_hash = sample_info_hash(); + let started_peer = PeerBuilder::leecher().with_event(Started).build(); + let completed_peer = started_peer.into_completed(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::PeerAdded { + info_hash, + peer: started_peer, + }, + Event::PeerUpdated { + info_hash, + old_peer: started_peer, + new_peer: completed_peer, + }, + Event::PeerDownloadCompleted { + info_hash, + peer: completed_peer, + }, + ], + ); + + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + swarm.upsert_peer(started_peer.into()).await; + + // Announce as completed + swarm.upsert_peer(completed_peer.into()).await; + } + } +} diff --git a/packages/swarm-coordination-registry/src/swarm/mod.rs b/packages/swarm-coordination-registry/src/swarm/mod.rs new file mode 100644 index 
000000000..925ae4948 --- /dev/null +++ b/packages/swarm-coordination-registry/src/swarm/mod.rs @@ -0,0 +1,2 @@ +pub mod coordinator; +pub mod registry; diff --git a/packages/swarm-coordination-registry/src/swarm/registry.rs b/packages/swarm-coordination-registry/src/swarm/registry.rs new file mode 100644 index 000000000..c8e98f307 --- /dev/null +++ b/packages/swarm-coordination-registry/src/swarm/registry.rs @@ -0,0 +1,1447 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use crossbeam_skiplist::SkipMap; +use tokio::sync::Mutex; +use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; + +use crate::event::sender::Sender; +use crate::event::Event; +use crate::swarm::coordinator::Coordinator; +use crate::CoordinatorHandle; + +#[derive(Default)] +pub struct Registry { + swarms: SkipMap<InfoHash, CoordinatorHandle>, + event_sender: Sender, +} + +impl Registry { + #[must_use] + pub fn new(event_sender: Sender) -> Self { + Self { + swarms: SkipMap::new(), + event_sender, + } + } + + /// Upsert a peer into the swarm of a torrent. + /// + /// Optionally, it can also preset the number of downloads of the torrent + /// only if it's the first time the torrent is being inserted. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// * `peer` - The peer to upsert. + /// * `opt_persistent_torrent` - The optional persisted data about a torrent + /// (number of downloads for the torrent). + /// + /// # Returns + /// + /// Returns `true` if the number of downloads was increased because the peer + /// completed the download. 
+ /// + /// # Errors + /// + /// This function panics if the lock for the swarm handle cannot be acquired. + pub async fn handle_announcement( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + opt_persistent_torrent: Option<NumberOfDownloads>, + ) -> Result<(), Error> { + let swarm_handle = match self.swarms.get(info_hash) { + None => { + let number_of_downloads = opt_persistent_torrent.unwrap_or_default(); + + let new_swarm_handle = + CoordinatorHandle::new(Coordinator::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); + + let new_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); + + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::TorrentAdded { + info_hash: *info_hash, + announcement: *peer, + }) + .await; + } + + new_swarm_handle + } + Some(existing_swarm_handle) => existing_swarm_handle, + }; + + let mut swarm = swarm_handle.value().lock().await; + + swarm.handle_announcement(peer).await; + + Ok(()) + } + + /// Inserts a new swarm. Only used for testing purposes. + pub fn insert(&self, info_hash: &InfoHash, swarm: Coordinator) { + // code-review: swarms builder? or constructor from vec? + // It's only used for testing purposes. It allows to pre-define the + // initial state of the swarm without having to go through the upsert + // process. + + let swarm_handle = Arc::new(Mutex::new(swarm)); + + self.swarms.insert(*info_hash, swarm_handle); + + // IMPORTANT: Notice this does not send an event because is used only + // for testing purposes. The event is sent only when the torrent is + // announced for the first time. + } + + /// Removes a torrent entry from the repository. + /// + /// # Returns + /// + /// An `Option` containing the removed torrent entry if it existed. 
+ #[must_use] + pub async fn remove(&self, key: &InfoHash) -> Option<CoordinatorHandle> { + let swarm_handle = self.swarms.remove(key).map(|entry| entry.value().clone()); + + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender.send(Event::TorrentRemoved { info_hash: *key }).await; + } + + swarm_handle + } + + /// Retrieves a tracked torrent handle by its infohash. + /// + /// # Returns + /// + /// An `Option` containing the tracked torrent handle if found. + #[must_use] + pub fn get(&self, key: &InfoHash) -> Option<CoordinatorHandle> { + let maybe_entry = self.swarms.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + /// Retrieves a paginated list of tracked torrent handles. + /// + /// This method returns a vector of tuples, each containing an infohash and + /// its associated tracked torrent handle. The pagination parameters + /// (offset and limit) can be used to control the size of the result set. + /// + /// # Returns + /// + /// A vector of `(InfoHash, TorrentEntry)` tuples. + #[must_use] + pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, CoordinatorHandle)> { + match pagination { + Some(pagination) => self + .swarms + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .swarms + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + /// Retrieves swarm metadata for a given torrent. + /// + /// # Returns + /// + /// A `SwarmMetadata` struct containing the aggregated torrent data if found. + /// + /// # Errors + /// + /// This function panics if the lock for the swarm handle cannot be acquired. 
+ pub async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Result<Option<SwarmMetadata>, Error> { + match self.swarms.get(info_hash) { + None => Ok(None), + Some(swarm_handle) => { + let swarm = swarm_handle.value().lock().await; + Ok(Some(swarm.metadata())) + } + } + } + + /// Retrieves swarm metadata for a given torrent. + /// + /// # Returns + /// + /// A `SwarmMetadata` struct containing the aggregated torrent data if it's + /// found or a zeroed metadata struct if not. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for the + /// swarm handle. + pub async fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> Result<SwarmMetadata, Error> { + match self.get_swarm_metadata(info_hash).await { + Ok(Some(swarm_metadata)) => Ok(swarm_metadata), + Ok(None) => Ok(SwarmMetadata::zeroed()), + Err(err) => Err(err), + } + } + + /// Retrieves torrent peers for a given torrent and client, excluding the + /// requesting client. + /// + /// This method filters out the client making the request (based on its + /// network address) and returns up to a maximum number of peers, defined by + /// the greater of the provided limit or the global `TORRENT_PEERS_LIMIT`. + /// + /// # Returns + /// + /// A vector of peers (wrapped in `Arc`) representing the active peers for + /// the torrent, excluding the requesting client. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for the + /// swarm handle. + pub async fn get_peers_peers_excluding( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + limit: usize, + ) -> Result<Vec<Arc<peer::Peer>>, Error> { + match self.get(info_hash) { + None => Ok(vec![]), + Some(swarm_handle) => { + let swarm = swarm_handle.lock().await; + Ok(swarm.peers_excluding(&peer.peer_addr, Some(limit))) + } + } + } + + /// Retrieves the list of peers for a given torrent. 
+ /// + /// This method returns up to `TORRENT_PEERS_LIMIT` peers for the torrent + /// specified by the info-hash. + /// + /// # Returns + /// + /// A vector of peers (wrapped in `Arc`) representing the active peers for + /// the torrent. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for the + /// swarm handle. + pub async fn get_swarm_peers(&self, info_hash: &InfoHash, limit: usize) -> Result<Vec<Arc<peer::Peer>>, Error> { + match self.get(info_hash) { + None => Ok(vec![]), + Some(swarm_handle) => { + let swarm = swarm_handle.lock().await; + Ok(swarm.peers(Some(limit))) + } + } + } + + pub async fn get_activity_metadata(&self, current_cutoff: DurationSinceUnixEpoch) -> AggregateActivityMetadata { + let mut active_peers_total = 0; + let mut inactive_peers_total = 0; + let mut active_torrents_total = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock().await; + + let activity_metadata = swarm.get_activity_metadata(current_cutoff); + + if activity_metadata.is_active { + active_torrents_total += 1; + } + + active_peers_total += activity_metadata.active_peers_total; + inactive_peers_total += activity_metadata.inactive_peers_total; + } + + AggregateActivityMetadata { + active_peers_total, + inactive_peers_total, + active_torrents_total, + inactive_torrents_total: self.len() - active_torrents_total, + } + } + + /// Counts the number of inactive peers across all torrents. + pub async fn count_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> usize { + let mut inactive_peers_total = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock().await; + inactive_peers_total += swarm.count_inactive_peers(current_cutoff); + } + + inactive_peers_total + } + + /// Removes inactive peers from all torrent entries. + /// + /// A peer is considered inactive if its last update timestamp is older than + /// the provided cutoff time. 
+ /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result<usize, Error> { + tracing::info!( + "Removing inactive peers since: {:?} ...", + convert_from_timestamp_to_datetime_utc(current_cutoff) + ); + + let mut inactive_peers_removed = 0; + + for swarm_handle in &self.swarms { + let mut swarm = swarm_handle.value().lock().await; + let removed = swarm.remove_inactive(current_cutoff).await; + inactive_peers_removed += removed; + } + + tracing::info!(inactive_peers_removed = inactive_peers_removed); + + Ok(inactive_peers_removed) + } + + /// Removes torrent entries that have no active peers. + /// + /// Depending on the tracker policy, torrents without any peers may be + /// removed to conserve memory. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result<u64, Error> { + tracing::info!("Removing peerless torrents ..."); + + let mut peerless_torrents_removed = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock().await; + + if swarm.meets_retaining_policy(policy) { + continue; + } + + let info_hash = *swarm_handle.key(); + + swarm_handle.remove(); + + peerless_torrents_removed += 1; + + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender.send(Event::TorrentRemoved { info_hash }).await; + } + } + + tracing::info!(peerless_torrents_removed = peerless_torrents_removed); + + Ok(peerless_torrents_removed) + } + + /// Imports persistent torrent data into the in-memory repository. + /// + /// This method takes a set of persisted torrent entries (e.g., from a + /// database) and imports them into the in-memory repository for immediate + /// access. 
+ pub fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) -> u64 { + tracing::info!("Importing persisted info about torrents ..."); + + let mut torrents_imported = 0; + + for (info_hash, completed) in persistent_torrents { + if self.swarms.contains_key(info_hash) { + continue; + } + + let entry = CoordinatorHandle::new(Coordinator::new(info_hash, *completed, self.event_sender.clone()).into()); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. + self.swarms.get_or_insert(*info_hash, entry); + + torrents_imported += 1; + } + + tracing::info!(imported_torrents = torrents_imported); + + torrents_imported + } + + /// Calculates and returns overall torrent metrics. + /// + /// The returned [`AggregateSwarmMetadata`] contains aggregate data such as + /// the total number of torrents, total complete (seeders), incomplete + /// (leechers), and downloaded counts. + /// + /// # Returns + /// + /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub async fn get_aggregate_swarm_metadata(&self) -> Result<AggregateActiveSwarmMetadata, Error> { + let mut metrics = AggregateActiveSwarmMetadata::default(); + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock().await; + + let stats = swarm.metadata(); + + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + Ok(metrics) + } + + /// Counts the number of torrents that are peerless (i.e., have no active + /// peers). + /// + /// # Returns + /// + /// A `usize` representing the number of peerless torrents. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. 
+ pub async fn count_peerless_torrents(&self) -> Result<usize, Error> { + let mut peerless_torrents = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock().await; + + if swarm.is_peerless() { + peerless_torrents += 1; + } + } + + Ok(peerless_torrents) + } + + /// Counts the total number of peers across all torrents. + /// + /// # Returns + /// + /// A `usize` representing the total number of peers. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub async fn count_peers(&self) -> Result<usize, Error> { + let mut peers = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock().await; + + peers += swarm.len(); + } + + Ok(peers) + } + + #[must_use] + pub fn len(&self) -> usize { + self.swarms.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.swarms.is_empty() + } + + pub fn contains(&self, key: &InfoHash) -> bool { + self.swarms.contains_key(key) + } +} + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error {} + +#[derive(Clone, Debug, Default)] +pub struct AggregateActivityMetadata { + /// The number of active peers in all swarms. + pub active_peers_total: usize, + + /// The number of inactive peers in all swarms. + pub inactive_peers_total: usize, + + /// The number of active torrents. + pub active_torrents_total: usize, + + /// The number of inactive torrents. 
+ pub inactive_torrents_total: usize, +} + +impl AggregateActivityMetadata { + pub fn log(&self) { + tracing::info!( + active_peers_total = self.active_peers_total, + inactive_peers_total = self.inactive_peers_total, + active_torrents_total = self.active_torrents_total, + inactive_torrents_total = self.inactive_torrents_total + ); + } +} +#[cfg(test)] +mod tests { + + mod the_swarm_repository { + + use std::sync::Arc; + + use aquatic_udp_protocol::PeerId; + + use crate::swarm::registry::Registry; + use crate::tests::{sample_info_hash, sample_peer}; + + /// It generates a peer id from a number where the number is the last + /// part of the peer ID. For example, for `12` it returns + /// `-qB00000000000000012`. + fn numeric_peer_id(two_digits_value: i32) -> PeerId { + // Format idx as a string with leading zeros, ensuring it has exactly 2 digits + let idx_str = format!("{two_digits_value:02}"); + + // Create the base part of the peer ID. + let base = b"-qB00000000000000000"; + + // Concatenate the base with idx bytes, ensuring the total length is 20 bytes. + let mut peer_id_bytes = [0u8; 20]; + peer_id_bytes[..base.len()].copy_from_slice(base); + peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes()); + + PeerId(peer_id_bytes) + } + + // The `TorrentRepository` has these responsibilities: + // - To maintain the peer lists for each torrent. + // - To maintain the the torrent entries, which contains all the info + // about the torrents, including the peer lists. + // - To return the torrent entries (swarm handles). + // - To return the peer lists for a given torrent. + // - To return the torrent metrics. + // - To return the swarm metadata for a given torrent. + // - To handle the persistence of the torrent entries. 
+ + #[tokio::test] + async fn it_should_return_zero_length_when_it_has_no_swarms() { + let swarms = Arc::new(Registry::default()); + assert_eq!(swarms.len(), 0); + } + + #[tokio::test] + async fn it_should_return_the_length_when_it_has_swarms() { + let swarms = Arc::new(Registry::default()); + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + assert_eq!(swarms.len(), 1); + } + + #[tokio::test] + async fn it_should_be_empty_when_it_has_no_swarms() { + let swarms = Arc::new(Registry::default()); + assert!(swarms.is_empty()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + assert!(!swarms.is_empty()); + } + + #[tokio::test] + async fn it_should_not_be_empty_when_it_has_at_least_one_swarm() { + let swarms = Arc::new(Registry::default()); + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + assert!(!swarms.is_empty()); + } + + mod maintaining_the_peer_lists { + + use std::sync::Arc; + + use crate::swarm::registry::Registry; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); + + assert!(swarms.get(&info_hash).is_some()); + } + + #[tokio::test] + async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); + + assert!(swarms.get(&info_hash).is_some()); + } + } + + mod 
returning_peer_lists_for_a_torrent { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::swarm::registry::tests::the_swarm_repository::numeric_peer_id; + use crate::swarm::registry::Registry; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + let peers = swarms.get_swarm_peers(&info_hash, 74).await.unwrap(); + + assert_eq!(peers, vec![Arc::new(peer)]); + } + + #[tokio::test] + async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { + let swarms = Arc::new(Registry::default()); + + let peers = swarms.get_swarm_peers(&sample_info_hash(), 74).await.unwrap(); + + assert!(peers.is_empty()); + } + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + + for idx in 1..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + } + + let peers = swarms.get_swarm_peers(&info_hash, 74).await.unwrap(); + + assert_eq!(peers.len(), 74); + } + + mod excluding_the_client_peer { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use 
aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::swarm::registry::tests::the_swarm_repository::numeric_peer_id; + use crate::swarm::registry::Registry; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { + let swarms = Arc::new(Registry::default()); + + let peers = swarms + .get_peers_peers_excluding(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT) + .await + .unwrap(); + + assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + let peers = swarms + .get_peers_peers_excluding(&info_hash, &peer, TORRENT_PEERS_LIMIT) + .await + .unwrap(); + + assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + + let excluded_peer = sample_peer(); + + swarms.handle_announcement(&info_hash, &excluded_peer, None).await.unwrap(); + + // Add 74 peers + for idx in 2..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + } + + let peers = swarms + 
.get_peers_peers_excluding(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT) + .await + .unwrap(); + + assert_eq!(peers.len(), 74); + } + } + } + + mod maintaining_the_torrent_entries { + + use std::ops::Add; + use std::sync::Arc; + use std::time::Duration; + + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_configuration::TrackerPolicy; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::swarm::registry::Registry; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn it_should_remove_a_torrent_entry() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); + + let _unused = swarms.remove(&info_hash).await; + + assert!(swarms.get(&info_hash).is_none()); + } + + #[tokio::test] + async fn it_should_count_inactive_peers() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + // Cut off time is 1 second after the peer was updated + let inactive_peers_total = swarms.count_inactive_peers(peer.updated.add(Duration::from_secs(1))).await; + + assert_eq!(inactive_peers_total, 1); + } + + #[tokio::test] + async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + // Cut off time is 1 second after the peer was updated + swarms + .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await + .unwrap(); + + assert!(!swarms + .get_swarm_peers(&info_hash, 74) + .await + .unwrap() + .contains(&Arc::new(peer))); + } + + async fn 
initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc<Registry> { + let swarms = Arc::new(Registry::default()); + + // Insert a sample peer for the torrent to force adding the torrent entry + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); + swarms.handle_announcement(info_hash, &peer, None).await.unwrap(); + + // Remove the peer + swarms + .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await + .unwrap(); + + swarms + } + + #[tokio::test] + async fn it_should_remove_torrents_without_peers() { + let info_hash = sample_info_hash(); + + let swarms = initialize_repository_with_one_torrent_without_peers(&info_hash).await; + + let tracker_policy = TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + }; + + swarms.remove_peerless_torrents(&tracker_policy).await.unwrap(); + + assert!(swarms.get(&info_hash).is_none()); + } + } + mod returning_torrent_entries { + + use std::sync::Arc; + + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::swarm::registry::Registry; + use crate::tests::{sample_info_hash, sample_peer}; + use crate::{Coordinator, CoordinatorHandle}; + + /// `TorrentEntry` data is not directly accessible. It's only + /// accessible through the trait methods. We need this temporary + /// DTO to write simple and more readable assertions. 
+ #[derive(Debug, Clone, PartialEq)] + struct TorrentEntryInfo { + swarm_metadata: SwarmMetadata, + peers: Vec<Peer>, + number_of_peers: usize, + } + + async fn torrent_entry_info(swarm_handle: CoordinatorHandle) -> TorrentEntryInfo { + let torrent_guard = swarm_handle.lock().await; + torrent_guard.clone().into() + } + + #[allow(clippy::from_over_into)] + impl Into<TorrentEntryInfo> for Coordinator { + fn into(self) -> TorrentEntryInfo { + let torrent_entry_info = TorrentEntryInfo { + swarm_metadata: self.metadata(), + peers: self.peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: self.len(), + }; + torrent_entry_info + } + } + + #[tokio::test] + async fn it_should_return_one_torrent_entry_by_infohash() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + let torrent_entry_info = torrent_entry_info(swarms.get(&info_hash).unwrap()).await; + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer), + number_of_peers: 1 + }, + torrent_entry_info + ); + } + + mod it_should_return_many_torrent_entries { + use std::sync::Arc; + + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::swarm::registry::tests::the_swarm_repository::returning_torrent_entries::{ + torrent_entry_info, TorrentEntryInfo, + }; + use crate::swarm::registry::Registry; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn without_pagination() { + let swarms = Arc::new(Registry::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + let torrent_entries = swarms.get_paginated(None); + + assert_eq!(torrent_entries.len(), 1); + + let torrent_entry = 
torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer), + number_of_peers: 1 + }, + torrent_entry + ); + } + + mod with_pagination { + use std::sync::Arc; + + use torrust_tracker_primitives::pagination::Pagination; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::swarm::registry::tests::the_swarm_repository::returning_torrent_entries::{ + torrent_entry_info, TorrentEntryInfo, + }; + use crate::swarm::registry::Registry; + use crate::tests::{ + sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, + sample_peer_one, sample_peer_two, + }; + + #[tokio::test] + async fn it_should_return_the_first_page() { + let swarms = Arc::new(Registry::default()); + + // Insert one torrent entry + let info_hash_one = sample_info_hash_one(); + let peer_one = sample_peer_one(); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); + + // Insert another torrent entry + let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); + let peer_two = sample_peer_two(); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); + + // Get only the first page where page size is 1 + let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); + + assert_eq!(torrent_entries.len(), 1); + + let torrent_entry_info = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer_one), + number_of_peers: 1 + }, + torrent_entry_info + ); + } + + #[tokio::test] + async fn it_should_return_the_second_page() { + let swarms = Arc::new(Registry::default()); + + // Insert one torrent entry + let info_hash_one = sample_info_hash_one(); + let 
peer_one = sample_peer_one(); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); + + // Insert another torrent entry + let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); + let peer_two = sample_peer_two(); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); + + // Get only the first page where page size is 1 + let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + + assert_eq!(torrent_entries.len(), 1); + + let torrent_entry_info = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer_two), + number_of_peers: 1 + }, + torrent_entry_info + ); + } + + #[tokio::test] + async fn it_should_allow_changing_the_page_size() { + let swarms = Arc::new(Registry::default()); + + // Insert one torrent entry + let info_hash_one = sample_info_hash_one(); + let peer_one = sample_peer_one(); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); + + // Insert another torrent entry + let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); + let peer_two = sample_peer_two(); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); + + // Get only the first page where page size is 1 + let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + + assert_eq!(torrent_entries.len(), 1); + } + } + } + } + + mod returning_aggregate_swarm_metadata { + + use std::sync::Arc; + + use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; + + use crate::swarm::registry::Registry; + use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; + + // todo: refactor to use test parametrization + + #[tokio::test] + 
async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { + let swarms = Arc::new(Registry::default()); + + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); + + assert_eq!( + aggregate_swarm_metadata, + AggregateActiveSwarmMetadata { + total_complete: 0, + total_downloaded: 0, + total_incomplete: 0, + total_torrents: 0 + } + ); + } + + #[tokio::test] + async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { + let swarms = Arc::new(Registry::default()); + + swarms + .handle_announcement(&sample_info_hash(), &leecher(), None) + .await + .unwrap(); + + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); + + assert_eq!( + aggregate_swarm_metadata, + AggregateActiveSwarmMetadata { + total_complete: 0, + total_downloaded: 0, + total_incomplete: 1, + total_torrents: 1, + } + ); + } + + #[tokio::test] + async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { + let swarms = Arc::new(Registry::default()); + + swarms + .handle_announcement(&sample_info_hash(), &seeder(), None) + .await + .unwrap(); + + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); + + assert_eq!( + aggregate_swarm_metadata, + AggregateActiveSwarmMetadata { + total_complete: 1, + total_downloaded: 0, + total_incomplete: 0, + total_torrents: 1, + } + ); + } + + #[tokio::test] + async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { + let swarms = Arc::new(Registry::default()); + + swarms + .handle_announcement(&sample_info_hash(), &complete_peer(), None) + .await + .unwrap(); + + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); + + assert_eq!( + aggregate_swarm_metadata, + AggregateActiveSwarmMetadata { + total_complete: 1, + total_downloaded: 0, + total_incomplete: 0, + total_torrents: 1, + } + ); + } + + #[tokio::test] + async fn 
it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() { + let swarms = Arc::new(Registry::default()); + + let start_time = std::time::Instant::now(); + for i in 0..1_000_000 { + swarms + .handle_announcement(&gen_seeded_infohash(&i), &leecher(), None) + .await + .unwrap(); + } + let result_a = start_time.elapsed(); + + let start_time = std::time::Instant::now(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); + let result_b = start_time.elapsed(); + + assert_eq!( + (aggregate_swarm_metadata), + (AggregateActiveSwarmMetadata { + total_complete: 0, + total_downloaded: 0, + total_incomplete: 1_000_000, + total_torrents: 1_000_000, + }), + "{result_a:?} {result_b:?}" + ); + } + + mod it_should_count_peerless_torrents { + use std::sync::Arc; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::swarm::registry::Registry; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn no_peerless_torrents() { + let swarms = Arc::new(Registry::default()); + assert_eq!(swarms.count_peerless_torrents().await.unwrap(), 0); + } + + #[tokio::test] + async fn one_peerless_torrents() { + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let swarms = Arc::new(Registry::default()); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + swarms.remove_inactive_peers(current_cutoff).await.unwrap(); + + assert_eq!(swarms.count_peerless_torrents().await.unwrap(), 1); + } + } + + mod it_should_count_peers { + use std::sync::Arc; + + use crate::swarm::registry::Registry; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn no_peers() { + let swarms = Arc::new(Registry::default()); + assert_eq!(swarms.count_peers().await.unwrap(), 0); + } + + #[tokio::test] + async fn one_peer() { + let info_hash = sample_info_hash(); + let peer = 
sample_peer(); + + let swarms = Arc::new(Registry::default()); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + assert_eq!(swarms.count_peers().await.unwrap(), 1); + } + } + } + + mod returning_swarm_metadata { + + use std::sync::Arc; + + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::swarm::registry::Registry; + use crate::tests::{leecher, sample_info_hash}; + + #[tokio::test] + async fn it_should_get_swarm_metadata_for_an_existing_torrent() { + let swarms = Arc::new(Registry::default()); + + let infohash = sample_info_hash(); + + swarms.handle_announcement(&infohash, &leecher(), None).await.unwrap(); + + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); + + assert_eq!( + swarm_metadata, + SwarmMetadata { + complete: 0, + downloaded: 0, + incomplete: 1, + } + ); + } + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { + let swarms = Arc::new(Registry::default()); + + let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).await.unwrap(); + + assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); + } + } + + mod handling_persistence { + + use std::sync::Arc; + + use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; + + use crate::swarm::registry::Registry; + use crate::tests::{leecher, sample_info_hash}; + + #[tokio::test] + async fn it_should_allow_importing_persisted_torrent_entries() { + let swarms = Arc::new(Registry::default()); + + let infohash = sample_info_hash(); + + let mut persistent_torrents = NumberOfDownloadsBTreeMap::default(); + + persistent_torrents.insert(infohash, 1); + + swarms.import_persistent(&persistent_torrents); + + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); + + // Only the number of downloads is persisted. 
+ assert_eq!(swarm_metadata.downloaded, 1); + } + + #[tokio::test] + async fn it_should_allow_overwriting_a_previously_imported_persisted_torrent() { + // code-review: do we want to allow this? + + let swarms = Arc::new(Registry::default()); + + let infohash = sample_info_hash(); + + let mut persistent_torrents = NumberOfDownloadsBTreeMap::default(); + + persistent_torrents.insert(infohash, 1); + persistent_torrents.insert(infohash, 2); + + swarms.import_persistent(&persistent_torrents); + + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); + + // It takes the last value + assert_eq!(swarm_metadata.downloaded, 2); + } + + #[tokio::test] + async fn it_should_now_allow_importing_a_persisted_torrent_if_it_already_exists() { + let swarms = Arc::new(Registry::default()); + + let infohash = sample_info_hash(); + + // Insert a new the torrent entry + swarms.handle_announcement(&infohash, &leecher(), None).await.unwrap(); + let initial_number_of_downloads = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap().downloaded; + + // Try to import the torrent entry + let new_number_of_downloads = initial_number_of_downloads + 1; + let mut persistent_torrents = NumberOfDownloadsBTreeMap::default(); + persistent_torrents.insert(infohash, new_number_of_downloads); + swarms.import_persistent(&persistent_torrents); + + // The number of downloads should not be changed + assert_eq!( + swarms.get_swarm_metadata_or_default(&infohash).await.unwrap().downloaded, + initial_number_of_downloads + ); + } + } + } + + mod triggering_events { + + use std::sync::Arc; + + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; + use crate::event::Event; + use crate::swarm::registry::Registry; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_new_torrent_is_added() 
{ + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::TorrentAdded { + info_hash, + announcement: peer, + }, + Event::PeerAdded { info_hash, peer }, + ], + ); + + let swarms = Registry::new(Some(Arc::new(event_sender_mock))); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_torrent_is_directly_removed() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::TorrentAdded { + info_hash, + announcement: peer, + }, + Event::PeerAdded { info_hash, peer }, + Event::TorrentRemoved { info_hash }, + ], + ); + + let swarms = Registry::new(Some(Arc::new(event_sender_mock))); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + swarms.remove(&info_hash).await.unwrap(); + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peerless_torrent_is_removed() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::TorrentAdded { + info_hash, + announcement: peer, + }, + Event::PeerAdded { info_hash, peer }, + Event::PeerRemoved { info_hash, peer }, + Event::TorrentRemoved { info_hash }, + ], + ); + + let swarms = Registry::new(Some(Arc::new(event_sender_mock))); + + // Add the new torrent + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + // Remove the peer + let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + swarms.remove_inactive_peers(current_cutoff).await.unwrap(); + + // Remove peerless torrents + + let tracker_policy = 
torrust_tracker_configuration::TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + }; + + swarms.remove_peerless_torrents(&tracker_policy).await.unwrap(); + } + } +} diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 3495c314a..fb240730d 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library providing helpers for testing the Torrust tracker." -keywords = ["helper", "library", "testing"] +keywords = [ "helper", "library", "testing" ] name = "torrust-tracker-test-helpers" readme = "README.md" @@ -18,4 +18,4 @@ version.workspace = true rand = "0" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 130820334..ffe3af3b2 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -40,7 +40,7 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for API let api_port = 0u16; let mut http_api = HttpApi { - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), api_port), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), api_port), ..Default::default() }; http_api.add_token("admin", "MyAccessToken"); @@ -48,20 +48,22 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for Health Check API let health_check_api_port = 0u16; - config.health_check_api.bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), health_check_api_port); + config.health_check_api.bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), health_check_api_port); // Ephemeral socket address for UDP tracker let udp_port = 0u16; config.udp_trackers = 
Some(vec![UdpTracker { - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), udp_port), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), udp_port), cookie_lifetime: Duration::from_secs(120), + tracker_usage_statistics: true, }]); // Ephemeral socket address for HTTP tracker let http_port = 0u16; config.http_trackers = Some(vec![HttpTracker { - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), http_port), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), http_port), tsl_config: None, + tracker_usage_statistics: true, }]); let temp_file = ephemeral_sqlite_database(); @@ -154,7 +156,7 @@ pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { pub fn ephemeral_ipv6() -> Configuration { let mut cfg = ephemeral(); - let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0); + let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0); if let Some(ref mut http_api) = cfg.http_api { http_api.bind_address.clone_from(&ipv6); diff --git a/packages/test-helpers/src/random.rs b/packages/test-helpers/src/random.rs index f096d695c..62265dbd7 100644 --- a/packages/test-helpers/src/random.rs +++ b/packages/test-helpers/src/random.rs @@ -1,6 +1,6 @@ //! Random data generators for testing. use rand::distr::Alphanumeric; -use rand::{rng, Rng}; +use rand::{rng, RngExt}; /// Returns a random alphanumeric string of a certain size. /// diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository-benchmarking/Cargo.toml similarity index 63% rename from packages/torrent-repository/Cargo.toml rename to packages/torrent-repository-benchmarking/Cargo.toml index 2097d57d2..653ad8102 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository-benchmarking/Cargo.toml @@ -1,7 +1,7 @@ [package] -description = "A library that provides a repository of torrents files and their peers." 
-keywords = ["library", "repository", "torrents"] -name = "torrust-tracker-torrent-repository" +description = "A library to runt benchmarking for different implementations of a repository of torrents files and their peers." +keywords = [ "library", "repository", "torrents" ] +name = "torrust-tracker-torrent-repository-benchmarking" readme = "README.md" authors.workspace = true @@ -22,15 +22,15 @@ crossbeam-skiplist = "0" dashmap = "6" futures = "0" parking_lot = "0" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } zerocopy = "0.7" [dev-dependencies] -async-std = { version = "1", features = ["attributes", "tokio1"] } -criterion = { version = "0", features = ["async_tokio"] } +async-std = { version = "1", features = [ "attributes", "tokio1" ] } +criterion = { version = "0", features = [ "async_tokio" ] } rstest = "0" [[bench]] diff --git a/packages/torrent-repository/README.md b/packages/torrent-repository-benchmarking/README.md similarity index 73% rename from packages/torrent-repository/README.md rename to packages/torrent-repository-benchmarking/README.md index ffc71f1d7..a0556a58f 100644 --- a/packages/torrent-repository/README.md +++ b/packages/torrent-repository-benchmarking/README.md @@ -1,6 +1,6 @@ -# Torrust Tracker Torrent Repository +# Torrust Tracker Swarm Coordination Registry Benchmarking -A library to provide a torrent repository to the [Torrust Tracker](https://github.com/torrust/torrust-tracker). +A library to runt benchmarking for different implementations of a repository of torrents files and their peers. 
Torrent repositories are used by the [Torrust Tracker](https://github.com/torrust/torrust-tracker). ## Benchmarking diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository-benchmarking/benches/helpers/asyn.rs similarity index 98% rename from packages/torrent-repository/benches/helpers/asyn.rs rename to packages/torrent-repository-benchmarking/benches/helpers/asyn.rs index fc6b3ffb0..4deb1955a 100644 --- a/packages/torrent-repository/benches/helpers/asyn.rs +++ b/packages/torrent-repository-benchmarking/benches/helpers/asyn.rs @@ -3,7 +3,7 @@ use std::time::{Duration, Instant}; use bittorrent_primitives::info_hash::InfoHash; use futures::stream::FuturesUnordered; -use torrust_tracker_torrent_repository::repository::RepositoryAsync; +use torrust_tracker_torrent_repository_benchmarking::repository::RepositoryAsync; use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; diff --git a/packages/torrent-repository/benches/helpers/mod.rs b/packages/torrent-repository-benchmarking/benches/helpers/mod.rs similarity index 100% rename from packages/torrent-repository/benches/helpers/mod.rs rename to packages/torrent-repository-benchmarking/benches/helpers/mod.rs diff --git a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository-benchmarking/benches/helpers/sync.rs similarity index 98% rename from packages/torrent-repository/benches/helpers/sync.rs rename to packages/torrent-repository-benchmarking/benches/helpers/sync.rs index e00401446..2cefb5a4a 100644 --- a/packages/torrent-repository/benches/helpers/sync.rs +++ b/packages/torrent-repository-benchmarking/benches/helpers/sync.rs @@ -3,7 +3,7 @@ use std::time::{Duration, Instant}; use bittorrent_primitives::info_hash::InfoHash; use futures::stream::FuturesUnordered; -use torrust_tracker_torrent_repository::repository::Repository; +use torrust_tracker_torrent_repository_benchmarking::repository::Repository; use super::utils::{generate_unique_info_hashes, 
DEFAULT_PEER}; diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository-benchmarking/benches/helpers/utils.rs similarity index 93% rename from packages/torrent-repository/benches/helpers/utils.rs rename to packages/torrent-repository-benchmarking/benches/helpers/utils.rs index 51b09ec0f..16ba0bf7f 100644 --- a/packages/torrent-repository/benches/helpers/utils.rs +++ b/packages/torrent-repository-benchmarking/benches/helpers/utils.rs @@ -9,7 +9,7 @@ use zerocopy::I64; pub const DEFAULT_PEER: Peer = Peer { peer_id: PeerId([0; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), updated: DurationSinceUnixEpoch::from_secs(0), uploaded: NumberOfBytes(I64::ZERO), downloaded: NumberOfBytes(I64::ZERO), diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs similarity index 97% rename from packages/torrent-repository/benches/repository_benchmark.rs rename to packages/torrent-repository-benchmarking/benches/repository_benchmark.rs index 4e50f1454..f5f8e4b28 100644 --- a/packages/torrent-repository/benches/repository_benchmark.rs +++ b/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs @@ -3,7 +3,7 @@ use std::time::Duration; mod helpers; use criterion::{criterion_group, criterion_main, Criterion}; -use torrust_tracker_torrent_repository::{ +use torrust_tracker_torrent_repository_benchmarking::{ TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, TorrentsSkipMapMutexStd, TorrentsSkipMapRwLockParkingLot, @@ -17,7 +17,7 @@ fn add_one_torrent(c: &mut Criterion) { let mut group = c.benchmark_group("add_one_torrent"); group.warm_up_time(Duration::from_millis(500)); - 
group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.iter_custom(sync::add_one_torrent::<TorrentsRwLockStd, _>); @@ -74,7 +74,7 @@ fn add_multiple_torrents_in_parallel(c: &mut Criterion) { //group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.to_async(&rt) @@ -138,7 +138,7 @@ fn update_one_torrent_in_parallel(c: &mut Criterion) { //group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.to_async(&rt) @@ -202,7 +202,7 @@ fn update_multiple_torrents_in_parallel(c: &mut Criterion) { //group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.to_async(&rt) diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository-benchmarking/src/entry/mod.rs similarity index 100% rename from packages/torrent-repository/src/entry/mod.rs rename to packages/torrent-repository-benchmarking/src/entry/mod.rs diff --git a/packages/torrent-repository/src/entry/mutex_parking_lot.rs b/packages/torrent-repository-benchmarking/src/entry/mutex_parking_lot.rs similarity index 100% rename from packages/torrent-repository/src/entry/mutex_parking_lot.rs rename to packages/torrent-repository-benchmarking/src/entry/mutex_parking_lot.rs diff --git a/packages/torrent-repository/src/entry/mutex_std.rs b/packages/torrent-repository-benchmarking/src/entry/mutex_std.rs similarity index 100% rename from packages/torrent-repository/src/entry/mutex_std.rs rename to 
packages/torrent-repository-benchmarking/src/entry/mutex_std.rs diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/entry/mutex_tokio.rs similarity index 100% rename from packages/torrent-repository/src/entry/mutex_tokio.rs rename to packages/torrent-repository-benchmarking/src/entry/mutex_tokio.rs diff --git a/packages/torrent-repository/src/entry/peer_list.rs b/packages/torrent-repository-benchmarking/src/entry/peer_list.rs similarity index 98% rename from packages/torrent-repository/src/entry/peer_list.rs rename to packages/torrent-repository-benchmarking/src/entry/peer_list.rs index 33270cf27..976e89d03 100644 --- a/packages/torrent-repository/src/entry/peer_list.rs +++ b/packages/torrent-repository-benchmarking/src/entry/peer_list.rs @@ -195,7 +195,7 @@ mod tests { let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); peer_list.upsert(peer1.into()); @@ -265,7 +265,7 @@ mod tests { peer_list.upsert(peer.into()); // Remove peers not updated since one second before inserting the peer. 
- peer_list.remove_inactive_peers(last_update_time - one_second); + peer_list.remove_inactive_peers(last_update_time.checked_sub(one_second).unwrap()); assert_eq!(peer_list.len(), 1); } diff --git a/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs b/packages/torrent-repository-benchmarking/src/entry/rw_lock_parking_lot.rs similarity index 100% rename from packages/torrent-repository/src/entry/rw_lock_parking_lot.rs rename to packages/torrent-repository-benchmarking/src/entry/rw_lock_parking_lot.rs diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository-benchmarking/src/entry/single.rs similarity index 100% rename from packages/torrent-repository/src/entry/single.rs rename to packages/torrent-repository-benchmarking/src/entry/single.rs diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository-benchmarking/src/lib.rs similarity index 100% rename from packages/torrent-repository/src/lib.rs rename to packages/torrent-repository-benchmarking/src/lib.rs diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs similarity index 87% rename from packages/torrent-repository/src/repository/dash_map_mutex_std.rs rename to packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs index d4a84caa0..fec94b4a5 100644 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs @@ -4,8 +4,8 @@ use bittorrent_primitives::info_hash::InfoHash; use dashmap::DashMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use 
torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -22,7 +22,7 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option<PersistentTorrent>) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option<NumberOfDownloads>) -> bool { // todo: load persistent torrent data if provided if let Some(entry) = self.torrents.get(info_hash) { @@ -46,8 +46,8 @@ where maybe_entry.map(|entry| entry.clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); @@ -77,7 +77,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository-benchmarking/src/repository/mod.rs similarity index 75% rename from packages/torrent-repository/src/repository/mod.rs rename to packages/torrent-repository-benchmarking/src/repository/mod.rs index 9284ff6e6..cf58838a1 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/src/repository/mod.rs @@ -1,8 +1,8 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; 
-use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; pub mod dash_map_mutex_std; pub mod rw_lock_std; @@ -17,22 +17,22 @@ use std::fmt::Debug; pub trait Repository<T>: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> Option<T>; - fn get_metrics(&self) -> AggregateSwarmMetadata; + fn get_metrics(&self) -> AggregateActiveSwarmMetadata; fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; - fn import_persistent(&self, persistent_torrents: &PersistentTorrents); + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap); fn remove(&self, key: &InfoHash) -> Option<T>; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); fn remove_peerless_torrents(&self, policy: &TrackerPolicy); - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option<PersistentTorrent>) -> bool; + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option<NumberOfDownloads>) -> bool; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option<SwarmMetadata>; } #[allow(clippy::module_name_repetitions)] pub trait RepositoryAsync<T>: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future<Output = Option<T>> + Send; - fn get_metrics(&self) -> impl std::future::Future<Output = AggregateSwarmMetadata> + Send; + fn get_metrics(&self) -> impl std::future::Future<Output = AggregateActiveSwarmMetadata> + Send; fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future<Output = Vec<(InfoHash, T)>> + Send; - fn import_persistent(&self, 
persistent_torrents: &PersistentTorrents) -> impl std::future::Future<Output = ()> + Send; + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) -> impl std::future::Future<Output = ()> + Send; fn remove(&self, key: &InfoHash) -> impl std::future::Future<Output = Option<T>> + Send; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future<Output = ()> + Send; fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future<Output = ()> + Send; @@ -40,7 +40,7 @@ pub trait RepositoryAsync<T>: Debug + Default + Sized + 'static { &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option<PersistentTorrent>, + opt_persistent_torrent: Option<NumberOfDownloads>, ) -> impl std::future::Future<Output = bool> + Send; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> impl std::future::Future<Output = Option<SwarmMetadata>> + Send; } diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs similarity index 89% rename from packages/torrent-repository/src/repository/rw_lock_std.rs rename to packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs index d190718af..5000579dd 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs @@ -1,8 +1,8 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, 
NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -45,7 +45,7 @@ impl Repository<EntrySingle> for TorrentsRwLockStd where EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option<PersistentTorrent>) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option<NumberOfDownloads>) -> bool { // todo: load persistent torrent data if provided let mut db = self.get_torrents_mut(); @@ -64,8 +64,8 @@ where db.get(key).cloned() } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().values() { let stats = entry.get_swarm_metadata(); @@ -92,7 +92,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut torrents = self.get_torrents_mut(); for (info_hash, downloaded) in persistent_torrents { diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs similarity index 89% rename from packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs rename to packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs index 1764b94e8..085256ff1 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs @@ -3,8 +3,8 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; 
-use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -32,7 +32,7 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option<PersistentTorrent>) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option<NumberOfDownloads>) -> bool { // todo: load persistent torrent data if provided let maybe_entry = self.get_torrents().get(info_hash).cloned(); @@ -59,8 +59,8 @@ where db.get(key).cloned() } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().values() { let stats = entry.lock().expect("it should get a lock").get_swarm_metadata(); @@ -87,7 +87,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut torrents = self.get_torrents_mut(); for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs similarity index 91% rename from packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs rename to packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs index 116c1ff87..9fd451149 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ 
b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs @@ -7,8 +7,8 @@ use futures::future::join_all; use futures::{Future, FutureExt}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -40,7 +40,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option<PersistentTorrent>, + _opt_persistent_torrent: Option<NumberOfDownloads>, ) -> bool { // todo: load persistent torrent data if provided @@ -85,8 +85,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); let entries: Vec<_> = self.get_torrents().values().cloned().collect(); @@ -101,7 +101,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut db = self.get_torrents_mut(); for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs similarity index 89% rename from packages/torrent-repository/src/repository/rw_lock_tokio.rs rename to packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs index 53838023d..e85200aeb 100644 --- 
a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs @@ -1,8 +1,8 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -50,7 +50,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option<PersistentTorrent>, + _opt_persistent_torrent: Option<NumberOfDownloads>, ) -> bool { // todo: load persistent torrent data if provided @@ -84,8 +84,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata(); @@ -98,7 +98,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut torrents = self.get_torrents_mut().await; for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs similarity index 89% rename from packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs rename to 
packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs index eb7e300fd..8d6584713 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs @@ -3,8 +3,8 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -38,7 +38,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option<PersistentTorrent>, + _opt_persistent_torrent: Option<NumberOfDownloads>, ) -> bool { // todo: load persistent torrent data if provided @@ -78,8 +78,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata(); @@ -92,7 +92,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut torrents = self.get_torrents_mut().await; for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs 
b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs similarity index 89% rename from packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs rename to packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs index c8ebaf4d6..c8f499e03 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -3,8 +3,8 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -38,7 +38,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option<PersistentTorrent>, + _opt_persistent_torrent: Option<NumberOfDownloads>, ) -> bool { // todo: load persistent torrent data if provided @@ -81,8 +81,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata().await; @@ -95,7 +95,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut db = self.get_torrents_mut().await; 
for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs similarity index 91% rename from packages/torrent-repository/src/repository/skip_map_mutex_std.rs rename to packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs index 8a15a9442..0432b13d0 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs @@ -4,8 +4,8 @@ use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -38,7 +38,7 @@ where /// /// Returns `true` if the number of downloads was increased because the peer /// completed the download. 
- fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option<PersistentTorrent>) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option<NumberOfDownloads>) -> bool { if let Some(existing_entry) = self.torrents.get(info_hash) { existing_entry.value().upsert_peer(peer) } else { @@ -69,8 +69,8 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); @@ -100,7 +100,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; @@ -146,7 +146,7 @@ where EntryRwLockParkingLot: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option<PersistentTorrent>) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option<NumberOfDownloads>) -> bool { // todo: load persistent torrent data if provided let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); @@ -162,8 +162,8 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().read().get_swarm_metadata(); @@ -193,7 +193,7 @@ where } } - fn import_persistent(&self, persistent_torrents: 
&PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; @@ -239,7 +239,7 @@ where EntryMutexParkingLot: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option<PersistentTorrent>) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option<NumberOfDownloads>) -> bool { // todo: load persistent torrent data if provided let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); @@ -255,8 +255,8 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().get_swarm_metadata(); @@ -286,7 +286,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository-benchmarking/tests/common/mod.rs similarity index 100% rename from packages/torrent-repository/tests/common/mod.rs rename to packages/torrent-repository-benchmarking/tests/common/mod.rs diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository-benchmarking/tests/common/repo.rs similarity index 95% rename from packages/torrent-repository/tests/common/repo.rs rename to packages/torrent-repository-benchmarking/tests/common/repo.rs index 224fc6aa3..2987240ef 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ 
b/packages/torrent-repository-benchmarking/tests/common/repo.rs @@ -1,10 +1,10 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _}; -use torrust_tracker_torrent_repository::{ +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; +use torrust_tracker_torrent_repository_benchmarking::repository::{Repository as _, RepositoryAsync as _}; +use torrust_tracker_torrent_repository_benchmarking::{ EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, TorrentsSkipMapMutexStd, TorrentsSkipMapRwLockParkingLot, @@ -29,7 +29,7 @@ impl Repo { &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option<PersistentTorrent>, + opt_persistent_torrent: Option<NumberOfDownloads>, ) -> bool { match self { Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), @@ -75,7 +75,7 @@ impl Repo { } } - pub(crate) async fn get_metrics(&self) -> AggregateSwarmMetadata { + pub(crate) async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { match self { Repo::RwLockStd(repo) => repo.get_metrics(), Repo::RwLockStdMutexStd(repo) => repo.get_metrics(), @@ -144,7 +144,7 @@ impl Repo { } } - pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + pub(crate) async fn import_persistent(&self, persistent_torrents: 
&NumberOfDownloadsBTreeMap) { match self { Repo::RwLockStd(repo) => repo.import_persistent(persistent_torrents), Repo::RwLockStdMutexStd(repo) => repo.import_persistent(persistent_torrents), diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository-benchmarking/tests/common/torrent.rs similarity index 96% rename from packages/torrent-repository/tests/common/torrent.rs rename to packages/torrent-repository-benchmarking/tests/common/torrent.rs index 927f13169..02874f9fc 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository-benchmarking/tests/common/torrent.rs @@ -4,8 +4,8 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::entry::{Entry as _, EntryAsync as _, EntrySync as _}; -use torrust_tracker_torrent_repository::{ +use torrust_tracker_torrent_repository_benchmarking::entry::{Entry as _, EntryAsync as _, EntrySync as _}; +use torrust_tracker_torrent_repository_benchmarking::{ EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, }; diff --git a/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs b/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs new file mode 100644 index 000000000..48aa981cd --- /dev/null +++ b/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs @@ -0,0 +1,26 @@ +use torrust_tracker_primitives::peer::fixture::PeerBuilder; +use torrust_tracker_primitives::peer::{self}; + +/// A torrent seeder is a peer with 0 bytes left to download which +/// has not announced it has stopped +#[must_use] +pub fn a_completed_peer(id: i32) -> peer::Peer { + let peer_id = peer::Id::new(id); + PeerBuilder::default() + .with_bytes_left_to_download(0) + .with_event_completed() + 
.with_peer_id(&peer_id) + .into() +} + +/// A torrent leecher is a peer that is not a seeder. +/// Leecher: left > 0 OR event = Stopped +#[must_use] +pub fn a_started_peer(id: i32) -> peer::Peer { + let peer_id = peer::Id::new(id); + PeerBuilder::default() + .with_bytes_left_to_download(1) + .with_event_started() + .with_peer_id(&peer_id) + .into() +} diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository-benchmarking/tests/entry/mod.rs similarity index 98% rename from packages/torrent-repository/tests/entry/mod.rs rename to packages/torrent-repository-benchmarking/tests/entry/mod.rs index 43d7f94da..86ca891d4 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/entry/mod.rs @@ -1,5 +1,4 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::ops::Sub; use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; @@ -9,7 +8,7 @@ use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{ +use torrust_tracker_torrent_repository_benchmarking::{ EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, }; @@ -368,7 +367,7 @@ async fn it_should_get_peers_excluding_the_client_socket( let peers = torrent.get_peers(None).await; let mut peer = **peers.first().expect("there should be a peer"); - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); + let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8081); // for this test, we should not already use this socket. 
assert_ne!(peer.peer_addr, socket); @@ -430,7 +429,9 @@ async fn it_should_remove_inactive_peers_beyond_cutoff( let now = clock::Working::now(); clock::Stopped::local_set(&now); - peer.updated = now.sub(EXPIRE); + peer.updated = now + .checked_sub(EXPIRE) + .expect("it_should_remove_inactive_peers_beyond_cutoff: EXPIRE must not exceed now"); torrent.upsert_peer(&peer).await; diff --git a/packages/torrent-repository/tests/integration.rs b/packages/torrent-repository-benchmarking/tests/integration.rs similarity index 100% rename from packages/torrent-repository/tests/integration.rs rename to packages/torrent-repository-benchmarking/tests/integration.rs diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs similarity index 91% rename from packages/torrent-repository/tests/repository/mod.rs rename to packages/torrent-repository-benchmarking/tests/repository/mod.rs index 77977837f..fb0b8fcff 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -7,13 +7,13 @@ use rstest::{fixture, rstest}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::entry::Entry as _; -use torrust_tracker_torrent_repository::repository::dash_map_mutex_std::XacrimonDashMap; -use torrust_tracker_torrent_repository::repository::rw_lock_std::RwLockStd; -use torrust_tracker_torrent_repository::repository::rw_lock_tokio::RwLockTokio; -use torrust_tracker_torrent_repository::repository::skip_map_mutex_std::CrossbeamSkipList; -use torrust_tracker_torrent_repository::EntrySingle; +use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; +use torrust_tracker_torrent_repository_benchmarking::entry::Entry as _; +use 
torrust_tracker_torrent_repository_benchmarking::repository::dash_map_mutex_std::XacrimonDashMap; +use torrust_tracker_torrent_repository_benchmarking::repository::rw_lock_std::RwLockStd; +use torrust_tracker_torrent_repository_benchmarking::repository::rw_lock_tokio::RwLockTokio; +use torrust_tracker_torrent_repository_benchmarking::repository::skip_map_mutex_std::CrossbeamSkipList; +use torrust_tracker_torrent_repository_benchmarking::EntrySingle; use crate::common::repo::Repo; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -167,12 +167,12 @@ fn many_hashed_in_order() -> Entries { } #[fixture] -fn persistent_empty() -> PersistentTorrents { - PersistentTorrents::default() +fn persistent_empty() -> NumberOfDownloadsBTreeMap { + NumberOfDownloadsBTreeMap::default() } #[fixture] -fn persistent_single() -> PersistentTorrents { +fn persistent_single() -> NumberOfDownloadsBTreeMap { let hash = &mut DefaultHasher::default(); hash.write_u8(1); @@ -182,7 +182,7 @@ fn persistent_single() -> PersistentTorrents { } #[fixture] -fn persistent_three() -> PersistentTorrents { +fn persistent_three() -> NumberOfDownloadsBTreeMap { let hash = &mut DefaultHasher::default(); hash.write_u8(1); @@ -364,12 +364,10 @@ async fn it_should_get_paginated( } // it should return the only the second entry if both the limit and the offset are one. - Pagination { limit: 1, offset: 1 } => { - if info_hashes.len() > 1 { - let page = repo.get_paginated(Some(&paginated)).await; - assert_eq!(page.len(), 1); - assert_eq!(page[0].0, info_hashes[1]); - } + Pagination { limit: 1, offset: 1 } if info_hashes.len() > 1 => { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page[0].0, info_hashes[1]); } // the other cases are not yet tested. 
_ => {} @@ -402,11 +400,11 @@ async fn it_should_get_metrics( repo: Repo, #[case] entries: Entries, ) { - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; make(&repo, &entries).await; - let mut metrics = AggregateSwarmMetadata::default(); + let mut metrics = AggregateActiveSwarmMetadata::default(); for (_, torrent) in entries { let stats = torrent.get_swarm_metadata(); @@ -445,12 +443,14 @@ async fn it_should_import_persistent_torrents( )] repo: Repo, #[case] entries: Entries, - #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, + #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: NumberOfDownloadsBTreeMap, ) { make(&repo, &entries).await; let mut downloaded = repo.get_metrics().await.total_downloaded; - persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); + for d in persistent_torrents.values() { + downloaded += u64::from(*d); + } repo.import_persistent(&persistent_torrents).await; @@ -526,7 +526,6 @@ async fn it_should_remove_inactive_peers( repo: Repo, #[case] entries: Entries, ) { - use std::ops::Sub as _; use std::time::Duration; use torrust_tracker_clock::clock::stopped::Stopped as _; @@ -556,7 +555,9 @@ async fn it_should_remove_inactive_peers( let now = clock::Working::now(); clock::Stopped::local_set(&now); - peer.updated = now.sub(EXPIRE); + peer.updated = now + .checked_sub(EXPIRE) + .expect("it_should_remove_inactive_peers_beyond_cutoff: EXPIRE must not exceed now"); } // Insert the infohash and peer into the repository diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs deleted file mode 100644 index 33120180d..000000000 --- a/packages/torrent-repository/tests/common/torrent_peer_builder.rs +++ /dev/null @@ -1,90 +0,0 @@ -use 
std::net::SocketAddr; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use torrust_tracker_clock::clock::Time; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - -use crate::CurrentClock; - -#[derive(Debug, Default)] -struct TorrentPeerBuilder { - peer: peer::Peer, -} - -#[allow(dead_code)] -impl TorrentPeerBuilder { - #[must_use] - fn new() -> Self { - Self { - peer: peer::Peer { - updated: CurrentClock::now(), - ..Default::default() - }, - } - } - - #[must_use] - fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - #[must_use] - fn with_event_started(mut self) -> Self { - self.peer.event = AnnounceEvent::Started; - self - } - - #[must_use] - fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - #[must_use] - fn with_peer_id(mut self, peer_id: PeerId) -> Self { - self.peer.peer_id = peer_id; - self - } - - #[must_use] - fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes::new(left); - self - } - - #[must_use] - fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - #[must_use] - fn into(self) -> peer::Peer { - self.peer - } -} - -/// A torrent seeder is a peer with 0 bytes left to download which -/// has not announced it has stopped -#[must_use] -pub fn a_completed_peer(id: i32) -> peer::Peer { - let peer_id = peer::Id::new(id); - TorrentPeerBuilder::new() - .with_number_of_bytes_left(0) - .with_event_completed() - .with_peer_id(*peer_id) - .into() -} - -/// A torrent leecher is a peer that is not a seeder. 
-/// Leecher: left > 0 OR event = Stopped -#[must_use] -pub fn a_started_peer(id: i32) -> peer::Peer { - let peer_id = peer::Id::new(id); - TorrentPeerBuilder::new() - .with_number_of_bytes_left(1) - .with_event_started() - .with_peer_id(*peer_id) - .into() -} diff --git a/packages/tracker-client/Cargo.toml b/packages/tracker-client/Cargo.toml index ef5cccaa2..0cd419471 100644 --- a/packages/tracker-client/Cargo.toml +++ b/packages/tracker-client/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the generic tracker clients." -keywords = ["bittorrent", "client", "tracker"] +keywords = [ "bittorrent", "client", "tracker" ] license = "LGPL-3.0" name = "bittorrent-tracker-client" readme = "README.md" @@ -17,16 +17,16 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } hyper = "1" percent-encoding = "2" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } serde_bencode = "0" serde_bytes = "0" serde_repr = "0" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } @@ -34,4 +34,4 @@ tracing = "0" zerocopy = "0.7" [package.metadata.cargo-machete] -ignored = ["serde_bytes"] +ignored = [ "serde_bytes" ] diff --git a/packages/tracker-client/src/http/client/mod.rs 
b/packages/tracker-client/src/http/client/mod.rs index 3c904a7c9..50e979c79 100644 --- a/packages/tracker-client/src/http/client/mod.rs +++ b/packages/tracker-client/src/http/client/mod.rs @@ -23,8 +23,9 @@ pub enum Error { } /// HTTP Tracker Client +#[allow(clippy::struct_field_names)] pub struct Client { - client: reqwest::Client, + http_client: reqwest::Client, base_url: Url, key: Option<Key>, } @@ -49,7 +50,7 @@ impl Client { Ok(Self { base_url, - client, + http_client: client, key: None, }) } @@ -68,7 +69,7 @@ impl Client { Ok(Self { base_url, - client, + http_client: client, key: None, }) } @@ -84,7 +85,7 @@ impl Client { Ok(Self { base_url, - client, + http_client: client, key: Some(key), }) } @@ -159,7 +160,7 @@ impl Client { /// /// This method fails if there was an error while sending request. pub async fn get(&self, path: &str) -> Result<Response, Error> { - self.client + self.http_client .get(self.build_url(path)) .send() .await @@ -170,7 +171,7 @@ impl Client { /// /// This method fails if there was an error while sending request. 
pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Result<Response, Error> { - self.client + self.http_client .get(self.build_url(path)) .header(key, value) .send() diff --git a/packages/tracker-client/src/http/client/requests/announce.rs b/packages/tracker-client/src/http/client/requests/announce.rs index 7d20fbba8..87bdbad52 100644 --- a/packages/tracker-client/src/http/client/requests/announce.rs +++ b/packages/tracker-client/src/http/client/requests/announce.rs @@ -53,16 +53,16 @@ pub type BaseTenASCII = u64; pub type PortNumber = u16; pub enum Event { - //Started, - //Stopped, + Started, + Stopped, Completed, } impl fmt::Display for Event { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - //Event::Started => write!(f, "started"), - //Event::Stopped => write!(f, "stopped"), + Event::Started => write!(f, "started"), + Event::Stopped => write!(f, "stopped"), Event::Completed => write!(f, "completed"), } } @@ -102,7 +102,7 @@ impl QueryBuilder { peer_id: PeerId(*b"-qB00000000000000001").0, port: 17548, left: 0, - event: Some(Event::Completed), + event: Some(Event::Started), compact: Some(Compact::NotAccepted), }; Self { diff --git a/packages/tracker-client/src/udp/client.rs b/packages/tracker-client/src/udp/client.rs index 89a33726d..94c882d29 100644 --- a/packages/tracker-client/src/udp/client.rs +++ b/packages/tracker-client/src/udp/client.rs @@ -8,6 +8,7 @@ use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; use tokio::net::UdpSocket; use tokio::time; use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use torrust_tracker_primitives::service_binding::ServiceBinding; use zerocopy::network_endian::I32; use super::Error; @@ -230,10 +231,12 @@ impl UdpTrackerClient { /// /// # Errors /// -pub async fn check(remote_addr: &SocketAddr) -> Result<String, String> { +pub async fn check(service_binding: &ServiceBinding) -> Result<String, String> { + let remote_addr = 
service_binding.bind_address(); + tracing::debug!("Checking Service (detail): {remote_addr:?}."); - match UdpTrackerClient::new(*remote_addr, DEFAULT_TIMEOUT).await { + match UdpTrackerClient::new(remote_addr, DEFAULT_TIMEOUT).await { Ok(client) => { let connect_request = ConnectRequest { transaction_id: TransactionId(I32::new(123)), @@ -253,7 +256,7 @@ pub async fn check(remote_addr: &SocketAddr) -> Result<String, String> { } }; - let sleep = time::sleep(Duration::from_millis(2000)); + let sleep = time::sleep(Duration::from_secs(2)); tokio::pin!(sleep); tokio::select! { diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index ac1cee88d..fb864cde7 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", "bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", "library", "tracker" ] license.workspace = true name = "bittorrent-tracker-core" publish.workspace = true @@ -16,22 +16,25 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -chrono = { version = "0", default-features = false, features = ["clock"] } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } mockall = "0" r2d2 = "0" r2d2_mysql = "25" -r2d2_sqlite = { version = "0", features = ["bundled"] } +r2d2_sqlite = { version = "0", features = [ "bundled" ] } rand = "0" -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } 
thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } +tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" [dev-dependencies] diff --git a/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql b/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql index 407ae4dd1..ab160bd75 100644 --- a/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql +++ b/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql @@ -4,6 +4,7 @@ CREATE TABLE info_hash VARCHAR(40) NOT NULL UNIQUE ); +# todo: rename to `torrent_metrics` CREATE TABLE IF NOT EXISTS torrents ( id integer PRIMARY KEY AUTO_INCREMENT, diff --git a/packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql b/packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql new file mode 100644 index 000000000..36f940cc3 --- /dev/null +++ b/packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql @@ 
-0,0 +1,6 @@ +CREATE TABLE + IF NOT EXISTS torrent_aggregate_metrics ( + id integer PRIMARY KEY AUTO_INCREMENT, + metric_name VARCHAR(50) NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + ); \ No newline at end of file diff --git a/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql b/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql index bd451bf8b..c5bcad926 100644 --- a/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql +++ b/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql @@ -4,6 +4,7 @@ CREATE TABLE info_hash TEXT NOT NULL UNIQUE ); +# todo: rename to `torrent_metrics` CREATE TABLE IF NOT EXISTS torrents ( id INTEGER PRIMARY KEY AUTOINCREMENT, diff --git a/packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql b/packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql new file mode 100644 index 000000000..34166903c --- /dev/null +++ b/packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql @@ -0,0 +1,6 @@ +CREATE TABLE + IF NOT EXISTS torrent_aggregate_metrics ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + metric_name TEXT NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + ); \ No newline at end of file diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index b858cae6c..0b6bffd31 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -96,11 +96,12 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer; +use torrust_tracker_primitives::{peer, 
NumberOfDownloads}; use super::torrent::repository::in_memory::InMemoryTorrentRepository; -use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::databases; use crate::error::AnnounceError; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; /// Handles `announce` requests from `BitTorrent` clients. @@ -115,7 +116,7 @@ pub struct AnnounceHandler { in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, /// Repository for persistent torrent data (database). - db_torrent_repository: Arc<DatabasePersistentTorrentRepository>, + db_downloads_metric_repository: Arc<DatabaseDownloadsMetricRepository>, } impl AnnounceHandler { @@ -125,13 +126,13 @@ impl AnnounceHandler { config: &Core, whitelist_authorization: &Arc<WhitelistAuthorization>, in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, - db_torrent_repository: &Arc<DatabasePersistentTorrentRepository>, + db_downloads_metric_repository: &Arc<DatabaseDownloadsMetricRepository>, ) -> Self { Self { whitelist_authorization: whitelist_authorization.clone(), config: config.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), - db_torrent_repository: db_torrent_repository.clone(), + db_downloads_metric_repository: db_downloads_metric_repository.clone(), } } @@ -154,7 +155,7 @@ impl AnnounceHandler { /// /// Returns an error if the tracker is running in `listed` mode and the /// torrent is not whitelisted. - pub async fn announce( + pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &mut peer::Peer, @@ -163,32 +164,39 @@ impl AnnounceHandler { ) -> Result<AnnounceData, AnnounceError> { self.whitelist_authorization.authorize(info_hash).await?; - let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { - self.db_torrent_repository.load(info_hash)? 
- } else { - None - }; - peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - let number_of_downloads_increased = - self.in_memory_torrent_repository - .upsert_peer(info_hash, peer, opt_persistent_torrent); + self.in_memory_torrent_repository + .handle_announcement(info_hash, peer, self.load_downloads_metric_if_needed(info_hash)?) + .await; - if self.config.tracker_policy.persistent_torrent_completed_stat && number_of_downloads_increased { - self.db_torrent_repository.increase_number_of_downloads(info_hash)?; - } + Ok(self.build_announce_data(info_hash, peer, peers_wanted).await) + } - Ok(self.build_announce_data(info_hash, peer, peers_wanted)) + /// Loads the number of downloads for a torrent if needed. + fn load_downloads_metric_if_needed( + &self, + info_hash: &InfoHash, + ) -> Result<Option<NumberOfDownloads>, databases::error::Error> { + if self.config.tracker_policy.persistent_torrent_completed_stat && !self.in_memory_torrent_repository.contains(info_hash) + { + Ok(self.db_downloads_metric_repository.load_torrent_downloads(info_hash)?) + } else { + Ok(None) + } } /// Builds the announce data for the peer making the request. 
- fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { + async fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { let peers = self .in_memory_torrent_repository - .get_peers_for(info_hash, peer, peers_wanted.limit()); + .get_peers_for(info_hash, peer, peers_wanted.limit()) + .await; - let swarm_metadata = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); + let swarm_metadata = self + .in_memory_torrent_repository + .get_swarm_metadata_or_default(info_hash) + .await; AnnounceData { peers, @@ -450,7 +458,7 @@ mod tests { let mut peer = sample_peer(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -463,7 +471,7 @@ mod tests { let mut previously_announced_peer = sample_peer_1(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut previously_announced_peer, &peer_ip(), @@ -474,7 +482,7 @@ mod tests { let mut peer = sample_peer_2(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -487,7 +495,7 @@ mod tests { let mut previously_announced_peer_1 = sample_peer_1(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut previously_announced_peer_1, &peer_ip(), @@ -498,7 +506,7 @@ mod tests { let mut previously_announced_peer_2 = sample_peer_2(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut previously_announced_peer_2, &peer_ip(), @@ -509,7 +517,7 @@ mod tests { let mut peer = sample_peer_3(); let announce_data = announce_handler - 
.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::only(1)) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::only(1)) .await .unwrap(); @@ -534,7 +542,7 @@ mod tests { let mut peer = seeder(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -548,7 +556,7 @@ mod tests { let mut peer = leecher(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -562,7 +570,7 @@ mod tests { // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut started_peer, &peer_ip(), @@ -573,7 +581,7 @@ mod tests { let mut completed_peer = completed_peer(); let announce_data = announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut completed_peer, &peer_ip(), @@ -588,82 +596,6 @@ mod tests { } } - mod handling_torrent_persistence { - - use std::sync::Arc; - - use aquatic_udp_protocol::AnnounceEvent; - use torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::entry::EntrySync; - - use crate::announce_handler::tests::the_announce_handler::peer_ip; - use crate::announce_handler::{AnnounceHandler, PeersWanted}; - use crate::databases::setup::initialize_database; - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::manager::TorrentsManager; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use 
crate::whitelist::authorization::WhitelistAuthorization; - use crate::whitelist::repository::in_memory::InMemoryWhitelist; - - #[tokio::test] - async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { - let mut config = configuration::ephemeral_public(); - - config.core.tracker_policy.persistent_torrent_completed_stat = true; - - let database = initialize_database(&config.core); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let torrents_manager = Arc::new(TorrentsManager::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let info_hash = sample_info_hash(); - - let mut peer = sample_peer(); - - peer.event = AnnounceEvent::Started; - let announce_data = announce_handler - .announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); - assert_eq!(announce_data.stats.downloaded, 0); - - peer.event = AnnounceEvent::Completed; - let announce_data = announce_handler - .announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); - assert_eq!(announce_data.stats.downloaded, 1); - - // Remove the newly updated torrent from memory - let _unused = in_memory_torrent_repository.remove(&info_hash); - - torrents_manager.load_torrents_from_database().unwrap(); - - let torrent_entry = in_memory_torrent_repository - .get(&info_hash) - .expect("it should be able to get entry"); - - // It persists the number of completed peers. 
- assert_eq!(torrent_entry.get_swarm_metadata().downloaded, 1); - - // It does not persist the peers - assert!(torrent_entry.peers_is_empty()); - } - } - mod should_allow_the_client_peers_to_specified_the_number_of_peers_wanted { use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; diff --git a/packages/tracker-core/src/authentication/key/peer_key.rs b/packages/tracker-core/src/authentication/key/peer_key.rs index 41aba950b..ba648ad2f 100644 --- a/packages/tracker-core/src/authentication/key/peer_key.rs +++ b/packages/tracker-core/src/authentication/key/peer_key.rs @@ -13,7 +13,7 @@ use std::time::Duration; use derive_more::Display; use rand::distr::Alphanumeric; -use rand::{rng, Rng}; +use rand::{rng, RngExt}; use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 9f4d23802..93b8efd7e 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::announce_handler::AnnounceHandler; use crate::authentication::handler::KeysHandler; @@ -10,14 +11,14 @@ use crate::authentication::service::AuthenticationService; use crate::databases::setup::initialize_database; use crate::databases::Database; use crate::scrape_handler::ScrapeHandler; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use crate::whitelist; use crate::whitelist::authorization::WhitelistAuthorization; use crate::whitelist::manager::WhitelistManager; use 
crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::setup::initialize_whitelist_manager; +use crate::{statistics, whitelist}; pub struct TrackerCoreContainer { pub core_config: Arc<Core>, @@ -30,13 +31,17 @@ pub struct TrackerCoreContainer { pub whitelist_authorization: Arc<whitelist::authorization::WhitelistAuthorization>, pub whitelist_manager: Arc<WhitelistManager>, pub in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, - pub db_torrent_repository: Arc<DatabasePersistentTorrentRepository>, + pub db_downloads_metric_repository: Arc<DatabaseDownloadsMetricRepository>, pub torrents_manager: Arc<TorrentsManager>, + pub stats_repository: Arc<statistics::repository::Repository>, } impl TrackerCoreContainer { #[must_use] - pub fn initialize(core_config: &Arc<Core>) -> Self { + pub fn initialize_from( + core_config: &Arc<Core>, + swarm_coordination_registry_container: &Arc<SwarmCoordinationRegistryContainer>, + ) -> Self { let database = initialize_database(core_config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); @@ -48,20 +53,24 @@ impl TrackerCoreContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new( + swarm_coordination_registry_container.swarms.clone(), + )); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( core_config, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); + let stats_repository = Arc::new(statistics::repository::Repository::new()); + let announce_handler = 
Arc::new(AnnounceHandler::new( core_config, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); @@ -77,8 +86,9 @@ impl TrackerCoreContainer { whitelist_authorization, whitelist_manager, in_memory_torrent_repository, - db_torrent_repository, + db_downloads_metric_repository, torrents_manager, + stats_repository, } } } diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 2cedab2d7..6c849bb70 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -6,6 +6,9 @@ use sqlite::Sqlite; use super::error::Error; use super::Database; +/// Metric name in DB for the total number of downloads across all torrents. +const TORRENTS_DOWNLOADS_TOTAL: &str = "torrents_downloads_total"; + /// The database management system used by the tracker. 
/// /// Refer to: @@ -97,9 +100,14 @@ pub(crate) mod tests { // Persistent torrents (stats) + // Torrent metrics handling_torrent_persistence::it_should_save_and_load_persistent_torrents(driver); handling_torrent_persistence::it_should_load_all_persistent_torrents(driver); handling_torrent_persistence::it_should_increase_the_number_of_downloads_for_a_given_torrent(driver); + // Aggregate metrics for all torrents + handling_torrent_persistence::it_should_save_and_load_the_global_number_of_downloads(driver); + handling_torrent_persistence::it_should_load_the_global_number_of_downloads(driver); + handling_torrent_persistence::it_should_increase_the_global_number_of_downloads(driver); // Authentication keys (for private trackers) @@ -154,14 +162,16 @@ pub(crate) mod tests { use crate::databases::Database; use crate::test_helpers::tests::sample_info_hash; + // Metrics per torrent + pub fn it_should_save_and_load_persistent_torrents(driver: &Arc<Box<dyn Database>>) { let infohash = sample_info_hash(); let number_of_downloads = 1; - driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + driver.save_torrent_downloads(&infohash, number_of_downloads).unwrap(); - let number_of_downloads = driver.load_persistent_torrent(&infohash).unwrap().unwrap(); + let number_of_downloads = driver.load_torrent_downloads(&infohash).unwrap().unwrap(); assert_eq!(number_of_downloads, 1); } @@ -171,9 +181,9 @@ pub(crate) mod tests { let number_of_downloads = 1; - driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + driver.save_torrent_downloads(&infohash, number_of_downloads).unwrap(); - let torrents = driver.load_persistent_torrents().unwrap(); + let torrents = driver.load_all_torrents_downloads().unwrap(); assert_eq!(torrents.len(), 1); assert_eq!(torrents.get(&infohash), Some(number_of_downloads).as_ref()); @@ -184,11 +194,45 @@ pub(crate) mod tests { let number_of_downloads = 1; - driver.save_persistent_torrent(&infohash, 
number_of_downloads).unwrap(); + driver.save_torrent_downloads(&infohash, number_of_downloads).unwrap(); + + driver.increase_downloads_for_torrent(&infohash).unwrap(); + + let number_of_downloads = driver.load_torrent_downloads(&infohash).unwrap().unwrap(); + + assert_eq!(number_of_downloads, 2); + } + + // Aggregate metrics for all torrents + + pub fn it_should_save_and_load_the_global_number_of_downloads(driver: &Arc<Box<dyn Database>>) { + let number_of_downloads = 1; + + driver.save_global_downloads(number_of_downloads).unwrap(); + + let number_of_downloads = driver.load_global_downloads().unwrap().unwrap(); + + assert_eq!(number_of_downloads, 1); + } + + pub fn it_should_load_the_global_number_of_downloads(driver: &Arc<Box<dyn Database>>) { + let number_of_downloads = 1; + + driver.save_global_downloads(number_of_downloads).unwrap(); + + let number_of_downloads = driver.load_global_downloads().unwrap().unwrap(); + + assert_eq!(number_of_downloads, 1); + } + + pub fn it_should_increase_the_global_number_of_downloads(driver: &Arc<Box<dyn Database>>) { + let number_of_downloads = 1; + + driver.save_global_downloads(number_of_downloads).unwrap(); - driver.increase_number_of_downloads(&infohash).unwrap(); + driver.increase_global_downloads().unwrap(); - let number_of_downloads = driver.load_persistent_torrent(&infohash).unwrap().unwrap(); + let number_of_downloads = driver.load_global_downloads().unwrap().unwrap(); assert_eq!(number_of_downloads, 2); } diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index d07f061c2..da2f86ce8 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -13,9 +13,9 @@ use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; -use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; +use 
torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; -use super::{Database, Driver, Error}; +use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::key::AUTH_KEY_LENGTH; use crate::authentication::{self, Key}; @@ -46,6 +46,27 @@ impl Mysql { Ok(Self { pool }) } + + fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result<Option<NumberOfDownloads>, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let query = conn.exec_first::<u32, _, _>( + "SELECT value FROM torrent_aggregate_metrics WHERE metric_name = :metric_name", + params! { "metric_name" => metric_name }, + ); + + let persistent_torrent = query?; + + Ok(persistent_torrent) + } + + fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: NumberOfDownloads) -> Result<(), Error> { + const COMMAND : &str = "INSERT INTO torrent_aggregate_metrics (metric_name, value) VALUES (:metric_name, :completed) ON DUPLICATE KEY UPDATE value = VALUES(value)"; + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + Ok(conn.exec_drop(COMMAND, params! { metric_name, completed })?) 
+ } } impl Database for Mysql { @@ -66,6 +87,14 @@ impl Database for Mysql { );" .to_string(); + let create_torrent_aggregate_metrics_table = " + CREATE TABLE IF NOT EXISTS torrent_aggregate_metrics ( + id integer PRIMARY KEY AUTO_INCREMENT, + metric_name VARCHAR(50) NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + );" + .to_string(); + let create_keys_table = format!( " CREATE TABLE IF NOT EXISTS `keys` ( @@ -82,6 +111,8 @@ impl Database for Mysql { conn.query_drop(&create_torrents_table) .expect("Could not create torrents table."); + conn.query_drop(&create_torrent_aggregate_metrics_table) + .expect("Could not create create_torrent_aggregate_metrics_table table."); conn.query_drop(&create_keys_table).expect("Could not create keys table."); conn.query_drop(&create_whitelist_table) .expect("Could not create whitelist table."); @@ -115,7 +146,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). - fn load_persistent_torrents(&self) -> Result<PersistentTorrents, Error> { + fn load_all_torrents_downloads(&self) -> Result<NumberOfDownloadsBTreeMap, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let torrents = conn.query_map( @@ -130,7 +161,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). - fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result<Option<PersistentTorrent>, Error> { + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result<Option<NumberOfDownloads>, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = conn.exec_first::<u32, _, _>( @@ -144,7 +175,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). 
- fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + fn save_torrent_downloads(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -155,7 +186,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::increase_number_of_downloads`](crate::core::databases::Database::increase_number_of_downloads). - fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { + fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash_str = info_hash.to_string(); @@ -168,6 +199,30 @@ impl Database for Mysql { Ok(()) } + /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). + fn load_global_downloads(&self) -> Result<Option<NumberOfDownloads>, Error> { + self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) + } + + /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). + fn save_global_downloads(&self, downloaded: NumberOfDownloads) -> Result<(), Error> { + self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) + } + + /// Refer to [`databases::Database::increase_global_number_of_downloads`](crate::core::databases::Database::increase_global_number_of_downloads). + fn increase_global_downloads(&self) -> Result<(), Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let metric_name = TORRENTS_DOWNLOADS_TOTAL; + + conn.exec_drop( + "UPDATE torrent_aggregate_metrics SET value = value + 1 WHERE metric_name = :metric_name", + params! 
{ metric_name }, + )?; + + Ok(()) + } + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). fn load_keys(&self) -> Result<Vec<authentication::PeerKey>, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index d36f24f8b..d08351aa8 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -13,9 +13,9 @@ use r2d2::Pool; use r2d2_sqlite::rusqlite::params; use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; -use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; -use super::{Database, Driver, Error}; +use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; @@ -49,6 +49,39 @@ impl Sqlite { Ok(Self { pool }) } + + fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result<Option<NumberOfDownloads>, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let mut stmt = conn.prepare("SELECT value FROM torrent_aggregate_metrics WHERE metric_name = ?")?; + + let mut rows = stmt.query([metric_name])?; + + let persistent_torrent = rows.next()?; + + Ok(persistent_torrent.map(|f| { + let value: i64 = f.get(0).unwrap(); + u32::try_from(value).unwrap() + })) + } + + fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: NumberOfDownloads) -> Result<(), Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let insert = conn.execute( + "INSERT INTO torrent_aggregate_metrics (metric_name, value) VALUES (?1, ?2) ON CONFLICT(metric_name) DO UPDATE SET value = ?2", + [metric_name.to_string(), completed.to_string()], + )?; + + if 
insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(()) + } + } } impl Database for Sqlite { @@ -69,6 +102,14 @@ impl Database for Sqlite { );" .to_string(); + let create_torrent_aggregate_metrics_table = " + CREATE TABLE IF NOT EXISTS torrent_aggregate_metrics ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + metric_name TEXT NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + );" + .to_string(); + let create_keys_table = " CREATE TABLE IF NOT EXISTS keys ( id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -82,6 +123,7 @@ impl Database for Sqlite { conn.execute(&create_whitelist_table, [])?; conn.execute(&create_keys_table, [])?; conn.execute(&create_torrents_table, [])?; + conn.execute(&create_torrent_aggregate_metrics_table, [])?; Ok(()) } @@ -110,7 +152,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). - fn load_persistent_torrents(&self) -> Result<PersistentTorrents, Error> { + fn load_all_torrents_downloads(&self) -> Result<NumberOfDownloadsBTreeMap, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -126,7 +168,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). - fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result<Option<PersistentTorrent>, Error> { + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result<Option<NumberOfDownloads>, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT completed FROM torrents WHERE info_hash = ?")?; @@ -142,7 +184,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). 
- fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + fn save_torrent_downloads(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute( @@ -161,7 +203,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::increase_number_of_downloads`](crate::core::databases::Database::increase_number_of_downloads). - fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { + fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let _ = conn.execute( @@ -172,6 +214,30 @@ impl Database for Sqlite { Ok(()) } + /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). + fn load_global_downloads(&self) -> Result<Option<NumberOfDownloads>, Error> { + self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) + } + + /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). + fn save_global_downloads(&self, downloaded: NumberOfDownloads) -> Result<(), Error> { + self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) + } + + /// Refer to [`databases::Database::increase_global_number_of_downloads`](crate::core::databases::Database::increase_global_number_of_downloads). + fn increase_global_downloads(&self) -> Result<(), Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let metric_name = TORRENTS_DOWNLOADS_TOTAL; + + let _ = conn.execute( + "UPDATE torrent_aggregate_metrics SET value = value + 1 WHERE metric_name = ?", + [metric_name], + )?; + + Ok(()) + } + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). 
fn load_keys(&self) -> Result<Vec<authentication::PeerKey>, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 2703ab8bf..c9d89769a 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -52,7 +52,7 @@ pub mod setup; use bittorrent_primitives::info_hash::InfoHash; use mockall::automock; -use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; use self::error::Error; use crate::authentication::{self, Key}; @@ -101,7 +101,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be loaded. - fn load_persistent_torrents(&self) -> Result<PersistentTorrents, Error>; + fn load_all_torrents_downloads(&self) -> Result<NumberOfDownloadsBTreeMap, Error>; /// Loads torrent metrics data from the database for one torrent. /// @@ -110,7 +110,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be loaded. - fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result<Option<PersistentTorrent>, Error>; + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result<Option<NumberOfDownloads>, Error>; /// Saves torrent metrics data into the database. /// @@ -124,23 +124,55 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be saved. - fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; + fn save_torrent_downloads(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; /// Increases the number of downloads for a given torrent. /// /// It does not create a new entry if the torrent is not found and it does /// not return an error. 
/// + /// # Context: Torrent Metrics + /// + /// # Arguments + /// + /// * `info_hash` - A reference to the torrent's info hash. + /// + /// # Errors + /// + /// Returns an [`Error`] if the query failed. + fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error>; + + /// Loads the total number of downloads for all torrents from the database. + /// + /// # Context: Torrent Metrics + /// + /// # Errors + /// + /// Returns an [`Error`] if the total downloads cannot be loaded. + fn load_global_downloads(&self) -> Result<Option<NumberOfDownloads>, Error>; + + /// Saves the total number of downloads for all torrents into the database. + /// + /// # Context: Torrent Metrics + /// /// # Arguments /// /// * `info_hash` - A reference to the torrent's info hash. + /// * `downloaded` - The number of times the torrent has been downloaded. + /// + /// # Errors + /// + /// Returns an [`Error`] if the total downloads cannot be saved. + fn save_global_downloads(&self, downloaded: NumberOfDownloads) -> Result<(), Error>; + + /// Increases the total number of downloads for all torrents. /// /// # Context: Torrent Metrics /// /// # Errors /// /// Returns an [`Error`] if the query failed. - fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error>; + fn increase_global_downloads(&self) -> Result<(), Error>; // Whitelist diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 4a35e9a0b..866aa64c5 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -84,7 +84,7 @@ pub enum ScrapeError { /// /// This error is returned when an operation involves a torrent that is not /// present in the whitelist. -#[derive(thiserror::Error, Debug, Clone)] +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] pub enum WhitelistError { /// Indicates that the torrent identified by `info_hash` is not whitelisted. 
#[error("The torrent: {info_hash}, is not whitelisted, {location}")] diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index d9da9b9e7..5167abf51 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -124,6 +124,7 @@ pub mod container; pub mod databases; pub mod error; pub mod scrape_handler; +pub mod statistics; pub mod torrent; pub mod whitelist; @@ -156,6 +157,8 @@ pub(crate) type CurrentClock = clock::Working; #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; +pub const TRACKER_CORE_LOG_TARGET: &str = "TRACKER_CORE"; + #[cfg(test)] mod tests { mod the_tracker { @@ -200,7 +203,7 @@ mod tests { // Announce a "complete" peer for the torrent let mut complete_peer = complete_peer(); announce_handler - .announce( + .handle_announcement( &info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)), @@ -212,7 +215,7 @@ mod tests { // Announce an "incomplete" peer for the torrent let mut incomplete_peer = incomplete_peer(); announce_handler - .announce( + .handle_announcement( &info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), @@ -222,16 +225,16 @@ mod tests { .unwrap(); // Scrape - let scrape_data = scrape_handler.scrape(&vec![info_hash]).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&vec![info_hash]).await.unwrap(); - // The expected swarm metadata for the file + // The expected swarm metadata for the torrent let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file( &info_hash, SwarmMetadata { - complete: 0, // the "complete" peer does not count because it was not previously known - downloaded: 0, - incomplete: 1, // the "incomplete" peer we have just announced + complete: 1, // the "incomplete" announced + downloaded: 0, // the "complete" peer download does not count because it was not previously known + incomplete: 1, // the "incomplete" peer announced }, ); @@ -256,7 +259,7 @@ mod tests { let 
non_whitelisted_info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::<InfoHash>().unwrap(); // DevSkim: ignore DS173237 - let scrape_data = scrape_handler.scrape(&vec![non_whitelisted_info_hash]).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&vec![non_whitelisted_info_hash]).await.unwrap(); // The expected zeroed swarm metadata for the file let mut expected_scrape_data = ScrapeData::empty(); diff --git a/packages/tracker-core/src/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs index 93b25dea6..9c94a4e50 100644 --- a/packages/tracker-core/src/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -107,12 +107,16 @@ impl ScrapeHandler { /// # BEP Reference: /// /// [BEP 48: Scrape Protocol](https://www.bittorrent.org/beps/bep_0048.html) - pub async fn scrape(&self, info_hashes: &Vec<InfoHash>) -> Result<ScrapeData, ScrapeError> { + pub async fn handle_scrape(&self, info_hashes: &Vec<InfoHash>) -> Result<ScrapeData, ScrapeError> { let mut scrape_data = ScrapeData::empty(); for info_hash in info_hashes { let swarm_metadata = match self.whitelist_authorization.authorize(info_hash).await { - Ok(()) => self.in_memory_torrent_repository.get_swarm_metadata(info_hash), + Ok(()) => { + self.in_memory_torrent_repository + .get_swarm_metadata_or_default(info_hash) + .await + } Err(_) => SwarmMetadata::zeroed(), }; scrape_data.add_file(info_hash, swarm_metadata); @@ -154,7 +158,7 @@ mod tests { let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::<InfoHash>().unwrap()]; // DevSkim: ignore DS173237 - let scrape_data = scrape_handler.scrape(&info_hashes).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&info_hashes).await.unwrap(); let mut expected_scrape_data = ScrapeData::empty(); @@ -172,7 +176,7 @@ mod tests { "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::<InfoHash>().unwrap(), // DevSkim: ignore DS173237 ]; - let scrape_data = scrape_handler.scrape(&info_hashes).await.unwrap(); 
+ let scrape_data = scrape_handler.handle_scrape(&info_hashes).await.unwrap(); let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs new file mode 100644 index 000000000..9a5182f25 --- /dev/null +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -0,0 +1,77 @@ +use std::sync::Arc; + +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use torrust_tracker_swarm_coordination_registry::event::Event; + +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; +use crate::statistics::repository::Repository; +use crate::statistics::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; + +pub async fn handle_event( + event: Event, + stats_repository: &Arc<Repository>, + db_downloads_metric_repository: &Arc<DatabaseDownloadsMetricRepository>, + persistent_torrent_completed_stat: bool, + now: DurationSinceUnixEpoch, +) { + match event { + // Torrent events + Event::TorrentAdded { info_hash, .. 
} => { + tracing::debug!(info_hash = ?info_hash, "Torrent added",); + } + Event::TorrentRemoved { info_hash } => { + tracing::debug!(info_hash = ?info_hash, "Torrent removed",); + } + + // Peer events + Event::PeerAdded { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); + } + Event::PeerRemoved { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); + } + Event::PeerUpdated { + info_hash, + old_peer, + new_peer, + } => { + tracing::debug!(info_hash = ?info_hash, old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated"); + } + Event::PeerDownloadCompleted { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); + + // Increment the number of downloads for all the torrents in memory + let _unused = stats_repository + .increment_counter( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + now, + ) + .await; + + if persistent_torrent_completed_stat { + // Increment the number of downloads for the torrent in the database + match db_downloads_metric_repository.increase_downloads_for_torrent(&info_hash) { + Ok(()) => { + tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); + } + Err(err) => { + tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads for the torrent"); + } + } + + // Increment the global number of downloads (for all torrents) in the database + match db_downloads_metric_repository.increase_global_downloads() { + Ok(()) => { + tracing::debug!("Global number of downloads increased"); + } + Err(err) => { + tracing::error!(error = ?err, "Failed to increase global number of downloads"); + } + } + } + } + } +} diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs new file mode 100644 index 000000000..8d2d74c71 --- /dev/null +++ 
b/packages/tracker-core/src/statistics/event/listener.rs @@ -0,0 +1,80 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; +use torrust_tracker_swarm_coordination_registry::event::receiver::Receiver; + +use super::handler::handle_event; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; +use crate::statistics::repository::Repository; +use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; + +#[must_use] +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc<Repository>, + db_downloads_metric_repository: &Arc<DatabaseDownloadsMetricRepository>, + persistent_torrent_completed_stat: bool, +) -> JoinHandle<()> { + let stats_repository = repository.clone(); + let db_downloads_metric_repository: Arc<DatabaseDownloadsMetricRepository> = db_downloads_metric_repository.clone(); + + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting tracker core event listener"); + + tokio::spawn(async move { + dispatch_events( + receiver, + cancellation_token, + stats_repository, + db_downloads_metric_repository, + persistent_torrent_completed_stat, + ) + .await; + + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Tracker core listener finished"); + }) +} + +async fn dispatch_events( + mut receiver: Receiver, + cancellation_token: CancellationToken, + stats_repository: Arc<Repository>, + db_downloads_metric_repository: Arc<DatabaseDownloadsMetricRepository>, + persistent_torrent_completed_stat: bool, +) { + loop { + tokio::select! 
{ + biased; + + () = cancellation_token.cancelled() => { + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Received cancellation request, shutting down tracker core event listener."); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event( + event, + &stats_repository, + &db_downloads_metric_repository, + persistent_torrent_completed_stat, + CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Tracker core event receiver closed"); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: TRACKER_CORE_LOG_TARGET, "Tracker core event receiver lagged by {} events", n); + } + } + } + } + } + } + } +} diff --git a/packages/tracker-core/src/statistics/event/mod.rs b/packages/tracker-core/src/statistics/event/mod.rs new file mode 100644 index 000000000..dae683398 --- /dev/null +++ b/packages/tracker-core/src/statistics/event/mod.rs @@ -0,0 +1,2 @@ +pub mod handler; +pub mod listener; diff --git a/packages/tracker-core/src/statistics/metrics.rs b/packages/tracker-core/src/statistics/metrics.rs new file mode 100644 index 000000000..a5caaf1cf --- /dev/null +++ b/packages/tracker-core/src/statistics/metrics.rs @@ -0,0 +1,76 @@ +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +/// Metrics collected by the torrent repository. +#[derive(Debug, Clone, PartialEq, Default, Serialize)] +pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn increment_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: u64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_counter(metric_name, labels, value, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increment_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_gauge(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn decrement_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.decrement_gauge(metric_name, labels, now) + } +} diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs new file mode 100644 index 000000000..fdb8e8fd4 --- /dev/null +++ b/packages/tracker-core/src/statistics/mod.rs @@ -0,0 +1,28 @@ +pub mod event; +pub mod metrics; +pub mod persisted; +pub mod repository; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::unit::Unit; + +// Torrent metrics + +const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "tracker_core_persistent_torrents_downloads_total"; + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + // Torrent metrics + + metrics.metric_collection.describe_counter( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("The total number of torrent downloads (persisted).")), + ); + + metrics +} diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs similarity index 61% rename from packages/tracker-core/src/torrent/repository/persisted.rs rename to packages/tracker-core/src/statistics/persisted/downloads.rs index dec571baf..6248bdc73 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/statistics/persisted/downloads.rs @@ -2,12 +2,12 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; use crate::databases::error::Error; use crate::databases::Database; -/// Torrent repository 
implementation that persists torrent metrics in a database. +/// It persists torrent metrics in a database. /// /// This repository persists only a subset of the torrent data: the torrent /// metrics, specifically the number of downloads (or completed counts) for each @@ -19,7 +19,7 @@ use crate::databases::Database; /// /// Not all in-memory torrent data is persisted; only the aggregate metrics are /// stored. -pub struct DatabasePersistentTorrentRepository { +pub struct DatabaseDownloadsMetricRepository { /// A shared reference to the database driver implementation. /// /// The driver must implement the [`Database`] trait. This allows for @@ -28,7 +28,7 @@ pub struct DatabasePersistentTorrentRepository { database: Arc<Box<dyn Database>>, } -impl DatabasePersistentTorrentRepository { +impl DatabaseDownloadsMetricRepository { /// Creates a new instance of `DatabasePersistentTorrentRepository`. /// /// # Arguments @@ -41,12 +41,14 @@ impl DatabasePersistentTorrentRepository { /// A new `DatabasePersistentTorrentRepository` instance with a cloned /// reference to the provided database. #[must_use] - pub fn new(database: &Arc<Box<dyn Database>>) -> DatabasePersistentTorrentRepository { + pub fn new(database: &Arc<Box<dyn Database>>) -> DatabaseDownloadsMetricRepository { Self { database: database.clone(), } } + // Single Torrent Metrics + /// Increases the number of downloads for a given torrent. /// /// If the torrent is not found, it creates a new entry. @@ -58,12 +60,12 @@ impl DatabasePersistentTorrentRepository { /// # Errors /// /// Returns an [`Error`] if the database operation fails. 
- pub(crate) fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { - let torrent = self.load(info_hash)?; + pub(crate) fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error> { + let torrent = self.load_torrent_downloads(info_hash)?; match torrent { - Some(_number_of_downloads) => self.database.increase_number_of_downloads(info_hash), - None => self.save(info_hash, 1), + Some(_number_of_downloads) => self.database.increase_downloads_for_torrent(info_hash), + None => self.save_torrent_downloads(info_hash, 1), } } @@ -75,8 +77,8 @@ impl DatabasePersistentTorrentRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. - pub(crate) fn load_all(&self) -> Result<PersistentTorrents, Error> { - self.database.load_persistent_torrents() + pub(crate) fn load_all_torrents_downloads(&self) -> Result<NumberOfDownloadsBTreeMap, Error> { + self.database.load_all_torrents_downloads() } /// Loads one persistent torrent metrics from the database. @@ -87,8 +89,8 @@ impl DatabasePersistentTorrentRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. - pub(crate) fn load(&self, info_hash: &InfoHash) -> Result<Option<PersistentTorrent>, Error> { - self.database.load_persistent_torrent(info_hash) + pub(crate) fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result<Option<NumberOfDownloads>, Error> { + self.database.load_torrent_downloads(info_hash) } /// Saves the persistent torrent metric into the database. @@ -104,24 +106,51 @@ impl DatabasePersistentTorrentRepository { /// # Errors /// /// Returns an [`Error`] if the database operation fails. 
-    pub(crate) fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> {
-        self.database.save_persistent_torrent(info_hash, downloaded)
+    pub(crate) fn save_torrent_downloads(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> {
+        self.database.save_torrent_downloads(info_hash, downloaded)
+    }
+
+    // Aggregate Metrics
+
+    /// Increases the global number of downloads for all torrents.
+    ///
+    /// If the metric is not found, it creates it.
+    ///
+    /// # Errors
+    ///
+    /// Returns an [`Error`] if the database operation fails.
+    pub(crate) fn increase_global_downloads(&self) -> Result<(), Error> {
+        let torrent = self.database.load_global_downloads()?;
+
+        match torrent {
+            Some(_number_of_downloads) => self.database.increase_global_downloads(),
+            None => self.database.save_global_downloads(1),
+        }
+    }
+
+    /// Loads the global number of downloads for all torrents from the database.
+    ///
+    /// # Errors
+    ///
+    /// Returns an [`Error`] if the underlying database query fails.
+ pub(crate) fn load_global_downloads(&self) -> Result<Option<NumberOfDownloads>, Error> { + self.database.load_global_downloads() } } #[cfg(test)] mod tests { - use torrust_tracker_primitives::PersistentTorrents; + use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; - use super::DatabasePersistentTorrentRepository; + use super::DatabaseDownloadsMetricRepository; use crate::databases::setup::initialize_database; use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash, sample_info_hash_one, sample_info_hash_two}; - fn initialize_db_persistent_torrent_repository() -> DatabasePersistentTorrentRepository { + fn initialize_db_persistent_torrent_repository() -> DatabaseDownloadsMetricRepository { let config = ephemeral_configuration(); let database = initialize_database(&config); - DatabasePersistentTorrentRepository::new(&database) + DatabaseDownloadsMetricRepository::new(&database) } #[test] @@ -130,9 +159,9 @@ mod tests { let infohash = sample_info_hash(); - repository.save(&infohash, 1).unwrap(); + repository.save_torrent_downloads(&infohash, 1).unwrap(); - let torrents = repository.load_all().unwrap(); + let torrents = repository.load_all_torrents_downloads().unwrap(); assert_eq!(torrents.get(&infohash), Some(1).as_ref()); } @@ -143,9 +172,9 @@ mod tests { let infohash = sample_info_hash(); - repository.increase_number_of_downloads(&infohash).unwrap(); + repository.increase_downloads_for_torrent(&infohash).unwrap(); - let torrents = repository.load_all().unwrap(); + let torrents = repository.load_all_torrents_downloads().unwrap(); assert_eq!(torrents.get(&infohash), Some(1).as_ref()); } @@ -157,12 +186,12 @@ mod tests { let infohash_one = sample_info_hash_one(); let infohash_two = sample_info_hash_two(); - repository.save(&infohash_one, 1).unwrap(); - repository.save(&infohash_two, 2).unwrap(); + repository.save_torrent_downloads(&infohash_one, 1).unwrap(); + repository.save_torrent_downloads(&infohash_two, 2).unwrap(); - let torrents = 
repository.load_all().unwrap(); + let torrents = repository.load_all_torrents_downloads().unwrap(); - let mut expected_torrents = PersistentTorrents::new(); + let mut expected_torrents = NumberOfDownloadsBTreeMap::new(); expected_torrents.insert(infohash_one, 1); expected_torrents.insert(infohash_two, 2); diff --git a/packages/tracker-core/src/statistics/persisted/mod.rs b/packages/tracker-core/src/statistics/persisted/mod.rs new file mode 100644 index 000000000..86c28370d --- /dev/null +++ b/packages/tracker-core/src/statistics/persisted/mod.rs @@ -0,0 +1,59 @@ +pub mod downloads; + +use std::sync::Arc; + +use thiserror::Error; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::{metric_collection, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::repository::Repository; +use super::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; +use crate::databases; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; + +/// Loads persisted metrics from the database and sets them in the stats repository. +/// +/// # Errors +/// +/// This function will return an error if the database query fails or if the +/// metric collection fails to set the initial metric values. +pub async fn load_persisted_metrics( + stats_repository: &Arc<Repository>, + db_downloads_metric_repository: &Arc<DatabaseDownloadsMetricRepository>, + now: DurationSinceUnixEpoch, +) -> Result<(), Error> { + if let Some(downloads) = db_downloads_metric_repository.load_global_downloads()? 
{ + stats_repository + .set_counter( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + u64::from(downloads), + now, + ) + .await?; + } + + Ok(()) +} + +#[derive(Error, Debug, Clone)] +pub enum Error { + #[error("Database error: {err}")] + DatabaseError { err: databases::error::Error }, + + #[error("Metrics error: {err}")] + MetricsError { err: metric_collection::Error }, +} + +impl From<databases::error::Error> for Error { + fn from(err: databases::error::Error) -> Self { + Self::DatabaseError { err } + } +} + +impl From<metric_collection::Error> for Error { + fn from(err: metric_collection::Error) -> Self { + Self::MetricsError { err } + } +} diff --git a/packages/tracker-core/src/statistics/repository.rs b/packages/tracker-core/src/statistics/repository.rs new file mode 100644 index 000000000..21b1da7f2 --- /dev/null +++ b/packages/tracker-core/src/statistics/repository.rs @@ -0,0 +1,176 @@ +use std::sync::Arc; + +use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::metrics::Metrics; +use super::{describe_metrics, TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL}; + +/// A repository for the torrent repository metrics. +#[derive(Clone)] +pub struct Repository { + pub stats: Arc<RwLock<Metrics>>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + let stats = Arc::new(RwLock::new(describe_metrics())); + + Self { stats } + } + + pub async fn get_metrics(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the counter. 
+ pub async fn increment_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_counter(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the counter: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// set the counter. + pub async fn set_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: u64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.set_counter(metric_name, labels, value, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to set the counter: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// set the gauge. + pub async fn set_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.set_gauge(metric_name, labels, value, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to set the gauge: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the gauge. 
+ pub async fn increment_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_gauge(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the gauge: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// decrement the gauge. + pub async fn decrement_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.decrement_gauge(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to decrement the gauge: {}", err), + } + + result + } + + /// Get the total number of torrent downloads. + /// + /// The value is persisted in database if persistence for downloads metrics is enabled. 
+ pub async fn get_torrents_downloads_total(&self) -> u64 { + let metrics = self.get_metrics().await; + + let downloads = metrics.metric_collection.get_counter_value( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + ); + + if let Some(downloads) = downloads { + downloads.value() + } else { + 0 + } + } +} diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 79904dec2..bf21e6f94 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -7,7 +7,7 @@ pub(crate) mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; - use rand::Rng; + use rand::RngExt; use torrust_tracker_configuration::Configuration; #[cfg(test)] use torrust_tracker_configuration::Core; @@ -19,8 +19,8 @@ pub(crate) mod tests { use crate::announce_handler::AnnounceHandler; use crate::databases::setup::initialize_database; use crate::scrape_handler::ScrapeHandler; + use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::{self}; @@ -64,16 +64,6 @@ pub(crate) mod tests { .expect("String should be a valid info hash") } - /// # Panics - /// - /// Will panic if the string representation of the info hash is not a valid info hash. - #[must_use] - pub fn sample_info_hash_alphabetically_ordered_after_sample_info_hash_one() -> InfoHash { - "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 - .parse::<InfoHash>() - .expect("String should be a valid info hash") - } - /// Sample peer whose state is not relevant for the tests. 
#[must_use] pub fn sample_peer() -> Peer { @@ -88,32 +78,6 @@ pub(crate) mod tests { } } - #[must_use] - pub fn sample_peer_one() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000001"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } - } - - #[must_use] - pub fn sample_peer_two() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000002"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } - } - #[must_use] pub fn seeder() -> Peer { complete_peer() @@ -140,7 +104,7 @@ pub(crate) mod tests { #[must_use] pub fn complete_peer() -> Peer { Peer { - peer_id: PeerId(*b"-qB00000000000000000"), + peer_id: PeerId(*b"-qB00000000000000001"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), @@ -154,8 +118,8 @@ pub(crate) mod tests { #[must_use] pub fn incomplete_peer() -> Peer { Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), downloaded: NumberOfBytes::new(0), @@ -173,13 +137,13 @@ pub(crate) mod tests { &in_memory_whitelist.clone(), )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = 
Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 792bb024d..5acc27980 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -4,9 +4,10 @@ use std::time::Duration; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::in_memory::InMemoryTorrentRepository; -use super::repository::persisted::DatabasePersistentTorrentRepository; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::{databases, CurrentClock}; /// The `TorrentsManager` is responsible for managing torrent entries by @@ -28,9 +29,8 @@ pub struct TorrentsManager { /// The in-memory torrents repository. in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, - /// The persistent torrents repository. - #[allow(dead_code)] - db_torrent_repository: Arc<DatabasePersistentTorrentRepository>, + /// The download metrics repository. + db_downloads_metric_repository: Arc<DatabaseDownloadsMetricRepository>, } impl TorrentsManager { @@ -41,7 +41,7 @@ impl TorrentsManager { /// * `config` - A reference to the tracker configuration. /// * `in_memory_torrent_repository` - A shared reference to the in-memory /// repository of torrents. 
- /// * `db_torrent_repository` - A shared reference to the persistent + /// * `db_downloads_metric_repository` - A shared reference to the persistent /// repository for torrent metrics. /// /// # Returns @@ -51,16 +51,16 @@ impl TorrentsManager { pub fn new( config: &Core, in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, - db_torrent_repository: &Arc<DatabasePersistentTorrentRepository>, + db_downloads_metric_repository: &Arc<DatabaseDownloadsMetricRepository>, ) -> Self { Self { config: config.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), - db_torrent_repository: db_torrent_repository.clone(), + db_downloads_metric_repository: db_downloads_metric_repository.clone(), } } - /// Loads torrents from the persistent database into the in-memory repository. + /// Loads torrents from the database into the in-memory repository. /// /// This function retrieves the list of persistent torrent entries (which /// include only the aggregate metrics, not the detailed peer lists) from @@ -70,9 +70,8 @@ impl TorrentsManager { /// /// Returns a `databases::error::Error` if unable to load the persistent /// torrent data. - #[allow(dead_code)] - pub(crate) fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - let persistent_torrents = self.db_torrent_repository.load_all()?; + pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { + let persistent_torrents = self.db_downloads_metric_repository.load_all_torrents_downloads()?; self.in_memory_torrent_repository.import_persistent(&persistent_torrents); @@ -91,17 +90,56 @@ impl TorrentsManager { /// 2. If the tracker is configured to remove peerless torrents /// (`remove_peerless_torrents` is set), it removes entire torrent /// entries that have no active peers. 
- pub fn cleanup_torrents(&self) { - let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) - .unwrap_or_default(); + pub async fn cleanup_torrents(&self) { + self.log_aggregate_swarm_metadata().await; - self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff); + self.remove_inactive_peers().await; + self.log_aggregate_swarm_metadata().await; + + self.remove_peerless_torrents().await; + + self.log_aggregate_swarm_metadata().await; + } + + async fn remove_inactive_peers(&self) { + self.in_memory_torrent_repository + .remove_inactive_peers(self.current_cutoff()) + .await; + } + + fn current_cutoff(&self) -> DurationSinceUnixEpoch { + CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))).unwrap_or_default() + } + + async fn remove_peerless_torrents(&self) { if self.config.tracker_policy.remove_peerless_torrents { self.in_memory_torrent_repository - .remove_peerless_torrents(&self.config.tracker_policy); + .remove_peerless_torrents(&self.config.tracker_policy) + .await; } } + + async fn log_aggregate_swarm_metadata(&self) { + // Pre-calculated data + let aggregate_swarm_metadata = self.in_memory_torrent_repository.get_aggregate_swarm_metadata().await; + + tracing::info!(name: "pre_calculated_aggregate_swarm_metadata", + torrents = aggregate_swarm_metadata.total_torrents, + downloads = aggregate_swarm_metadata.total_downloaded, + seeders = aggregate_swarm_metadata.total_complete, + leechers = aggregate_swarm_metadata.total_incomplete, + ); + + // Hot data (iterating over data structures) + let peerless_torrents = self.in_memory_torrent_repository.count_peerless_torrents().await; + let peers = self.in_memory_torrent_repository.count_peers().await; + + tracing::info!(name: "hot_aggregate_swarm_metadata", + peerless_torrents = peerless_torrents, + peers = peers, + ); + } } #[cfg(test)] @@ -110,9 +148,9 @@ mod tests { use std::sync::Arc; use 
torrust_tracker_configuration::Core; - use torrust_tracker_torrent_repository::entry::EntrySync; + use torrust_tracker_swarm_coordination_registry::Registry; - use super::{DatabasePersistentTorrentRepository, TorrentsManager}; + use super::{DatabaseDownloadsMetricRepository, TorrentsManager}; use crate::databases::setup::initialize_database; use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -120,7 +158,7 @@ mod tests { struct TorrentsManagerDeps { config: Arc<Core>, in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, - database_persistent_torrent_repository: Arc<DatabasePersistentTorrentRepository>, + database_persistent_torrent_repository: Arc<DatabaseDownloadsMetricRepository>, } fn initialize_torrents_manager() -> (Arc<TorrentsManager>, Arc<TorrentsManagerDeps>) { @@ -129,9 +167,10 @@ mod tests { } fn initialize_torrents_manager_with(config: Core) -> (Arc<TorrentsManager>, Arc<TorrentsManagerDeps>) { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let swarms = Arc::new(Registry::default()); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let database = initialize_database(&config); - let database_persistent_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let database_persistent_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( &config, @@ -149,13 +188,16 @@ mod tests { ) } - #[test] - fn it_should_load_the_numbers_of_downloads_for_all_torrents_from_the_database() { + #[tokio::test] + async fn it_should_load_the_numbers_of_downloads_for_all_torrents_from_the_database() { let (torrents_manager, services) = initialize_torrents_manager(); let infohash = sample_info_hash(); - services.database_persistent_torrent_repository.save(&infohash, 1).unwrap(); + 
services + .database_persistent_torrent_repository + .save_torrent_downloads(&infohash, 1) + .unwrap(); torrents_manager.load_torrents_from_database().unwrap(); @@ -164,7 +206,9 @@ mod tests { .in_memory_torrent_repository .get(&infohash) .unwrap() - .get_swarm_metadata() + .lock() + .await + .metadata() .downloaded, 1 ); @@ -184,8 +228,8 @@ mod tests { use crate::torrent::manager::tests::{initialize_torrents_manager, initialize_torrents_manager_with}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - #[test] - fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { + #[tokio::test] + async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { let (torrents_manager, services) = initialize_torrents_manager(); let infohash = sample_info_hash(); @@ -195,7 +239,10 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = services.in_memory_torrent_repository.upsert_peer(&infohash, &peer, None); + services + .in_memory_torrent_repository + .handle_announcement(&infohash, &peer, None) + .await; // Simulate the time has passed 1 second more than the max peer timeout. 
clock::Stopped::local_add(&Duration::from_secs( @@ -203,23 +250,25 @@ mod tests { )) .unwrap(); - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } - fn add_a_peerless_torrent(infohash: &InfoHash, in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { + async fn add_a_peerless_torrent(infohash: &InfoHash, in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None); + in_memory_torrent_repository.handle_announcement(infohash, &peer, None).await; // Remove the peer. The torrent is now peerless. - in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + in_memory_torrent_repository + .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await; } - #[test] - fn it_should_remove_torrents_that_have_no_peers_when_it_is_configured_to_do_so() { + #[tokio::test] + async fn it_should_remove_torrents_that_have_no_peers_when_it_is_configured_to_do_so() { let mut config = ephemeral_configuration(); config.tracker_policy.remove_peerless_torrents = true; @@ -227,15 +276,15 @@ mod tests { let infohash = sample_info_hash(); - add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository); + add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } - #[test] - fn it_should_retain_peerless_torrents_when_it_is_configured_to_do_so() { + #[tokio::test] + async fn it_should_retain_peerless_torrents_when_it_is_configured_to_do_so() { let mut config = ephemeral_configuration(); 
config.tracker_policy.remove_peerless_torrents = false; @@ -243,9 +292,9 @@ mod tests { let infohash = sample_info_hash(); - add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository); + add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_some()); } diff --git a/packages/tracker-core/src/torrent/mod.rs b/packages/tracker-core/src/torrent/mod.rs index 8ee8fa6d3..01d33b893 100644 --- a/packages/tracker-core/src/torrent/mod.rs +++ b/packages/tracker-core/src/torrent/mod.rs @@ -166,16 +166,3 @@ pub mod manager; pub mod repository; pub mod services; - -#[cfg(test)] -use torrust_tracker_torrent_repository::EntryMutexStd; -use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; - -/// Alias for the primary torrent collection type, implemented as a skip map -/// wrapped in a mutex. This type is used internally by the tracker to manage -/// and access torrent entries. -pub(crate) type Torrents = TorrentsSkipMapMutexStd; - -/// Alias for a single torrent entry. 
-#[cfg(test)] -pub(crate) type TorrentEntry = EntryMutexStd; diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index e09bede8e..e50a82933 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -5,13 +5,9 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::entry::EntrySync; -use torrust_tracker_torrent_repository::repository::Repository; -use torrust_tracker_torrent_repository::EntryMutexStd; - -use crate::torrent::Torrents; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; +use torrust_tracker_swarm_coordination_registry::{CoordinatorHandle, Registry}; /// In-memory repository for torrent entries. /// @@ -22,13 +18,18 @@ use crate::torrent::Torrents; /// /// Multiple implementations were considered, and the chosen implementation is /// used in production. Other implementations are kept for reference. -#[derive(Debug, Default)] +#[derive(Default)] pub struct InMemoryTorrentRepository { - /// The underlying in-memory data structure that stores torrent entries. - torrents: Arc<Torrents>, + /// The underlying in-memory data structure that stores swarms data. 
+ swarms: Arc<Registry>, } impl InMemoryTorrentRepository { + #[must_use] + pub fn new(swarms: Arc<Registry>) -> Self { + Self { swarms } + } + /// Inserts or updates a peer in the torrent entry corresponding to the /// given infohash. /// @@ -43,33 +44,20 @@ impl InMemoryTorrentRepository { /// # Returns /// /// `true` if the peer stats were updated. - #[must_use] - pub fn upsert_peer( + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. + pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option<PersistentTorrent>, - ) -> bool { - self.torrents.upsert_peer(info_hash, peer, opt_persistent_torrent) - } - - /// Removes a torrent entry from the repository. - /// - /// This method is only available in tests. It removes the torrent entry - /// associated with the given info hash and returns the removed entry if it - /// existed. - /// - /// # Arguments - /// - /// * `key` - The info hash of the torrent to remove. - /// - /// # Returns - /// - /// An `Option` containing the removed torrent entry if it existed. - #[cfg(test)] - #[must_use] - pub(crate) fn remove(&self, key: &InfoHash) -> Option<EntryMutexStd> { - self.torrents.remove(key) + opt_persistent_torrent: Option<NumberOfDownloads>, + ) { + self.swarms + .handle_announcement(info_hash, peer, opt_persistent_torrent) + .await + .expect("Failed to upsert the peer in swarms") } /// Removes inactive peers from all torrent entries. @@ -81,8 +69,15 @@ /// /// * `current_cutoff` - The cutoff timestamp; peers not updated since this /// time will be removed. - pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - self.torrents.remove_inactive_peers(current_cutoff); + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. 
+ pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.swarms + .remove_inactive_peers(current_cutoff) + .await + .expect("Failed to remove inactive peers from swarms") } /// Removes torrent entries that have no active peers. @@ -94,8 +89,15 @@ /// /// * `policy` - The tracker policy containing the configuration for /// removing peerless torrents. - pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - self.torrents.remove_peerless_torrents(policy); + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. + pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + self.swarms + .remove_peerless_torrents(policy) + .await + .expect("Failed to remove peerless torrents from swarms") } /// Retrieves a torrent entry by its infohash. /// @@ -108,8 +110,8 @@ /// /// An `Option` containing the torrent entry if found. #[must_use] - pub(crate) fn get(&self, key: &InfoHash) -> Option<EntryMutexStd> { - self.torrents.get(key) + pub(crate) fn get(&self, key: &InfoHash) -> Option<CoordinatorHandle> { + self.swarms.get(key) } /// Retrieves a paginated list of torrent entries. @@ -124,10 +126,10 @@ /// /// # Returns /// - /// A vector of `(InfoHash, EntryMutexStd)` tuples. + /// A vector of `(InfoHash, TorrentEntry)` tuples. #[must_use] - pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { - self.torrents.get_paginated(pagination) + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, CoordinatorHandle)> { + self.swarms.get_paginated(pagination) } /// Retrieves swarm metadata for a given torrent. /// @@ -143,12 +145,16 @@ /// # Returns /// /// A `SwarmMetadata` struct containing the aggregated torrent data. 
+ /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. #[must_use] - pub(crate) fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - match self.torrents.get(info_hash) { - Some(torrent_entry) => torrent_entry.get_swarm_metadata(), - None => SwarmMetadata::zeroed(), - } + pub(crate) async fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { + self.swarms + .get_swarm_metadata_or_default(info_hash) + .await + .expect("Failed to get swarm metadata") } /// Retrieves torrent peers for a given torrent and client, excluding the @@ -168,12 +174,16 @@ /// /// A vector of peers (wrapped in `Arc`) representing the active peers for /// the torrent, excluding the requesting client. + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. #[must_use] - pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec<Arc<peer::Peer>> { - match self.torrents.get(info_hash) { - None => vec![], - Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))), - } + pub(crate) async fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec<Arc<peer::Peer>> { + self.swarms + .get_peers_peers_excluding(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + .await + .expect("Failed to get other peers in swarm") } /// Retrieves the list of peers for a given torrent. /// @@ -189,26 +199,61 @@ /// /// A vector of peers (wrapped in `Arc`) representing the active peers for /// the torrent. + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. 
#[must_use] - pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec<Arc<peer::Peer>> { - match self.torrents.get(info_hash) { - None => vec![], - Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), - } + pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec<Arc<peer::Peer>> { + // todo: pass the limit as an argument like `get_peers_for` + self.swarms + .get_swarm_peers(info_hash, TORRENT_PEERS_LIMIT) + .await + .expect("Failed to get other peers in swarm") } /// Calculates and returns overall torrent metrics. /// - /// The returned [`TorrentsMetrics`] contains aggregate data such as the - /// total number of torrents, total complete (seeders), incomplete (leechers), - /// and downloaded counts. + /// The returned [`AggregateActiveSwarmMetadata`] contains aggregate data such as + /// the total number of torrents, total complete (seeders), incomplete + /// (leechers), and downloaded counts. /// /// # Returns /// - /// A [`TorrentsMetrics`] struct with the aggregated metrics. + /// An [`AggregateActiveSwarmMetadata`] struct with the aggregated metrics. + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. + #[must_use] + pub async fn get_aggregate_swarm_metadata(&self) -> AggregateActiveSwarmMetadata { + self.swarms + .get_aggregate_swarm_metadata() + .await + .expect("Failed to get aggregate swarm metadata") + } + + /// Counts the number of peerless torrents in the repository. + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. + #[must_use] + pub async fn count_peerless_torrents(&self) -> usize { + self.swarms + .count_peerless_torrents() + .await + .expect("Failed to count peerless torrents") + } + + /// Counts the number of peers in the repository. + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. 
#[must_use] - pub fn get_torrents_metrics(&self) -> AggregateSwarmMetadata { - self.torrents.get_metrics() + pub async fn count_peers(&self) -> usize { + self.swarms.count_peers().await.expect("Failed to count peers") } /// Imports persistent torrent data into the in-memory repository. @@ -219,674 +264,13 @@ impl InMemoryTorrentRepository { /// # Arguments /// /// * `persistent_torrents` - A reference to the persisted torrent data. - pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - self.torrents.import_persistent(persistent_torrents); + pub fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { + self.swarms.import_persistent(persistent_torrents); } -} - -#[cfg(test)] -mod tests { - - mod the_in_memory_torrent_repository { - - use aquatic_udp_protocol::PeerId; - - /// It generates a peer id from a number where the number is the last - /// part of the peer ID. For example, for `12` it returns - /// `-qB00000000000000012`. - fn numeric_peer_id(two_digits_value: i32) -> PeerId { - // Format idx as a string with leading zeros, ensuring it has exactly 2 digits - let idx_str = format!("{two_digits_value:02}"); - - // Create the base part of the peer ID. - let base = b"-qB00000000000000000"; - - // Concatenate the base with idx bytes, ensuring the total length is 20 bytes. - let mut peer_id_bytes = [0u8; 20]; - peer_id_bytes[..base.len()].copy_from_slice(base); - peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes()); - - PeerId(peer_id_bytes) - } - - // The `InMemoryTorrentRepository` has these responsibilities: - // - To maintain the peer lists for each torrent. - // - To maintain the the torrent entries, which contains all the info about the - // torrents, including the peer lists. - // - To return the torrent entries. - // - To return the peer lists for a given torrent. - // - To return the torrent metrics. - // - To return the swarm metadata for a given torrent. 
- // - To handle the persistence of the torrent entries. - - mod maintaining_the_peer_lists { - - use std::sync::Arc; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - - assert!(in_memory_torrent_repository.get(&info_hash).is_some()); - } - - #[tokio::test] - async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - - assert!(in_memory_torrent_repository.get(&info_hash).is_some()); - } - } - - mod returning_peer_lists_for_a_torrent { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::numeric_peer_id; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_return_the_peers_for_a_given_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let peer = sample_peer(); - - let _number_of_downloads_increased = 
in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); - - assert_eq!(peers, vec![Arc::new(peer)]); - } - - #[tokio::test] - async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let peers = in_memory_torrent_repository.get_torrent_peers(&sample_info_hash()); - - assert!(peers.is_empty()); - } - - #[tokio::test] - async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - - for idx in 1..=75 { - let peer = Peer { - peer_id: numeric_peer_id(idx), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - }; - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - } - - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); - - assert_eq!(peers.len(), 74); - } - - mod excluding_the_client_peer { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; - use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::numeric_peer_id; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn 
it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let peers = - in_memory_torrent_repository.get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT); - - assert_eq!(peers, vec![]); - } - - #[tokio::test] - async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let peer = sample_peer(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); - - assert_eq!(peers, vec![]); - } - - #[tokio::test] - async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - - let excluded_peer = sample_peer(); - - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer, None); - - // Add 74 peers - for idx in 2..=75 { - let peer = Peer { - peer_id: numeric_peer_id(idx), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - }; - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - } - - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); - - assert_eq!(peers.len(), 74); - } - } - } - - mod maintaining_the_torrent_entries { - - use std::ops::Add; - use std::sync::Arc; - 
use std::time::Duration; - - use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_configuration::TrackerPolicy; - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_remove_a_torrent_entry() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - - let _unused = in_memory_torrent_repository.remove(&info_hash); - assert!(in_memory_torrent_repository.get(&info_hash).is_none()); - } - - #[tokio::test] - async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let mut peer = sample_peer(); - peer.updated = DurationSinceUnixEpoch::new(0, 0); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - - // Cut off time is 1 second after the peer was updated - in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); - - assert!(!in_memory_torrent_repository - .get_torrent_peers(&info_hash) - .contains(&Arc::new(peer))); - } - - fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc<InMemoryTorrentRepository> { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - // Insert a sample peer for the torrent to force adding the torrent entry - let mut peer = sample_peer(); - peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(info_hash, &peer, None); - - // Remove the peer - 
in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); - - in_memory_torrent_repository - } - - #[tokio::test] - async fn it_should_remove_torrents_without_peers() { - let info_hash = sample_info_hash(); - - let in_memory_torrent_repository = initialize_repository_with_one_torrent_without_peers(&info_hash); - - let tracker_policy = TrackerPolicy { - remove_peerless_torrents: true, - ..Default::default() - }; - - in_memory_torrent_repository.remove_peerless_torrents(&tracker_policy); - - assert!(in_memory_torrent_repository.get(&info_hash).is_none()); - } - } - mod returning_torrent_entries { - - use std::sync::Arc; - - use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use torrust_tracker_torrent_repository::entry::EntrySync; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::TorrentEntry; - - /// `TorrentEntry` data is not directly accessible. It's only - /// accessible through the trait methods. We need this temporary - /// DTO to write simple and more readable assertions. 
- #[derive(Debug, Clone, PartialEq)] - struct TorrentEntryInfo { - swarm_metadata: SwarmMetadata, - peers: Vec<Peer>, - number_of_peers: usize, - } - - #[allow(clippy::from_over_into)] - impl Into<TorrentEntryInfo> for TorrentEntry { - fn into(self) -> TorrentEntryInfo { - TorrentEntryInfo { - swarm_metadata: self.get_swarm_metadata(), - peers: self.get_peers(None).iter().map(|peer| *peer.clone()).collect(), - number_of_peers: self.get_peers_len(), - } - } - } - - #[tokio::test] - async fn it_should_return_one_torrent_entry_by_infohash() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let peer = sample_peer(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - - let torrent_entry = in_memory_torrent_repository.get(&info_hash).unwrap(); - - assert_eq!( - TorrentEntryInfo { - swarm_metadata: SwarmMetadata { - downloaded: 0, - complete: 1, - incomplete: 0 - }, - peers: vec!(peer), - number_of_peers: 1 - }, - torrent_entry.into() - ); - } - - mod it_should_return_many_torrent_entries { - use std::sync::Arc; - - use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn without_pagination() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - - let torrent_entries = in_memory_torrent_repository.get_paginated(None); - - assert_eq!(torrent_entries.len(), 1); - - let torrent_entry = torrent_entries.first().unwrap().1.clone(); - - 
assert_eq!( - TorrentEntryInfo { - swarm_metadata: SwarmMetadata { - downloaded: 0, - complete: 1, - incomplete: 0 - }, - peers: vec!(peer), - number_of_peers: 1 - }, - torrent_entry.into() - ); - } - - mod with_pagination { - use std::sync::Arc; - - use torrust_tracker_primitives::pagination::Pagination; - use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - - use crate::test_helpers::tests::{ - sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, - sample_peer_one, sample_peer_two, - }; - use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_return_the_first_page() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - // Insert one torrent entry - let info_hash_one = sample_info_hash_one(); - let peer_one = sample_peer_one(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); - - // Insert another torrent entry - let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); - let peer_two = sample_peer_two(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); - - // Get only the first page where page size is 1 - let torrent_entries = - in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); - - assert_eq!(torrent_entries.len(), 1); - - let torrent_entry = torrent_entries.first().unwrap().1.clone(); - - assert_eq!( - TorrentEntryInfo { - swarm_metadata: SwarmMetadata { - downloaded: 0, - complete: 1, - incomplete: 0 - }, - peers: vec!(peer_one), - number_of_peers: 1 - }, - torrent_entry.into() - ); - } - - #[tokio::test] - async fn it_should_return_the_second_page() { - let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); - - // Insert one torrent entry - let info_hash_one = sample_info_hash_one(); - let peer_one = sample_peer_one(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); - - // Insert another torrent entry - let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); - let peer_two = sample_peer_two(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); - - // Get only the first page where page size is 1 - let torrent_entries = - in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); - - assert_eq!(torrent_entries.len(), 1); - - let torrent_entry = torrent_entries.first().unwrap().1.clone(); - - assert_eq!( - TorrentEntryInfo { - swarm_metadata: SwarmMetadata { - downloaded: 0, - complete: 1, - incomplete: 0 - }, - peers: vec!(peer_two), - number_of_peers: 1 - }, - torrent_entry.into() - ); - } - - #[tokio::test] - async fn it_should_allow_changing_the_page_size() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - // Insert one torrent entry - let info_hash_one = sample_info_hash_one(); - let peer_one = sample_peer_one(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); - - // Insert another torrent entry - let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); - let peer_two = sample_peer_two(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); - - // Get only the first page where page size is 1 - let torrent_entries = - in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); - - assert_eq!(torrent_entries.len(), 1); - } - } - } - } - - mod returning_aggregate_swarm_metadata { - - use std::sync::Arc; 
- - use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - - use crate::test_helpers::tests::{complete_peer, leecher, sample_info_hash, seeder}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - // todo: refactor to use test parametrization - - #[tokio::test] - async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); - - assert_eq!( - aggregate_swarm_metadata, - AggregateSwarmMetadata { - total_complete: 0, - total_downloaded: 0, - total_incomplete: 0, - total_torrents: 0 - } - ); - } - - #[tokio::test] - async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); - - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); - - assert_eq!( - aggregate_swarm_metadata, - AggregateSwarmMetadata { - total_complete: 0, - total_downloaded: 0, - total_incomplete: 1, - total_torrents: 1, - } - ); - } - - #[tokio::test] - async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); - - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); - - assert_eq!( - aggregate_swarm_metadata, - AggregateSwarmMetadata { - total_complete: 1, - total_downloaded: 0, - total_incomplete: 0, - total_torrents: 1, - } - ); - } - - #[tokio::test] - async fn 
it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); - - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); - - assert_eq!( - aggregate_swarm_metadata, - AggregateSwarmMetadata { - total_complete: 1, - total_downloaded: 0, - total_incomplete: 0, - total_torrents: 1, - } - ); - } - - #[tokio::test] - async fn it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let start_time = std::time::Instant::now(); - for i in 0..1_000_000 { - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher(), None); - } - let result_a = start_time.elapsed(); - - let start_time = std::time::Instant::now(); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); - let result_b = start_time.elapsed(); - - assert_eq!( - (aggregate_swarm_metadata), - (AggregateSwarmMetadata { - total_complete: 0, - total_downloaded: 0, - total_incomplete: 1_000_000, - total_torrents: 1_000_000, - }), - "{result_a:?} {result_b:?}" - ); - } - } - - mod returning_swarm_metadata { - - use std::sync::Arc; - - use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - - use crate::test_helpers::tests::{leecher, sample_info_hash}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_get_swarm_metadata_for_an_existing_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let infohash = sample_info_hash(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&infohash, &leecher(), None); - - let 
swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&infohash); - - assert_eq!( - swarm_metadata, - SwarmMetadata { - complete: 0, - downloaded: 0, - incomplete: 1, - } - ); - } - - #[tokio::test] - async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&sample_info_hash()); - - assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); - } - } - - mod handling_persistence { - - use std::sync::Arc; - - use torrust_tracker_primitives::PersistentTorrents; - - use crate::test_helpers::tests::sample_info_hash; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_allow_importing_persisted_torrent_entries() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let infohash = sample_info_hash(); - - let mut persistent_torrents = PersistentTorrents::default(); - - persistent_torrents.insert(infohash, 1); - - in_memory_torrent_repository.import_persistent(&persistent_torrents); - - let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&infohash); - - // Only the number of downloads is persisted. - assert_eq!(swarm_metadata.downloaded, 1); - } - } + /// Checks if the repository contains a torrent entry for the given infohash. + #[must_use] + pub fn contains(&self, info_hash: &InfoHash) -> bool { + self.swarms.contains(info_hash) } } diff --git a/packages/tracker-core/src/torrent/repository/mod.rs b/packages/tracker-core/src/torrent/repository/mod.rs index ae789e5e9..d8325dec5 100644 --- a/packages/tracker-core/src/torrent/repository/mod.rs +++ b/packages/tracker-core/src/torrent/repository/mod.rs @@ -1,3 +1,2 @@ //! Torrent repository implementations. 
pub mod in_memory; -pub mod persisted; diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 88af3b570..874ad1349 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -17,7 +17,6 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::entry::EntrySync; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -89,17 +88,24 @@ pub struct BasicInfo { /// An [`Option<Info>`] which is: /// - `Some(Info)` if the torrent exists in the repository. /// - `None` if the torrent is not found. +/// +/// # Panics +/// +/// This function panics if the lock for the torrent entry cannot be obtained. #[must_use] -pub fn get_torrent_info(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, info_hash: &InfoHash) -> Option<Info> { +pub async fn get_torrent_info( + in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, + info_hash: &InfoHash, +) -> Option<Info> { let torrent_entry_option = in_memory_torrent_repository.get(info_hash); let torrent_entry = torrent_entry_option?; - let stats = torrent_entry.get_swarm_metadata(); + let stats = torrent_entry.lock().await.metadata(); - let peers = torrent_entry.get_peers(None); + let peers = torrent_entry.lock().await.peers(None); - let peers = Some(peers.iter().map(|peer| (**peer)).collect()); + let peers = Some(peers.iter().map(|peer| **peer).collect()); Some(Info { info_hash: *info_hash, @@ -127,15 +133,19 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc<InMemoryTorrentReposi /// /// A vector of [`BasicInfo`] structs representing the summarized data of the /// torrents. +/// +/// # Panics +/// +/// This function panics if the lock for the torrent entry cannot be obtained. 
#[must_use] -pub fn get_torrents_page( +pub async fn get_torrents_page( in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, pagination: Option<&Pagination>, ) -> Vec<BasicInfo> { let mut basic_infos: Vec<BasicInfo> = vec![]; for (info_hash, torrent_entry) in in_memory_torrent_repository.get_paginated(pagination) { - let stats = torrent_entry.get_swarm_metadata(); + let stats = torrent_entry.lock().await.metadata(); basic_infos.push(BasicInfo { info_hash, @@ -165,17 +175,26 @@ pub fn get_torrents_page( /// # Returns /// /// A vector of [`BasicInfo`] structs for the requested torrents. +/// +/// # Panics +/// +/// This function panics if the lock for the torrent entry cannot be obtained. #[must_use] -pub fn get_torrents(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, info_hashes: &[InfoHash]) -> Vec<BasicInfo> { +pub async fn get_torrents( + in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, + info_hashes: &[InfoHash], +) -> Vec<BasicInfo> { let mut basic_infos: Vec<BasicInfo> = vec![]; for info_hash in info_hashes { - if let Some(stats) = in_memory_torrent_repository.get(info_hash).map(|t| t.get_swarm_metadata()) { + if let Some(torrent_entry) = in_memory_torrent_repository.get(info_hash) { + let metadata = torrent_entry.lock().await.metadata(); + basic_infos.push(BasicInfo { info_hash: *info_hash, - seeders: u64::from(stats.complete), - completed: u64::from(stats.downloaded), - leechers: u64::from(stats.incomplete), + seeders: u64::from(metadata.complete), + completed: u64::from(metadata.downloaded), + leechers: u64::from(metadata.incomplete), }); } } @@ -220,7 +239,8 @@ mod tests { let torrent_info = get_torrent_info( &in_memory_torrent_repository, &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), // DevSkim: ignore DS173237 - ); + ) + .await; assert!(torrent_info.is_none()); } @@ -231,9 +251,11 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 
let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + in_memory_torrent_repository + .handle_announcement(&info_hash, &sample_peer(), None) + .await; - let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).unwrap(); + let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).await.unwrap(); assert_eq!( torrent_info, @@ -263,7 +285,7 @@ mod tests { async fn it_should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!(torrents, vec![]); } @@ -275,9 +297,11 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + in_memory_torrent_repository + .handle_announcement(&info_hash, &sample_peer(), None) + .await; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!( torrents, @@ -300,13 +324,17 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + in_memory_torrent_repository + 
.handle_announcement(&info_hash1, &sample_peer(), None) + .await; + in_memory_torrent_repository + .handle_announcement(&info_hash2, &sample_peer(), None) + .await; let offset = 0; let limit = 1; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); } @@ -321,13 +349,17 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + in_memory_torrent_repository + .handle_announcement(&info_hash1, &sample_peer(), None) + .await; + in_memory_torrent_repository + .handle_announcement(&info_hash2, &sample_peer(), None) + .await; let offset = 1; let limit = 4000; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -347,13 +379,17 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); + in_memory_torrent_repository + .handle_announcement(&info_hash1, &sample_peer(), None) + .await; let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, 
&sample_peer(), None); + in_memory_torrent_repository + .handle_announcement(&info_hash2, &sample_peer(), None) + .await; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!( torrents, @@ -388,7 +424,7 @@ mod tests { async fn it_should_return_an_empty_list_if_none_of_the_requested_torrents_is_found() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrent_info = get_torrents(&in_memory_torrent_repository, &[sample_info_hash()]); + let torrent_info = get_torrents(&in_memory_torrent_repository, &[sample_info_hash()]).await; assert!(torrent_info.is_empty()); } @@ -399,9 +435,11 @@ mod tests { let info_hash = sample_info_hash(); - let _ = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + in_memory_torrent_repository + .handle_announcement(&info_hash, &sample_peer(), None) + .await; - let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]); + let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]).await; assert_eq!( torrent_info, diff --git a/packages/tracker-core/tests/common/fixtures.rs b/packages/tracker-core/tests/common/fixtures.rs new file mode 100644 index 000000000..ea9c93a65 --- /dev/null +++ b/packages/tracker-core/tests/common/fixtures.rs @@ -0,0 +1,52 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::str::FromStr; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; + +/// # Panics +/// +/// Will panic if the temporary file path is not a valid UTF-8 string. 
+#[must_use] +pub fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config +} + +/// # Panics +/// +/// Will panic if the string representation of the info hash is not a valid infohash. +#[must_use] +pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::<InfoHash>() + .expect("String should be a valid info hash") +} + +/// Sample peer whose state is not relevant for the tests. +#[must_use] +pub fn sample_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(remote_client_ip(), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } +} + +// The client peer IP. 
+#[must_use] +pub fn remote_client_ip() -> IpAddr { + IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) +} diff --git a/packages/tracker-core/tests/common/mod.rs b/packages/tracker-core/tests/common/mod.rs new file mode 100644 index 000000000..414e9d7b5 --- /dev/null +++ b/packages/tracker-core/tests/common/mod.rs @@ -0,0 +1,2 @@ +pub mod fixtures; +pub mod test_env; diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs new file mode 100644 index 000000000..3fe0464fe --- /dev/null +++ b/packages/tracker-core/tests/common/test_env.rs @@ -0,0 +1,180 @@ +use std::net::IpAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::AnnounceEvent; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::PeersWanted; +use bittorrent_tracker_core::container::TrackerCoreContainer; +use bittorrent_tracker_core::statistics::persisted::load_persisted_metrics; +use tokio::task::yield_now; +use tokio_util::sync::CancellationToken; +use torrust_tracker_configuration::Core; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; + +pub struct TestEnv { + pub swarm_coordination_registry_container: Arc<SwarmCoordinationRegistryContainer>, + pub tracker_core_container: Arc<TrackerCoreContainer>, +} + +impl TestEnv { + #[must_use] + pub async fn started(core_config: Core) -> Self { + let test_env = TestEnv::new(core_config); + test_env.start().await; + test_env + } + + #[must_use] + pub fn new(core_config: Core) -> Self { + let core_config = Arc::new(core_config); + + let swarm_coordination_registry_container = 
Arc::new(SwarmCoordinationRegistryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &swarm_coordination_registry_container, + )); + + Self { + swarm_coordination_registry_container, + tracker_core_container, + } + } + + pub async fn start(&self) { + let now = DurationSinceUnixEpoch::from_secs(0); + self.load_persisted_metrics(now).await; + self.run_jobs().await; + } + + async fn load_persisted_metrics(&self, now: DurationSinceUnixEpoch) { + load_persisted_metrics( + &self.tracker_core_container.stats_repository, + &self.tracker_core_container.db_downloads_metric_repository, + now, + ) + .await + .unwrap(); + } + + async fn run_jobs(&self) { + let mut jobs = vec![]; + let cancellation_token = CancellationToken::new(); + + let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( + self.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token.clone(), + &self.swarm_coordination_registry_container.stats_repository, + ); + + jobs.push(job); + + let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( + self.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token.clone(), + &self.tracker_core_container.stats_repository, + &self.tracker_core_container.db_downloads_metric_repository, + self.tracker_core_container + .core_config + .tracker_policy + .persistent_torrent_completed_stat, + ); + jobs.push(job); + + // Give the event listeners some time to start + // todo: they should notify when they are ready + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + pub async fn announce_peer_started( + &mut self, + mut peer: Peer, + remote_client_ip: &IpAddr, + info_hash: &InfoHash, + ) -> AnnounceData { + peer.event = AnnounceEvent::Started; + + let announce_data = self + .tracker_core_container + .announce_handler 
+ .handle_announcement(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) + .await + .unwrap(); + + // Give time to the event listeners to process the event + yield_now().await; + + announce_data + } + + pub async fn announce_peer_completed( + &mut self, + mut peer: Peer, + remote_client_ip: &IpAddr, + info_hash: &InfoHash, + ) -> AnnounceData { + peer.event = AnnounceEvent::Completed; + + let announce_data = self + .tracker_core_container + .announce_handler + .handle_announcement(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) + .await + .unwrap(); + + // Give time to the event listeners to process the event + yield_now().await; + + announce_data + } + + pub async fn scrape(&self, info_hash: &InfoHash) -> ScrapeData { + self.tracker_core_container + .scrape_handler + .handle_scrape(&vec![*info_hash]) + .await + .unwrap() + } + + pub async fn increase_number_of_downloads(&mut self, peer: Peer, remote_client_ip: &IpAddr, info_hash: &InfoHash) { + let _announce_data = self.announce_peer_started(peer, remote_client_ip, info_hash).await; + let announce_data = self.announce_peer_completed(peer, remote_client_ip, info_hash).await; + + assert_eq!(announce_data.stats.downloads(), 1); + } + + pub async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option<SwarmMetadata> { + self.swarm_coordination_registry_container + .swarms + .get_swarm_metadata(info_hash) + .await + .unwrap() + } + + pub async fn remove_swarm(&self, info_hash: &InfoHash) { + self.swarm_coordination_registry_container + .swarms + .remove(info_hash) + .await + .unwrap(); + } + + pub async fn get_counter_value(&self, metric_name: &str) -> u64 { + self.tracker_core_container + .stats_repository + .get_metrics() + .await + .metric_collection + .get_counter_value(&MetricName::new(metric_name), &LabelSet::default()) + .unwrap() + .value() + } +} diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index 
5aaded10a..b170aaebd 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -1,135 +1,113 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::str::FromStr; -use std::sync::Arc; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; -use bittorrent_tracker_core::databases::setup::initialize_database; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use bittorrent_tracker_core::whitelist; -use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; -use torrust_tracker_configuration::Core; -use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_primitives::DurationSinceUnixEpoch; -use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; - -/// # Panics -/// -/// Will panic if the temporary file path is not a valid UTF-8 string. -#[must_use] -pub fn ephemeral_configuration() -> Core { - let mut config = Core::default(); - - let temp_file = ephemeral_sqlite_database(); - temp_file.to_str().unwrap().clone_into(&mut config.database.path); - - config -} +mod common; -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid infohash. 
-#[must_use] -pub fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 - .parse::<InfoHash>() - .expect("String should be a valid info hash") -} +use common::fixtures::{ephemeral_configuration, remote_client_ip, sample_info_hash, sample_peer}; +use common::test_env::TestEnv; +use torrust_tracker_configuration::AnnouncePolicy; +use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -/// Sample peer whose state is not relevant for the tests. -#[must_use] -pub fn sample_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(remote_client_ip(), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } +#[tokio::test] +async fn it_should_handle_the_announce_request() { + let mut test_env = TestEnv::started(ephemeral_configuration()).await; + + let announce_data = test_env + .announce_peer_started(sample_peer(), &remote_client_ip(), &sample_info_hash()) + .await; + + assert_eq!( + announce_data, + AnnounceData { + peers: vec![], + stats: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + policy: AnnouncePolicy { + interval: 120, + interval_min: 120 + } + } + ); } -// The client peer IP. 
-#[must_use] -fn remote_client_ip() -> IpAddr { - IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) -} +#[tokio::test] +async fn it_should_not_return_the_peer_making_the_announce_request() { + let mut test_env = TestEnv::started(ephemeral_configuration()).await; + + let announce_data = test_env + .announce_peer_started(sample_peer(), &remote_client_ip(), &sample_info_hash()) + .await; -struct Container { - pub announce_handler: Arc<AnnounceHandler>, - pub scrape_handler: Arc<ScrapeHandler>, + assert_eq!(announce_data.peers.len(), 0); } -impl Container { - pub fn initialize(config: &Core) -> Self { - let database = initialize_database(config); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( - config, - &in_memory_whitelist.clone(), - )); - let announce_handler = Arc::new(AnnounceHandler::new( - config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - Self { - announce_handler, - scrape_handler, - } - } +#[tokio::test] +async fn it_should_handle_the_scrape_request() { + let mut test_env = TestEnv::started(ephemeral_configuration()).await; + + let info_hash = sample_info_hash(); + + let _announce_data = test_env + .announce_peer_started(sample_peer(), &remote_client_ip(), &info_hash) + .await; + + let scrape_data = test_env.scrape(&info_hash).await; + + assert!(scrape_data.files.contains_key(&info_hash)); } #[tokio::test] -async fn test_announce_and_scrape_requests() { - let config = ephemeral_configuration(); +async fn it_should_persist_the_number_of_completed_peers_for_each_torrent_into_the_database() { + let mut 
core_config = ephemeral_configuration(); + core_config.tracker_policy.persistent_torrent_completed_stat = true; - let container = Container::initialize(&config); + let mut test_env = TestEnv::started(core_config).await; let info_hash = sample_info_hash(); - let mut peer = sample_peer(); + test_env + .increase_number_of_downloads(sample_peer(), &remote_client_ip(), &info_hash) + .await; - // Announce + assert!(test_env.get_swarm_metadata(&info_hash).await.unwrap().downloads() == 1); - // First announce: download started - peer.event = AnnounceEvent::Started; - let announce_data = container - .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); + test_env.remove_swarm(&info_hash).await; - // NOTICE: you don't get back the peer making the request. - assert_eq!(announce_data.peers.len(), 0); - assert_eq!(announce_data.stats.downloaded, 0); - - // Second announce: download completed - peer.event = AnnounceEvent::Completed; - let announce_data = container - .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) - .await + // Ensure the swarm metadata is removed + assert!(test_env.get_swarm_metadata(&info_hash).await.is_none()); + + // Load torrents from the database to ensure the completed stats are persisted + test_env + .tracker_core_container + .torrents_manager + .load_torrents_from_database() .unwrap(); - assert_eq!(announce_data.peers.len(), 0); - assert_eq!(announce_data.stats.downloaded, 1); + assert!(test_env.get_swarm_metadata(&info_hash).await.unwrap().downloads() == 1); +} - // Scrape +#[tokio::test] +async fn it_should_persist_the_global_number_of_completed_peers_into_the_database() { + let mut core_config = ephemeral_configuration(); - let scrape_data = container.scrape_handler.scrape(&vec![info_hash]).await.unwrap(); + core_config.tracker_policy.persistent_torrent_completed_stat = true; - 
assert!(scrape_data.files.contains_key(&info_hash)); -} + let mut test_env = TestEnv::started(core_config.clone()).await; + + test_env + .increase_number_of_downloads(sample_peer(), &remote_client_ip(), &sample_info_hash()) + .await; -#[test] -fn test_scrape_request() {} + // We run a new instance of the test environment to simulate a restart. + // The new instance uses the same underlying database. + + let new_test_env = TestEnv::started(core_config).await; + + assert_eq!( + new_test_env + .get_counter_value("tracker_core_persistent_torrents_downloads_total") + .await, + 1 + ); +} diff --git a/packages/udp-protocol/Cargo.toml b/packages/udp-protocol/Cargo.toml index 31fd52af8..3bcde9a95 100644 --- a/packages/udp-protocol/Cargo.toml +++ b/packages/udp-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types and functions for the BitTorrent UDP tracker protocol." -keywords = ["bittorrent", "library", "primitives", "udp"] +keywords = [ "bittorrent", "library", "primitives", "udp" ] name = "bittorrent-udp-tracker-protocol" readme = "README.md" diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index fc8e2328c..45a74f93c 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", "bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", "library", "tracker" ] license.workspace = true name = "bittorrent-udp-tracker-core" publish.workspace = true @@ -20,17 +20,28 @@ bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" bittorrent-udp-tracker-protocol = { version = "3.0.0-develop", path = "../udp-protocol" } bloom = "0.3.2" blowfish = "0" -cipher = "0" +cipher = "0.5" +criterion = { version = "0.5.1", features = [ 
"async_tokio" ] } futures = "0" lazy_static = "1" rand = "0" +serde = "1.0.219" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync", "time"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync", "time" ] } +tokio-util = "0.7.15" +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" zerocopy = "0.7" [dev-dependencies] mockall = "0" torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } + +[[bench]] +harness = false +name = "udp_tracker_core_benchmark" diff --git a/packages/udp-tracker-core/benches/helpers/mod.rs b/packages/udp-tracker-core/benches/helpers/mod.rs new file mode 100644 index 000000000..ea1959bb4 --- /dev/null +++ b/packages/udp-tracker-core/benches/helpers/mod.rs @@ -0,0 +1,2 @@ +pub mod sync; +mod utils; diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs new file mode 100644 index 000000000..e8ec1ce03 --- /dev/null +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -0,0 +1,31 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use bittorrent_udp_tracker_core::event::bus::EventBus; +use bittorrent_udp_tracker_core::event::sender::Broadcaster; +use bittorrent_udp_tracker_core::services::connect::ConnectService; +use torrust_tracker_events::bus::SenderStatus; +use torrust_tracker_primitives::service_binding::{Protocol, 
ServiceBinding}; + +use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; + +#[allow(clippy::unused_async)] +pub async fn connect_once(samples: u64) -> Duration { + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + + let udp_core_broadcaster = Broadcaster::default(); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); + + let udp_core_stats_event_sender = event_bus.sender(); + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); + let start = Instant::now(); + + for _ in 0..samples { + let _response = connect_service.handle_connect(client_socket_addr, server_service_binding.clone(), sample_issue_time()); + } + + start.elapsed() +} diff --git a/packages/udp-tracker-core/benches/helpers/utils.rs b/packages/udp-tracker-core/benches/helpers/utils.rs new file mode 100644 index 000000000..1423d4bcd --- /dev/null +++ b/packages/udp-tracker-core/benches/helpers/utils.rs @@ -0,0 +1,27 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use bittorrent_udp_tracker_core::event::Event; +use futures::future::BoxFuture; +use mockall::mock; +use torrust_tracker_events::sender::SendError; + +pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() +} + +pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) +} + +pub(crate) fn sample_issue_time() -> f64 { + 1_000_000_000_f64 +} + +mock! 
{ + pub(crate) UdpCoreStatsEventSender {} + impl torrust_tracker_events::sender::Sender for UdpCoreStatsEventSender { + type Event = Event; + + fn send(&self, event: Event) -> BoxFuture<'static,Option<Result<usize,SendError<Event> > > > ; + } +} diff --git a/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs b/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs new file mode 100644 index 000000000..90fc721d0 --- /dev/null +++ b/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs @@ -0,0 +1,20 @@ +mod helpers; + +use std::time::Duration; + +use criterion::{criterion_group, criterion_main, Criterion}; + +use crate::helpers::sync; + +fn bench_connect_once(c: &mut Criterion) { + let mut group = c.benchmark_group("udp_tracker/connect_once"); + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_secs(1)); + + group.bench_function("connect_once", |b| { + b.iter(|| sync::connect_once(100)); + }); +} + +criterion_group!(benches, bench_connect_once); +criterion_main!(benches); diff --git a/packages/udp-tracker-core/src/connection_cookie.rs b/packages/udp-tracker-core/src/connection_cookie.rs index 31c116400..2d8e941cd 100644 --- a/packages/udp-tracker-core/src/connection_cookie.rs +++ b/packages/udp-tracker-core/src/connection_cookie.rs @@ -84,9 +84,8 @@ use tracing::instrument; use zerocopy::AsBytes; use crate::crypto::keys::CipherArrayBlowfish; - /// Error returned when there was an error with the connection cookie. 
-#[derive(Error, Debug, Clone)] +#[derive(Error, Debug, Clone, PartialEq)] pub enum ConnectionCookieError { #[error("cookie value is not normal: {not_normal_value}")] ValueNotNormal { not_normal_value: f64 }, @@ -140,8 +139,8 @@ use std::ops::Range; pub fn check(cookie: &Cookie, fingerprint: u64, valid_range: Range<f64>) -> Result<f64, ConnectionCookieError> { assert!(valid_range.start <= valid_range.end, "range start is larger than range end"); - let cookie_bytes = CipherArrayBlowfish::from_slice(cookie.0.as_bytes()); - let cookie_bytes = decode(*cookie_bytes); + let cookie_bytes = CipherArrayBlowfish::try_from(cookie.0.as_bytes()).expect("it should be the same size"); + let cookie_bytes = decode(cookie_bytes); let issue_time = disassemble(fingerprint, cookie_bytes); @@ -176,7 +175,7 @@ pub fn gen_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { } mod cookie_builder { - use cipher::{BlockDecrypt, BlockEncrypt}; + use cipher::{BlockCipherDecrypt, BlockCipherEncrypt}; use tracing::instrument; use zerocopy::{byteorder, AsBytes as _, NativeEndian}; @@ -196,7 +195,7 @@ mod cookie_builder { let cookie: byteorder::I64<NativeEndian> = *zerocopy::FromBytes::ref_from(&cookie.to_ne_bytes()).expect("it should be aligned"); - *CipherArrayBlowfish::from_slice(cookie.as_bytes()) + CipherArrayBlowfish::try_from(cookie.as_bytes()).expect("it should be the same size") } #[instrument()] diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index c4cce3dc1..1d8b1d71c 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -1,28 +1,28 @@ use std::sync::Arc; -use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::whitelist; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; +use 
torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; +use crate::event::bus::EventBus; +use crate::event::sender::Broadcaster; use crate::services::announce::AnnounceService; use crate::services::banning::BanService; use crate::services::connect::ConnectService; use crate::services::scrape::ScrapeService; -use crate::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; +use crate::statistics::repository::Repository; +use crate::{event, services, statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; pub struct UdpTrackerCoreContainer { - // todo: replace with TrackerCoreContainer - pub core_config: Arc<Core>, - pub announce_handler: Arc<AnnounceHandler>, - pub scrape_handler: Arc<ScrapeHandler>, - pub whitelist_authorization: Arc<whitelist::authorization::WhitelistAuthorization>, - pub udp_tracker_config: Arc<UdpTracker>, - pub udp_core_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, - pub udp_core_stats_repository: Arc<statistics::repository::Repository>, + + pub tracker_core_container: Arc<TrackerCoreContainer>, + + // `UdpTrackerCoreServices` + pub event_bus: Arc<event::bus::EventBus>, + pub stats_event_sender: crate::event::sender::Sender, + pub stats_repository: Arc<statistics::repository::Repository>, pub ban_service: Arc<RwLock<BanService>>, pub connect_service: Arc<ConnectService>, pub announce_service: Arc<AnnounceService>, @@ -32,19 +32,72 @@ pub struct UdpTrackerCoreContainer { impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc<Core>, udp_tracker_config: &Arc<UdpTracker>) -> Arc<UdpTrackerCoreContainer> { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); - Self::initialize_from(&tracker_core_container, udp_tracker_config) + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = 
Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &swarm_coordination_registry_container, + )); + + Self::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config) } #[must_use] - pub fn initialize_from( + pub fn initialize_from_tracker_core( tracker_core_container: &Arc<TrackerCoreContainer>, udp_tracker_config: &Arc<UdpTracker>, ) -> Arc<UdpTrackerCoreContainer> { - let (udp_core_stats_event_sender, udp_core_stats_repository) = - statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let udp_core_stats_repository = Arc::new(udp_core_stats_repository); + let udp_tracker_core_services = UdpTrackerCoreServices::initialize_from(tracker_core_container); + + Self::initialize_from_services(tracker_core_container, &udp_tracker_core_services, udp_tracker_config) + } + + #[must_use] + pub fn initialize_from_services( + tracker_core_container: &Arc<TrackerCoreContainer>, + udp_tracker_core_services: &Arc<UdpTrackerCoreServices>, + udp_tracker_config: &Arc<UdpTracker>, + ) -> Arc<Self> { + Arc::new(Self { + udp_tracker_config: udp_tracker_config.clone(), + + tracker_core_container: tracker_core_container.clone(), + + // `UdpTrackerCoreServices` + event_bus: udp_tracker_core_services.event_bus.clone(), + stats_event_sender: udp_tracker_core_services.stats_event_sender.clone(), + stats_repository: udp_tracker_core_services.stats_repository.clone(), + ban_service: udp_tracker_core_services.ban_service.clone(), + connect_service: udp_tracker_core_services.connect_service.clone(), + announce_service: udp_tracker_core_services.announce_service.clone(), + scrape_service: udp_tracker_core_services.scrape_service.clone(), + }) + } +} + +pub struct UdpTrackerCoreServices { + pub event_bus: Arc<event::bus::EventBus>, + pub stats_event_sender: crate::event::sender::Sender, + pub stats_repository: Arc<statistics::repository::Repository>, + pub 
ban_service: Arc<RwLock<services::banning::BanService>>, + pub connect_service: Arc<services::connect::ConnectService>, + pub announce_service: Arc<services::announce::AnnounceService>, + pub scrape_service: Arc<services::scrape::ScrapeService>, +} + +impl UdpTrackerCoreServices { + #[must_use] + pub fn initialize_from(tracker_core_container: &Arc<TrackerCoreContainer>) -> Arc<Self> { + let udp_core_broadcaster = Broadcaster::default(); + let udp_core_stats_repository = Arc::new(Repository::new()); + let event_bus = Arc::new(EventBus::new( + tracker_core_container.core_config.tracker_usage_statistics.into(), + udp_core_broadcaster.clone(), + )); + + let udp_core_stats_event_sender = event_bus.sender(); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender.clone())); let announce_service = Arc::new(AnnounceService::new( @@ -57,19 +110,14 @@ impl UdpTrackerCoreContainer { udp_core_stats_event_sender.clone(), )); - Arc::new(UdpTrackerCoreContainer { - core_config: tracker_core_container.core_config.clone(), - announce_handler: tracker_core_container.announce_handler.clone(), - scrape_handler: tracker_core_container.scrape_handler.clone(), - whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), - - udp_tracker_config: udp_tracker_config.clone(), - udp_core_stats_event_sender: udp_core_stats_event_sender.clone(), - udp_core_stats_repository: udp_core_stats_repository.clone(), - ban_service: ban_service.clone(), - connect_service: connect_service.clone(), - announce_service: announce_service.clone(), - scrape_service: scrape_service.clone(), + Arc::new(Self { + event_bus, + stats_event_sender: udp_core_stats_event_sender, + stats_repository: udp_core_stats_repository, + ban_service, + connect_service, + announce_service, + scrape_service, }) } } diff --git a/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs 
b/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs index 58ba70562..357bdeca5 100644 --- a/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs +++ b/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs @@ -4,14 +4,13 @@ //! application starts and are not persisted anywhere. use blowfish::BlowfishLE; -use cipher::generic_array::GenericArray; -use cipher::{BlockSizeUser, KeyInit}; +use cipher::{Block, KeyInit}; use rand::rngs::ThreadRng; -use rand::Rng; +use rand::RngExt; pub type Seed = [u8; 32]; pub type CipherBlowfish = BlowfishLE; -pub type CipherArrayBlowfish = GenericArray<u8, <CipherBlowfish as BlockSizeUser>::BlockSize>; +pub type CipherArrayBlowfish = Block<CipherBlowfish>; lazy_static! { /// The random static seed. diff --git a/packages/udp-tracker-core/src/crypto/keys.rs b/packages/udp-tracker-core/src/crypto/keys.rs index f9a3e361d..2faa745c3 100644 --- a/packages/udp-tracker-core/src/crypto/keys.rs +++ b/packages/udp-tracker-core/src/crypto/keys.rs @@ -5,6 +5,8 @@ //! //! It also provides the logic for the cipher for encryption and decryption. +use cipher::{BlockCipherDecrypt, BlockCipherEncrypt}; + use self::detail_cipher::CURRENT_CIPHER; use self::detail_seed::CURRENT_SEED; pub use crate::crypto::ephemeral_instance_keys::CipherArrayBlowfish; @@ -13,7 +15,7 @@ use crate::crypto::ephemeral_instance_keys::{CipherBlowfish, Seed, RANDOM_CIPHER /// This trait is for structures that can keep and provide a seed. pub trait Keeper { type Seed: Sized + Default + AsMut<[u8]>; - type Cipher: cipher::BlockCipher; + type Cipher: BlockCipherEncrypt + BlockCipherDecrypt; /// It returns a reference to the seed that is keeping. 
fn get_seed() -> &'static Self::Seed; @@ -135,14 +137,14 @@ mod detail_cipher { #[cfg(test)] mod tests { - use cipher::BlockEncrypt; + use cipher::BlockCipherEncrypt; use crate::crypto::ephemeral_instance_keys::{CipherArrayBlowfish, ZEROED_TEST_CIPHER_BLOWFISH}; use crate::crypto::keys::detail_cipher::CURRENT_CIPHER; #[test] fn it_should_default_to_zeroed_seed_when_testing() { - let mut data: cipher::generic_array::GenericArray<u8, _> = CipherArrayBlowfish::from([0u8; 8]); + let mut data = CipherArrayBlowfish::from([0u8; 8]); let mut data_2 = CipherArrayBlowfish::from([0u8; 8]); CURRENT_CIPHER.encrypt_block(&mut data); diff --git a/packages/udp-tracker-core/src/event.rs b/packages/udp-tracker-core/src/event.rs new file mode 100644 index 000000000..761b809d8 --- /dev/null +++ b/packages/udp-tracker-core/src/event.rs @@ -0,0 +1,97 @@ +use std::net::SocketAddr; + +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::label_name; +use torrust_tracker_primitives::peer::PeerAnnouncement; +use torrust_tracker_primitives::service_binding::ServiceBinding; + +/// A UDP core event. 
+#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Event { + UdpConnect { + connection: ConnectionContext, + }, + UdpAnnounce { + connection: ConnectionContext, + info_hash: InfoHash, + announcement: PeerAnnouncement, + }, + UdpScrape { + connection: ConnectionContext, + }, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ConnectionContext { + pub client_socket_addr: SocketAddr, + pub server_service_binding: ServiceBinding, +} + +impl ConnectionContext { + #[must_use] + pub fn new(client_socket_addr: SocketAddr, server_service_binding: ServiceBinding) -> Self { + Self { + client_socket_addr, + server_service_binding, + } + } + + #[must_use] + pub fn client_socket_addr(&self) -> SocketAddr { + self.client_socket_addr + } + + #[must_use] + pub fn server_socket_addr(&self) -> SocketAddr { + self.server_service_binding.bind_address() + } +} + +impl From<ConnectionContext> for LabelSet { + fn from(connection_context: ConnectionContext) -> Self { + LabelSet::from([ + ( + label_name!("server_binding_protocol"), + LabelValue::new(&connection_context.server_service_binding.protocol().to_string()), + ), + ( + label_name!("server_binding_ip"), + LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), + ), + ( + label_name!("server_binding_address_ip_type"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_type().to_string()), + ), + ( + label_name!("server_binding_address_ip_family"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_family().to_string()), + ), + ( + label_name!("server_binding_port"), + LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), + ), + ]) + } +} + +pub mod sender { + use std::sync::Arc; + + use super::Event; + + pub type Sender = Option<Arc<dyn torrust_tracker_events::sender::Sender<Event = Event>>>; + pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster<Event>; +} + +pub mod receiver { + 
use super::Event; + + pub type Receiver = Box<dyn torrust_tracker_events::receiver::Receiver<Event = Event>>; +} + +pub mod bus { + use crate::event::Event; + + pub type EventBus = torrust_tracker_events::bus::EventBus<Event>; +} diff --git a/packages/udp-tracker-core/src/lib.rs b/packages/udp-tracker-core/src/lib.rs index 5aa714d35..2c1943853 100644 --- a/packages/udp-tracker-core/src/lib.rs +++ b/packages/udp-tracker-core/src/lib.rs @@ -1,9 +1,23 @@ pub mod connection_cookie; pub mod container; pub mod crypto; +pub mod event; pub mod services; pub mod statistics; +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + use crypto::ephemeral_instance_keys; use tracing::instrument; @@ -28,3 +42,18 @@ pub fn initialize_static() { // Initialize the Zeroed Cipher lazy_static::initialize(&ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH); } + +#[cfg(test)] +pub(crate) mod tests { + use bittorrent_primitives::info_hash::InfoHash; + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::<InfoHash>() + .expect("String should be a valid info hash") + } +} diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 698f5fba6..a69e91d8a 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -7,7 +7,7 @@ //! //! It also sends an [`udp_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. 
-use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::ops::Range; use std::sync::Arc; @@ -18,9 +18,11 @@ use bittorrent_tracker_core::error::{AnnounceError, WhitelistError}; use bittorrent_tracker_core::whitelist; use bittorrent_udp_tracker_protocol::peer_builder; use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::peer::PeerAnnouncement; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; -use crate::statistics; +use crate::event::{ConnectionContext, Event}; /// The `AnnounceService` is responsible for handling the `announce` requests. /// @@ -30,7 +32,7 @@ use crate::statistics; pub struct AnnounceService { announce_handler: Arc<AnnounceHandler>, whitelist_authorization: Arc<whitelist::authorization::WhitelistAuthorization>, - opt_udp_core_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, + opt_udp_core_stats_event_sender: crate::event::sender::Sender, } impl AnnounceService { @@ -38,7 +40,7 @@ impl AnnounceService { pub fn new( announce_handler: Arc<AnnounceHandler>, whitelist_authorization: Arc<whitelist::authorization::WhitelistAuthorization>, - opt_udp_core_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, + opt_udp_core_stats_event_sender: crate::event::sender::Sender, ) -> Self { Self { announce_handler, @@ -57,17 +59,18 @@ impl AnnounceService { /// whitelist. 
pub async fn handle_announce( &self, - remote_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request: &AnnounceRequest, cookie_valid_range: Range<f64>, ) -> Result<AnnounceData, UdpAnnounceError> { - Self::authenticate(remote_addr, request, cookie_valid_range)?; + Self::authenticate(client_socket_addr, request, cookie_valid_range)?; let info_hash = request.info_hash.into(); self.authorize(&info_hash).await?; - let remote_client_ip = remote_addr.ip(); + let remote_client_ip = client_socket_addr.ip(); let mut peer = peer_builder::from_request(request, &remote_client_ip); @@ -75,10 +78,11 @@ impl AnnounceService { let announce_data = self .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) + .handle_announcement(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_stats_event(remote_client_ip).await; + self.send_event(info_hash, peer, client_socket_addr, server_service_binding) + .await; Ok(announce_data) } @@ -99,14 +103,23 @@ impl AnnounceService { self.whitelist_authorization.authorize(info_hash).await } - async fn send_stats_event(&self, peer_ip: IpAddr) { + async fn send_event( + &self, + info_hash: InfoHash, + announcement: PeerAnnouncement, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, + ) { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { - let event = match peer_ip { - IpAddr::V4(_) => statistics::event::Event::Udp4Announce, - IpAddr::V6(_) => statistics::event::Event::Udp6Announce, + let event = Event::UdpAnnounce { + connection: ConnectionContext::new(client_socket_addr, server_service_binding), + info_hash, + announcement, }; - udp_stats_event_sender.send_event(event).await; + tracing::debug!(target = crate::UDP_TRACKER_LOG_TARGET, "Sending UdpAnnounce event: {event:?}"); + + udp_stats_event_sender.send(event).await; } } } diff --git 
a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 14a3068e4..6ba36f274 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -2,24 +2,24 @@ //! //! The service is responsible for handling the `connect` requests. use std::net::SocketAddr; -use std::sync::Arc; use aquatic_udp_protocol::ConnectionId; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::connection_cookie::{gen_remote_fingerprint, make}; -use crate::statistics; +use crate::event::{ConnectionContext, Event}; /// The `ConnectService` is responsible for handling the `connect` requests. /// /// It is responsible for generating the connection cookie and sending the /// appropriate statistics events. pub struct ConnectService { - pub opt_udp_core_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, + pub opt_udp_core_stats_event_sender: crate::event::sender::Sender, } impl ConnectService { #[must_use] - pub fn new(opt_udp_core_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>) -> Self { + pub fn new(opt_udp_core_stats_event_sender: crate::event::sender::Sender) -> Self { Self { opt_udp_core_stats_event_sender, } @@ -30,18 +30,21 @@ impl ConnectService { /// # Panics /// /// It will panic if there was an error making the connection cookie. 
- pub async fn handle_connect(&self, remote_addr: SocketAddr, cookie_issue_time: f64) -> ConnectionId { - let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); + pub async fn handle_connect( + &self, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, + cookie_issue_time: f64, + ) -> ConnectionId { + let connection_id = + make(gen_remote_fingerprint(&client_socket_addr), cookie_issue_time).expect("it should be a normal value"); if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp4Connect).await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp6Connect).await; - } - } + udp_stats_event_sender + .send(Event::UdpConnect { + connection: ConnectionContext::new(client_socket_addr, server_service_binding), + }) + .await; } connection_id @@ -54,27 +57,36 @@ mod tests { mod connect_request { use std::future; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::connection_cookie::make; + use crate::event::bus::EventBus; + use crate::event::sender::Broadcaster; + use crate::event::{ConnectionContext, Event}; use crate::services::connect::ConnectService; use crate::services::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpCoreStatsEventSender, }; - use crate::statistics; #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); - let 
udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + + let udp_core_broadcaster = Broadcaster::default(); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let response = connect_service - .handle_connect(sample_ipv4_remote_addr(), sample_issue_time()) + .handle_connect(sample_ipv4_remote_addr(), server_service_binding, sample_issue_time()) .await; assert_eq!( @@ -85,13 +97,17 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + + let udp_core_broadcaster = Broadcaster::default(); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let response = connect_service - .handle_connect(sample_ipv4_remote_addr(), sample_issue_time()) + .handle_connect(sample_ipv4_remote_addr(), server_service_binding, sample_issue_time()) .await; assert_eq!( @@ -102,13 +118,18 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); - let udp_core_stats_event_sender = 
Arc::new(udp_core_stats_event_sender); + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + + let udp_core_broadcaster = Broadcaster::default(); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let response = connect_service - .handle_connect(sample_ipv6_remote_addr(), sample_issue_time()) + .handle_connect(client_socket_addr, server_service_binding, sample_issue_time()) .await; assert_eq!( @@ -119,39 +140,47 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let client_socket_addr = sample_ipv4_socket_address(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Udp4Connect)) + .expect_send() + .with(eq(Event::UdpConnect { + connection: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let opt_udp_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let client_socket_address = sample_ipv4_socket_address(); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let opt_udp_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(udp_stats_event_sender_mock)); let connect_service = 
Arc::new(ConnectService::new(opt_udp_stats_event_sender)); connect_service - .handle_connect(client_socket_address, sample_issue_time()) + .handle_connect(client_socket_addr, server_service_binding, sample_issue_time()) .await; } #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Udp6Connect)) + .expect_send() + .with(eq(Event::UdpConnect { + connection: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let opt_udp_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let opt_udp_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(udp_stats_event_sender_mock)); let connect_service = Arc::new(ConnectService::new(opt_udp_stats_event_sender)); connect_service - .handle_connect(sample_ipv6_remote_addr(), sample_issue_time()) + .handle_connect(client_socket_addr, server_service_binding, sample_issue_time()) .await; } } diff --git a/packages/udp-tracker-core/src/services/mod.rs b/packages/udp-tracker-core/src/services/mod.rs index 6aa254f41..56882e68f 100644 --- a/packages/udp-tracker-core/src/services/mod.rs +++ b/packages/udp-tracker-core/src/services/mod.rs @@ -10,10 +10,10 @@ pub(crate) mod tests { use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::mpsc::error::SendError; + use torrust_tracker_events::sender::SendError; use 
crate::connection_cookie::gen_remote_fingerprint; - use crate::statistics; + use crate::event::Event; pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() @@ -32,11 +32,11 @@ pub(crate) mod tests { } pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) } fn sample_ipv6_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) } pub(crate) fn sample_issue_time() -> f64 { @@ -45,8 +45,10 @@ pub(crate) mod tests { mock! { pub(crate) UdpCoreStatsEventSender {} - impl statistics::event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option<Result<(),SendError<statistics::event::Event> > > > ; + impl torrust_tracker_events::sender::Sender for UdpCoreStatsEventSender { + type Event = Event; + + fn send(&self, event: Event) -> BoxFuture<'static,Option<Result<usize,SendError<Event> > > > ; } } } diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 61301cd43..8551351fb 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -16,9 +16,10 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::error::{ScrapeError, WhitelistError}; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_primitives::core::ScrapeData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; -use crate::statistics; +use crate::event::{ConnectionContext, Event}; /// The `ScrapeService` is responsible for handling the `scrape` requests. 
/// @@ -27,15 +28,12 @@ use crate::statistics; /// - The number of UDP `scrape` requests handled by the UDP tracker. pub struct ScrapeService { scrape_handler: Arc<ScrapeHandler>, - opt_udp_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, + opt_udp_stats_event_sender: crate::event::sender::Sender, } impl ScrapeService { #[must_use] - pub fn new( - scrape_handler: Arc<ScrapeHandler>, - opt_udp_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, - ) -> Self { + pub fn new(scrape_handler: Arc<ScrapeHandler>, opt_udp_stats_event_sender: crate::event::sender::Sender) -> Self { Self { scrape_handler, opt_udp_stats_event_sender, @@ -49,18 +47,19 @@ impl ScrapeService { /// It will return an error if the tracker core scrape handler returns an error. pub async fn handle_scrape( &self, - remote_client_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request: &ScrapeRequest, cookie_valid_range: Range<f64>, ) -> Result<ScrapeData, UdpScrapeError> { - Self::authenticate(remote_client_addr, request, cookie_valid_range)?; + Self::authenticate(client_socket_addr, request, cookie_valid_range)?; let scrape_data = self .scrape_handler - .scrape(&Self::convert_from_aquatic(&request.info_hashes)) + .handle_scrape(&Self::convert_from_aquatic(&request.info_hashes)) .await?; - self.send_stats_event(remote_client_addr).await; + self.send_event(client_socket_addr, server_service_binding).await; Ok(scrape_data) } @@ -81,13 +80,15 @@ impl ScrapeService { aquatic_infohashes.iter().map(|&x| x.into()).collect() } - async fn send_stats_event(&self, remote_addr: SocketAddr) { + async fn send_event(&self, client_socket_addr: SocketAddr, server_service_binding: ServiceBinding) { if let Some(udp_stats_event_sender) = self.opt_udp_stats_event_sender.as_deref() { - let event = match remote_addr { - SocketAddr::V4(_) => statistics::event::Event::Udp4Scrape, - SocketAddr::V6(_) => 
statistics::event::Event::Udp6Scrape, + let event = Event::UdpScrape { + connection: ConnectionContext::new(client_socket_addr, server_service_binding), }; - udp_stats_event_sender.send_event(event).await; + + tracing::debug!(target = crate::UDP_TRACKER_LOG_TARGET, "Sending UdpScrape event: {event:?}"); + + udp_stats_event_sender.send(event).await; } } } diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 096059b91..e5d2b87a7 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -1,28 +1,51 @@ -use crate::statistics::event::Event; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::Event; use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; -pub async fn handle_event(event: Event, stats_repository: &Repository) { +/// Handles a UDP core event by increasing the matching request counter. +/// +/// Counter-update failures are logged via `tracing::error!` and do not panic.
+pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { match event { - // UDP4 - Event::Udp4Connect => { - stats_repository.increase_udp4_connections().await; - } - Event::Udp4Announce => { - stats_repository.increase_udp4_announces().await; + Event::UdpConnect { connection: context } => { + let mut label_set = LabelSet::from(context); + label_set.upsert(label_name!("request_kind"), LabelValue::new("connect")); + + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } - Event::Udp4Scrape => { - stats_repository.increase_udp4_scrapes().await; - } - - // UDP6 - Event::Udp6Connect => { - stats_repository.increase_udp6_connections().await; + Event::UdpAnnounce { connection: context, .. } => { + let mut label_set = LabelSet::from(context); + label_set.upsert(label_name!("request_kind"), LabelValue::new("announce")); + + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } - Event::Udp6Announce => { - stats_repository.increase_udp6_announces().await; - } - Event::Udp6Scrape => { - stats_repository.increase_udp6_scrapes().await; + Event::UdpScrape { connection: context } => { + let mut label_set = LabelSet::from(context); + label_set.upsert(label_name!("request_kind"), LabelValue::new("scrape")); + + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } } @@ -31,73 +54,169 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, 
SocketAddr}; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::peer::PeerAnnouncement; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; use crate::statistics::event::handler::handle_event; - use crate::statistics::event::Event; use crate::statistics::repository::Repository; + use crate::tests::sample_info_hash; + use crate::CurrentClock; #[tokio::test] async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp4Connect, &stats_repository).await; + handle_event( + Event::UdpConnect { + connection: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_connections_handled, 1); + assert_eq!(stats.udp4_connections_handled(), 1); } #[tokio::test] async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp4Announce, &stats_repository).await; + handle_event( + Event::UdpAnnounce { + connection: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + info_hash: sample_info_hash(), + announcement: PeerAnnouncement::default(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_announces_handled, 1); + assert_eq!(stats.udp4_announces_handled(), 1); } #[tokio::test] async fn 
should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp4Scrape, &stats_repository).await; + handle_event( + Event::UdpScrape { + connection: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_scrapes_handled, 1); + assert_eq!(stats.udp4_scrapes_handled(), 1); } #[tokio::test] async fn should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp6Connect, &stats_repository).await; + handle_event( + Event::UdpConnect { + connection: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_connections_handled, 1); + assert_eq!(stats.udp6_connections_handled(), 1); } #[tokio::test] async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp6Announce, &stats_repository).await; + handle_event( + Event::UdpAnnounce { + connection: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + info_hash: sample_info_hash(), + announcement: PeerAnnouncement::default(), + }, + 
&stats_repository, + CurrentClock::now(), + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_announces_handled, 1); + assert_eq!(stats.udp6_announces_handled(), 1); } #[tokio::test] async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp6Scrape, &stats_repository).await; + handle_event( + Event::UdpScrape { + connection: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_scrapes_handled, 1); + assert_eq!(stats.udp6_scrapes_handled(), 1); } } diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index f1a2e25de..b11bcce85 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -1,11 +1,58 @@ -use tokio::sync::mpsc; +use std::sync::Arc; + +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; use super::handler::handle_event; -use super::Event; +use crate::event::receiver::Receiver; use crate::statistics::repository::Repository; +use crate::{CurrentClock, UDP_TRACKER_LOG_TARGET}; + +#[must_use] +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc<Repository>, +) -> JoinHandle<()> { + let stats_repository = repository.clone(); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker core event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, 
cancellation_token, stats_repository).await; + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker core event listener finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc<Repository>) { + loop { + tokio::select! { + biased; + + () = cancellation_token.cancelled() => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down UDP tracker core event listener."); + break; + } -pub async fn dispatch_events(mut receiver: mpsc::Receiver<Event>, stats_repository: Repository) { - while let Some(event) = receiver.recv().await { - handle_event(event, &stats_repository).await; + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker core statistics receiver closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker core statistics receiver lagged by {} events.", n); + } + } + } + } + } + } } } diff --git a/packages/udp-tracker-core/src/statistics/event/mod.rs b/packages/udp-tracker-core/src/statistics/event/mod.rs index bfc733657..dae683398 100644 --- a/packages/udp-tracker-core/src/statistics/event/mod.rs +++ b/packages/udp-tracker-core/src/statistics/event/mod.rs @@ -1,23 +1,2 @@ pub mod handler; pub mod listener; -pub mod sender; - -/// An statistics event. It is used to collect tracker metrics. -/// -/// - `Tcp` prefix means the event was triggered by the HTTP tracker -/// - `Udp` prefix means the event was triggered by the UDP tracker -/// - `4` or `6` prefixes means the IP version used by the peer -/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` -/// -/// > NOTE: HTTP trackers do not use `connection` requests. 
-#[derive(Debug, PartialEq, Eq)] -pub enum Event { - // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } - // Attributes are enums too. - Udp4Connect, - Udp4Announce, - Udp4Scrape, - Udp6Connect, - Udp6Announce, - Udp6Scrape, -} diff --git a/packages/udp-tracker-core/src/statistics/event/sender.rs b/packages/udp-tracker-core/src/statistics/event/sender.rs deleted file mode 100644 index ca4b4e210..000000000 --- a/packages/udp-tracker-core/src/statistics/event/sender.rs +++ /dev/null @@ -1,29 +0,0 @@ -use futures::future::BoxFuture; -use futures::FutureExt; -#[cfg(test)] -use mockall::{automock, predicate::str}; -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::SendError; - -use super::Event; - -/// A trait to allow sending statistics events -#[cfg_attr(test, automock)] -pub trait Sender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option<Result<(), SendError<Event>>>>; -} - -/// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. -/// -/// It uses a channel sender to send the statistic events. 
The channel is created by a -/// [`statistics::Keeper`](crate::statistics::keeper::Keeper) -#[allow(clippy::module_name_repetitions)] -pub struct ChannelSender { - pub(crate) sender: mpsc::Sender<Event>, -} - -impl Sender for ChannelSender { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option<Result<(), SendError<Event>>>> { - async move { Some(self.sender.send(event).await) }.boxed() - } -} diff --git a/packages/udp-tracker-core/src/statistics/keeper.rs b/packages/udp-tracker-core/src/statistics/keeper.rs deleted file mode 100644 index dac7e7541..000000000 --- a/packages/udp-tracker-core/src/statistics/keeper.rs +++ /dev/null @@ -1,77 +0,0 @@ -use tokio::sync::mpsc; - -use super::event::listener::dispatch_events; -use super::event::sender::{ChannelSender, Sender}; -use super::event::Event; -use super::repository::Repository; - -const CHANNEL_BUFFER_SIZE: usize = 65_535; - -/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). -/// -/// It actively listen to new statistics events. When it receives a new event -/// it accordingly increases the counters. 
-pub struct Keeper { - pub repository: Repository, -} - -impl Default for Keeper { - fn default() -> Self { - Self::new() - } -} - -impl Keeper { - #[must_use] - pub fn new() -> Self { - Self { - repository: Repository::new(), - } - } - - #[must_use] - pub fn new_active_instance() -> (Box<dyn Sender>, Repository) { - let mut stats_tracker = Self::new(); - - let stats_event_sender = stats_tracker.run_event_listener(); - - (stats_event_sender, stats_tracker.repository) - } - - pub fn run_event_listener(&mut self) -> Box<dyn Sender> { - let (sender, receiver) = mpsc::channel::<Event>(CHANNEL_BUFFER_SIZE); - - let stats_repository = self.repository.clone(); - - tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); - - Box::new(ChannelSender { sender }) - } -} - -#[cfg(test)] -mod tests { - use crate::statistics::event::Event; - use crate::statistics::keeper::Keeper; - use crate::statistics::metrics::Metrics; - - #[tokio::test] - async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::new(); - - let stats = stats_tracker.repository.get_stats().await; - - assert_eq!(stats.udp4_announces_handled, Metrics::default().udp4_announces_handled); - } - - #[tokio::test] - async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = Keeper::new(); - - let event_sender = stats_tracker.run_event_listener(); - - let result = event_sender.send_event(Event::Udp4Connect).await; - - assert!(result.is_some()); - } -} diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index 1b3805288..98906a596 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -1,28 +1,124 @@ -/// Metrics collected by the tracker. 
-/// -/// - Number of connections handled -/// - Number of `announce` requests handled -/// - Number of `scrape` request handled -/// -/// These metrics are collected for each connection type: UDP and HTTP -/// and also for each IP version used by the peers: IPv4 and IPv6. -#[derive(Debug, PartialEq, Default)] +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::statistics::UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; + +#[derive(Debug, PartialEq, Default, Serialize)] pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// This function returns an error if the metric does not exist and it + /// cannot be created. + pub fn increase_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// This function returns an error if the metric does not exist and it + /// cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } +} + +impl Metrics { /// Total number of UDP (UDP tracker) connections from IPv4 peers. 
- pub udp4_connections_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_connections_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. - pub udp4_announces_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. - pub udp4_scrapes_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. - pub udp6_connections_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_connections_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. 
- pub udp6_announces_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. - pub udp6_scrapes_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() as u64 + } } diff --git a/packages/udp-tracker-core/src/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs index 939a41061..fec76069e 100644 --- a/packages/udp-tracker-core/src/statistics/mod.rs +++ b/packages/udp-tracker-core/src/statistics/mod.rs @@ -1,6 +1,24 @@ pub mod event; -pub mod keeper; pub mod metrics; pub mod repository; pub mod services; -pub mod setup; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::unit::Unit; + +const UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_core_requests_received_total"; + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP requests received")), + ); + + metrics +} diff --git a/packages/udp-tracker-core/src/statistics/repository.rs b/packages/udp-tracker-core/src/statistics/repository.rs index f7609e5c2..ceee0e369 100644 --- 
a/packages/udp-tracker-core/src/statistics/repository.rs +++ b/packages/udp-tracker-core/src/statistics/repository.rs @@ -1,7 +1,12 @@ use std::sync::Arc; use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use super::describe_metrics; use super::metrics::Metrics; /// A repository for the tracker metrics. @@ -20,7 +25,7 @@ impl Repository { #[must_use] pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(Metrics::default())), + stats: Arc::new(RwLock::new(describe_metrics())), } } @@ -28,39 +33,22 @@ impl Repository { self.stats.read().await } - pub async fn increase_udp4_connections(&self) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. + pub async fn increase_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.udp4_connections_handled += 1; - drop(stats_lock); - } - pub async fn increase_udp4_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_announces_handled += 1; - drop(stats_lock); - } + let result = stats_lock.increase_counter(metric_name, labels, now); - pub async fn increase_udp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_scrapes_handled += 1; drop(stats_lock); - } - pub async fn increase_udp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_scrapes(&self) { - let mut stats_lock = 
self.stats.write().await; - stats_lock.udp6_scrapes_handled += 1; - drop(stats_lock); + result } } diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 56814f5d5..18a80bad1 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -9,7 +9,7 @@ //! //! The factory function builds two structs: //! -//! - An statistics event [`Sender`](crate::statistics::event::sender::Sender) +//! - An event [`Sender`](crate::event::sender::Sender) //! - An statistics [`Repository`] //! //! ```text @@ -39,7 +39,7 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -50,7 +50,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: AggregateActiveSwarmMetadata, /// Application level metrics. Usage statistics/metrics. 
/// @@ -63,20 +63,13 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, stats_repository: Arc<Repository>, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { - // UDPv4 - udp4_connections_handled: stats.udp4_connections_handled, - udp4_announces_handled: stats.udp4_announces_handled, - udp4_scrapes_handled: stats.udp4_scrapes_handled, - // UDPv6 - udp6_connections_handled: stats.udp6_connections_handled, - udp6_announces_handled: stats.udp6_announces_handled, - udp6_scrapes_handled: stats.udp6_scrapes_handled, + metric_collection: stats.metric_collection.clone(), }, } } @@ -87,34 +80,25 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - use crate::statistics; + use crate::statistics::describe_metrics; + use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; - pub fn tracker_configuration() -> Configuration { - configuration::ephemeral() - } - #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let config = tracker_configuration(); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let (_udp_core_stats_event_sender, udp_core_stats_repository) = - crate::statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_core_stats_repository = Arc::new(udp_core_stats_repository); + let repository = 
Arc::new(Repository::new()); - let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), udp_core_stats_repository.clone()).await; + let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), repository.clone()).await; assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), - protocol_metrics: statistics::metrics::Metrics::default(), + torrents_metrics: AggregateActiveSwarmMetadata::default(), + protocol_metrics: describe_metrics(), } ); } diff --git a/packages/udp-tracker-core/src/statistics/setup.rs b/packages/udp-tracker-core/src/statistics/setup.rs deleted file mode 100644 index d3114a75e..000000000 --- a/packages/udp-tracker-core/src/statistics/setup.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! Setup for the tracker statistics. -//! -//! The [`factory`] function builds the structs needed for handling the tracker metrics. -use crate::statistics; - -/// It builds the structs needed for handling the tracker metrics. -/// -/// It returns: -/// -/// - An statistics event [`Sender`](crate::statistics::event::sender::Sender) that allows you to send events related to statistics. -/// - An statistics [`Repository`](crate::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. -/// -/// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics -/// events are sent are received but not dispatched to the handler. 
-#[must_use] -pub fn factory( - tracker_usage_statistics: bool, -) -> ( - Option<Box<dyn statistics::event::sender::Sender>>, - statistics::repository::Repository, -) { - let mut stats_event_sender = None; - - let mut stats_tracker = statistics::keeper::Keeper::new(); - - if tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_event_listener()); - } - - (stats_event_sender, stats_tracker.repository) -} - -#[cfg(test)] -mod test { - use super::factory; - - #[tokio::test] - async fn should_not_send_any_event_when_statistics_are_disabled() { - let tracker_usage_statistics = false; - - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); - - assert!(stats_event_sender.is_none()); - } - - #[tokio::test] - async fn should_send_events_when_statistics_are_enabled() { - let tracker_usage_statistics = true; - - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); - - assert!(stats_event_sender.is_some()); - } -} diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index f8fcd2def..dc66572d8 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Bittorrent UDP tracker." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "server", "torrust", "tracker", "udp"] +keywords = [ "axum", "bittorrent", "server", "torrust", "tracker", "udp" ] license.workspace = true name = "torrust-udp-tracker-server" publish.workspace = true @@ -19,20 +19,24 @@ bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "../tracker-client" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } futures = "0" futures-util = "0" ringbuf = "0" +serde = "1.0.219" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } +tokio-util = "0.7.15" torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } -torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" -url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["v4"] } +url = { version = "2", features = [ "serde" ] } +uuid = { version = "1", features = [ "v4" ] } zerocopy = "0.7" 
[dev-dependencies] diff --git a/packages/udp-tracker-server/src/banning/event/handler.rs b/packages/udp-tracker-server/src/banning/event/handler.rs new file mode 100644 index 000000000..4876323a8 --- /dev/null +++ b/packages/udp-tracker-server/src/banning/event/handler.rs @@ -0,0 +1,47 @@ +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use tokio::sync::RwLock; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ErrorKind, Event}; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_IPS_BANNED_TOTAL; + +pub async fn handle_event( + event: Event, + ban_service: &Arc<RwLock<BanService>>, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { + if let Event::UdpError { + context, + kind: _, + error: ErrorKind::ConnectionCookie(_msg), + } = event + { + let mut ban_service = ban_service.write().await; + + ban_service.increase_counter(&context.client_socket_addr().ip()); + + update_metric_for_banned_ips_total(repository, ban_service.get_banned_ips_total(), now).await; + } +} + +#[allow(clippy::cast_precision_loss)] +async fn update_metric_for_banned_ips_total(repository: &Repository, ips_banned_total: usize, now: DurationSinceUnixEpoch) { + match repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + &LabelSet::default(), + ips_banned_total as f64, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + } +} diff --git a/packages/udp-tracker-server/src/banning/event/listener.rs b/packages/udp-tracker-server/src/banning/event/listener.rs new file mode 100644 index 000000000..0d579f912 --- /dev/null +++ b/packages/udp-tracker-server/src/banning/event/listener.rs @@ -0,0 +1,68 @@ +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use 
bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; +use tokio::sync::RwLock; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; + +use super::handler::handle_event; +use crate::event::receiver::Receiver; +use crate::statistics::repository::Repository; +use crate::CurrentClock; + +#[must_use] +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + ban_service: &Arc<RwLock<BanService>>, + repository: &Arc<Repository>, +) -> JoinHandle<()> { + let ban_service_clone = ban_service.clone(); + let repository_clone = repository.clone(); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener (banning)"); + + tokio::spawn(async move { + dispatch_events(receiver, cancellation_token, ban_service_clone, repository_clone).await; + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener (banning) finished"); + }) +} + +async fn dispatch_events( + mut receiver: Receiver, + cancellation_token: CancellationToken, + ban_service: Arc<RwLock<BanService>>, + repository: Arc<Repository>, +) { + loop { + tokio::select! 
{ + biased; + + () = cancellation_token.cancelled() => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down UDP tracker server event listener."); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &ban_service, &repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server receiver (banning) closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server receiver (banning) lagged by {} events.", n); + } + } + } + } + } + } + } +} diff --git a/packages/udp-tracker-server/src/banning/event/mod.rs b/packages/udp-tracker-server/src/banning/event/mod.rs new file mode 100644 index 000000000..dae683398 --- /dev/null +++ b/packages/udp-tracker-server/src/banning/event/mod.rs @@ -0,0 +1,2 @@ +pub mod handler; +pub mod listener; diff --git a/packages/udp-tracker-server/src/banning/mod.rs b/packages/udp-tracker-server/src/banning/mod.rs new file mode 100644 index 000000000..53f112654 --- /dev/null +++ b/packages/udp-tracker-server/src/banning/mod.rs @@ -0,0 +1 @@ +pub mod event; diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index 36ad0e671..365db4ca7 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -2,24 +2,53 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; +use crate::event::bus::EventBus; +use crate::event::sender::Broadcaster; +use crate::event::{self}; use crate::statistics; +use crate::statistics::repository::Repository; pub struct UdpTrackerServerContainer { - pub udp_server_stats_event_sender: Arc<Option<Box<dyn statistics::event::sender::Sender>>>, - pub udp_server_stats_repository: Arc<statistics::repository::Repository>, + pub event_bus: Arc<event::bus::EventBus>, + pub 
stats_event_sender: crate::event::sender::Sender, + pub stats_repository: Arc<statistics::repository::Repository>, } impl UdpTrackerServerContainer { #[must_use] pub fn initialize(core_config: &Arc<Core>) -> Arc<Self> { - let (udp_server_stats_event_sender, udp_server_stats_repository) = - statistics::setup::factory(core_config.tracker_usage_statistics); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); - let udp_server_stats_repository = Arc::new(udp_server_stats_repository); + let udp_tracker_server_services = UdpTrackerServerServices::initialize(core_config); Arc::new(Self { - udp_server_stats_event_sender: udp_server_stats_event_sender.clone(), - udp_server_stats_repository: udp_server_stats_repository.clone(), + event_bus: udp_tracker_server_services.event_bus.clone(), + stats_event_sender: udp_tracker_server_services.stats_event_sender.clone(), + stats_repository: udp_tracker_server_services.stats_repository.clone(), + }) + } +} + +pub struct UdpTrackerServerServices { + pub event_bus: Arc<event::bus::EventBus>, + pub stats_event_sender: crate::event::sender::Sender, + pub stats_repository: Arc<statistics::repository::Repository>, +} + +impl UdpTrackerServerServices { + #[must_use] + pub fn initialize(core_config: &Arc<Core>) -> Arc<Self> { + let udp_server_broadcaster = Broadcaster::default(); + let udp_server_stats_repository = Arc::new(Repository::new()); + let udp_server_stats_event_bus = Arc::new(EventBus::new( + core_config.tracker_usage_statistics.into(), + udp_server_broadcaster.clone(), + )); + + let udp_server_stats_event_sender = udp_server_stats_event_bus.sender(); + + Arc::new(Self { + event_bus: udp_server_stats_event_bus.clone(), + stats_event_sender: udp_server_stats_event_sender.clone(), + stats_repository: udp_server_stats_repository.clone(), }) } } diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 158e39a7e..13e18ba9b 100644 --- 
a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -1,12 +1,13 @@ use std::net::SocketAddr; use std::sync::Arc; -use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; -use torrust_tracker_primitives::peer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::container::UdpTrackerServerContainer; use crate::server::spawner::Spawner; @@ -22,21 +23,10 @@ where pub container: Arc<EnvContainer>, pub registar: Registar, pub server: Server<S>, -} - -impl<S> Environment<S> -where - S: std::fmt::Debug + std::fmt::Display, -{ - /// Add a torrent to the tracker - #[allow(dead_code)] - pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container - .tracker_core_container - .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None); - } + pub udp_core_event_listener_job: Option<JoinHandle<()>>, + pub udp_server_stats_event_listener_job: Option<JoinHandle<()>>, + pub udp_server_banning_event_listener_job: Option<JoinHandle<()>>, + pub cancellation_token: CancellationToken, } impl Environment<Stopped> { @@ -55,9 +45,15 @@ impl Environment<Stopped> { container, registar: Registar::default(), server, + udp_core_event_listener_job: None, + udp_server_stats_event_listener_job: None, + udp_server_banning_event_listener_job: None, + cancellation_token: CancellationToken::new(), } } + /// Starts the test environment and return a running environment. + /// /// # Panics /// /// Will panic if it cannot start the server. 
@@ -65,19 +61,48 @@ impl Environment<Stopped> { pub async fn start(self) -> Environment<Running> { let cookie_lifetime = self.container.udp_tracker_core_container.udp_tracker_config.cookie_lifetime; + // Start the UDP tracker core event listener + let udp_core_event_listener_job = Some(bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( + self.container.udp_tracker_core_container.event_bus.receiver(), + self.cancellation_token.clone(), + &self.container.udp_tracker_core_container.stats_repository, + )); + + // Start the UDP tracker server event listener (statistics) + let udp_server_stats_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( + self.container.udp_tracker_server_container.event_bus.receiver(), + self.cancellation_token.clone(), + &self.container.udp_tracker_server_container.stats_repository, + )); + + // Start the UDP tracker server event listener (banning) + let udp_server_banning_event_listener_job = Some(crate::banning::event::listener::run_event_listener( + self.container.udp_tracker_server_container.event_bus.receiver(), + self.cancellation_token.clone(), + &self.container.udp_tracker_core_container.ban_service, + &self.container.udp_tracker_server_container.stats_repository, + )); + + // Start the UDP tracker server + let server = self + .server + .start( + self.container.udp_tracker_core_container.clone(), + self.container.udp_tracker_server_container.clone(), + self.registar.give_form(), + cookie_lifetime, + ) + .await + .expect("Failed to start the UDP tracker server"); + Environment { container: self.container.clone(), registar: self.registar.clone(), - server: self - .server - .start( - self.container.udp_tracker_core_container.clone(), - self.container.udp_tracker_server_container.clone(), - self.registar.give_form(), - cookie_lifetime, - ) - .await - .unwrap(), + server, + udp_core_event_listener_job, + udp_server_stats_event_listener_job, + udp_server_banning_event_listener_job, + 
cancellation_token: self.cancellation_token, } } } @@ -89,22 +114,51 @@ impl Environment<Running> { pub async fn new(configuration: &Arc<Configuration>) -> Self { tokio::time::timeout(DEFAULT_TIMEOUT, Environment::<Stopped>::new(configuration).start()) .await - .expect("it should create an environment within the timeout") + .expect("Failed to create a UDP tracker server running environment within the timeout") } + /// Stops the test environment and return a stopped environment. + /// /// # Panics /// /// Will panic if it cannot stop the service within the timeout. #[allow(dead_code)] pub async fn stop(self) -> Environment<Stopped> { - let stopped = tokio::time::timeout(DEFAULT_TIMEOUT, self.server.stop()) + // Stop the UDP tracker core event listener + if let Some(udp_core_event_listener_job) = self.udp_core_event_listener_job { + // todo: send a message to the event listener to stop and wait for + // it to finish + udp_core_event_listener_job.abort(); + } + + // Stop the UDP tracker server event listener (statistics) + if let Some(udp_server_stats_event_listener_job) = self.udp_server_stats_event_listener_job { + // todo: send a message to the event listener to stop and wait for + // it to finish + udp_server_stats_event_listener_job.abort(); + } + + // Stop the UDP tracker server event listener (banning) + if let Some(udp_server_banning_event_listener_job) = self.udp_server_banning_event_listener_job { + // todo: send a message to the event listener to stop and wait for + // it to finish + udp_server_banning_event_listener_job.abort(); + } + + // Stop the UDP tracker server + let server = tokio::time::timeout(DEFAULT_TIMEOUT, self.server.stop()) .await - .expect("it should stop the environment within the timeout"); + .expect("Failed to stop the UDP tracker server within the timeout") + .expect("Failed to stop the UDP tracker server"); Environment { container: self.container, registar: Registar::default(), - server: stopped.expect("it should stop the udp tracker 
service"), + server, + udp_core_event_listener_job: None, + udp_server_stats_event_listener_job: None, + udp_server_banning_event_listener_job: None, + cancellation_token: self.cancellation_token, } } @@ -130,8 +184,18 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); - let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, &udp_tracker_config); + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &swarm_coordination_registry_container, + )); + + let udp_tracker_core_container = + UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); Self { diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index 93caf6853..d260ebfd4 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -1,59 +1,55 @@ //! Error types for the UDP server. 
+use std::fmt::Display; use std::panic::Location; -use aquatic_udp_protocol::{ConnectionId, RequestParseError}; +use aquatic_udp_protocol::{ConnectionId, RequestParseError, TransactionId}; use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; use derive_more::derive::Display; use thiserror::Error; -use torrust_tracker_located_error::LocatedError; #[derive(Display, Debug)] #[display(":?")] pub struct ConnectionCookie(pub ConnectionId); /// Error returned by the UDP server. -#[derive(Error, Debug)] +#[derive(Error, Debug, Clone)] pub enum Error { /// Error returned when the request is invalid. - #[error("error when phrasing request: {request_parse_error:?}")] - RequestParseError { request_parse_error: RequestParseError }, + #[error("error parsing request: {request_parse_error:?}")] + InvalidRequest { request_parse_error: SendableRequestParseError }, /// Error returned when the domain tracker returns an announce error. #[error("tracker announce error: {source}")] - UdpAnnounceError { source: UdpAnnounceError }, + AnnounceFailed { source: UdpAnnounceError }, /// Error returned when the domain tracker returns an scrape error. #[error("tracker scrape error: {source}")] - UdpScrapeError { source: UdpScrapeError }, + ScrapeFailed { source: UdpScrapeError }, /// Error returned from a third-party library (`aquatic_udp_protocol`). #[error("internal server error: {message}, {location}")] - InternalServer { + Internal { location: &'static Location<'static>, message: String, }, - /// Error returned when the request is invalid. - #[error("bad request: {source}")] - BadRequest { - source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - }, - /// Error returned when tracker requires authentication. #[error("domain tracker requires authentication but is not supported in current UDP implementation. 
Location: {location}")] - TrackerAuthenticationRequired { location: &'static Location<'static> }, + AuthRequired { location: &'static Location<'static> }, } impl From<RequestParseError> for Error { fn from(request_parse_error: RequestParseError) -> Self { - Self::RequestParseError { request_parse_error } + Self::InvalidRequest { + request_parse_error: request_parse_error.into(), + } } } impl From<UdpAnnounceError> for Error { fn from(udp_announce_error: UdpAnnounceError) -> Self { - Self::UdpAnnounceError { + Self::AnnounceFailed { source: udp_announce_error, } } @@ -61,8 +57,44 @@ impl From<UdpAnnounceError> for Error { impl From<UdpScrapeError> for Error { fn from(udp_scrape_error: UdpScrapeError) -> Self { - Self::UdpScrapeError { + Self::ScrapeFailed { source: udp_scrape_error, } } } + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct SendableRequestParseError { + pub message: String, + pub opt_connection_id: Option<ConnectionId>, + pub opt_transaction_id: Option<TransactionId>, +} + +impl Display for SendableRequestParseError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "SendableRequestParseError: message: {}, connection_id: {:?}, transaction_id: {:?}", + self.message, self.opt_connection_id, self.opt_transaction_id + ) + } +} + +impl From<RequestParseError> for SendableRequestParseError { + fn from(request_parse_error: RequestParseError) -> Self { + let (message, opt_connection_id, opt_transaction_id) = match request_parse_error { + RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } => ((*err).to_string(), Some(connection_id), Some(transaction_id)), + RequestParseError::Unsendable { err } => (err.to_string(), None, None), + }; + + Self { + message, + opt_connection_id, + opt_transaction_id, + } + } +} diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs new file mode 100644 index 000000000..a7634d58e --- /dev/null +++ 
b/packages/udp-tracker-server/src/event.rs @@ -0,0 +1,192 @@ +use std::fmt; +use std::net::SocketAddr; +use std::time::Duration; + +use aquatic_udp_protocol::AnnounceRequest; +use bittorrent_tracker_core::error::{AnnounceError, ScrapeError}; +use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; +use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::label_name; +use torrust_tracker_primitives::service_binding::ServiceBinding; + +use crate::error::Error; + +/// A UDP server event. +#[derive(Debug, Clone, PartialEq)] +pub enum Event { + UdpRequestReceived { + context: ConnectionContext, + }, + UdpRequestAborted { + context: ConnectionContext, + }, + UdpRequestBanned { + context: ConnectionContext, + }, + UdpRequestAccepted { + context: ConnectionContext, + kind: UdpRequestKind, + }, + UdpResponseSent { + context: ConnectionContext, + kind: UdpResponseKind, + req_processing_time: Duration, + }, + UdpError { + context: ConnectionContext, + kind: Option<UdpRequestKind>, + error: ErrorKind, + }, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum UdpRequestKind { + Connect, + Announce { announce_request: AnnounceRequest }, + Scrape, +} + +impl From<UdpRequestKind> for LabelValue { + fn from(kind: UdpRequestKind) -> Self { + match kind { + UdpRequestKind::Connect => LabelValue::new("connect"), + UdpRequestKind::Announce { .. } => LabelValue::new("announce"), + UdpRequestKind::Scrape => LabelValue::new("scrape"), + } + } +} + +impl fmt::Display for UdpRequestKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let proto_str = match self { + UdpRequestKind::Connect => "connect", + UdpRequestKind::Announce { .. 
} => "announce", + UdpRequestKind::Scrape => "scrape", + }; + write!(f, "{proto_str}") + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum UdpResponseKind { + Ok { + req_kind: UdpRequestKind, + }, + + /// There was an error handling the request. The error contains the request + /// kind if the request was parsed successfully. + Error { + opt_req_kind: Option<UdpRequestKind>, + }, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ConnectionContext { + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, +} + +impl ConnectionContext { + #[must_use] + pub fn new(client_socket_addr: SocketAddr, server_service_binding: ServiceBinding) -> Self { + Self { + client_socket_addr, + server_service_binding, + } + } + + #[must_use] + pub fn client_socket_addr(&self) -> SocketAddr { + self.client_socket_addr + } + + #[must_use] + pub fn server_socket_addr(&self) -> SocketAddr { + self.server_service_binding.bind_address() + } +} + +impl From<ConnectionContext> for LabelSet { + fn from(connection_context: ConnectionContext) -> Self { + LabelSet::from([ + ( + label_name!("server_binding_protocol"), + LabelValue::new(&connection_context.server_service_binding.protocol().to_string()), + ), + ( + label_name!("server_binding_ip"), + LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), + ), + ( + label_name!("server_binding_address_ip_type"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_type().to_string()), + ), + ( + label_name!("server_binding_address_ip_family"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_family().to_string()), + ), + ( + label_name!("server_binding_port"), + LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), + ), + ]) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub enum ErrorKind { + RequestParse(String), + ConnectionCookie(String), + Whitelist(String), + Database(String), + 
InternalServer(String), + BadRequest(String), + TrackerAuthentication(String), +} + +impl From<Error> for ErrorKind { + fn from(error: Error) -> Self { + match error { + Error::InvalidRequest { request_parse_error } => Self::RequestParse(request_parse_error.to_string()), + Error::AnnounceFailed { source } => match source { + UdpAnnounceError::ConnectionCookieError { source } => Self::ConnectionCookie(source.to_string()), + UdpAnnounceError::TrackerCoreAnnounceError { source } => match source { + AnnounceError::Whitelist(whitelist_error) => Self::Whitelist(whitelist_error.to_string()), + AnnounceError::Database(error) => Self::Database(error.to_string()), + }, + UdpAnnounceError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), + }, + Error::ScrapeFailed { source } => match source { + UdpScrapeError::ConnectionCookieError { source } => Self::ConnectionCookie(source.to_string()), + UdpScrapeError::TrackerCoreScrapeError { source } => match source { + ScrapeError::Whitelist(whitelist_error) => Self::Whitelist(whitelist_error.to_string()), + }, + UdpScrapeError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), + }, + Error::Internal { location: _, message } => Self::InternalServer(message.clone()), + Error::AuthRequired { location } => Self::TrackerAuthentication(location.to_string()), + } + } +} + +pub mod sender { + use std::sync::Arc; + + use super::Event; + + pub type Sender = Option<Arc<dyn torrust_tracker_events::sender::Sender<Event = Event>>>; + pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster<Event>; +} + +pub mod receiver { + use super::Event; + + pub type Receiver = Box<dyn torrust_tracker_events::receiver::Receiver<Event = Event>>; +} + +pub mod bus { + use crate::event::Event; + + pub type EventBus = torrust_tracker_events::bus::EventBus<Event>; +} diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 
e56e1d831..ea19611ce 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -11,12 +11,12 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_udp_tracker_core::services::announce::AnnounceService; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::error::Error; -use crate::statistics as server_statistics; -use crate::statistics::event::UdpResponseKind; +use crate::event::{ConnectionContext, Event, UdpRequestKind}; /// It handles the `Announce` request. /// @@ -26,12 +26,13 @@ use crate::statistics::event::UdpResponseKind; #[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( announce_service: &Arc<AnnounceService>, - remote_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request: &AnnounceRequest, core_config: &Arc<Core>, - opt_udp_server_stats_event_sender: &Arc<Option<Box<dyn server_statistics::event::sender::Sender>>>, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_valid_range: Range<f64>, -) -> Result<Response, (Error, TransactionId)> { +) -> Result<Response, (Error, TransactionId, UdpRequestKind)> { tracing::Span::current() .record("transaction_id", request.transaction_id.0.to_string()) .record("connection_id", request.connection_id.0.to_string()) @@ -40,30 +41,30 @@ pub async fn handle_announce( tracing::trace!("handle announce"); if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match remote_addr.ip() { - IpAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp4Request { - kind: UdpResponseKind::Announce, 
- }) - .await; - } - IpAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Announce, - }) - .await; - } - } + udp_server_stats_event_sender + .send(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + kind: UdpRequestKind::Announce { + announce_request: *request, + }, + }) + .await; } let announce_data = announce_service - .handle_announce(remote_addr, request, cookie_valid_range) + .handle_announce(client_socket_addr, server_service_binding, request, cookie_valid_range) .await - .map_err(|e| (e.into(), request.transaction_id))?; - - Ok(build_response(remote_addr, request, core_config, &announce_data)) + .map_err(|e| { + ( + e.into(), + request.transaction_id, + UdpRequestKind::Announce { + announce_request: *request, + }, + ) + })?; + + Ok(build_response(client_socket_addr, request, core_config, &announce_data)) } fn build_response( @@ -127,9 +128,9 @@ fn build_response( } #[cfg(test)] -mod tests { +pub(crate) mod tests { - mod announce_request { + pub mod announce_request { use std::net::Ipv4Addr; use std::num::NonZeroU16; @@ -142,7 +143,7 @@ mod tests { use crate::handlers::tests::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; - struct AnnounceRequestBuilder { + pub struct AnnounceRequestBuilder { request: AnnounceRequest, } @@ -215,17 +216,18 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use 
crate::handlers::handle_announce; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, MockUdpServerStatsEventSender, - TorrentPeerBuilder, }; - use crate::statistics as server_statistics; - use crate::statistics::event::UdpResponseKind; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -237,10 +239,12 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let client_socket_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -249,7 +253,8 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -260,10 +265,11 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let expected_peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) 
.updated_on(peers[0].updated) .into(); @@ -276,15 +282,18 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let client_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .into(); let response = handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -325,10 +334,12 @@ mod tests { let remote_client_port = 8081; let peer_address = Ipv4Addr::new(126, 0, 0, 2); - let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + let client_socket_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -337,7 +348,8 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, - 
remote_addr, + client_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -348,12 +360,13 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { + async fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -361,34 +374,40 @@ mod tests { let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv6 = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6, None); + in_memory_torrent_repository + .handle_announcement(&info_hash.0.into(), &peer_using_ipv6, None) + .await; } async fn announce_a_new_peer_using_ipv4( core_tracker_services: Arc<CoreTrackerServices>, core_udp_tracker_services: Arc<CoreUdpTrackerServices>, ) -> Response { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let _udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); + + let udp_server_stats_event_sender = event_bus.sender(); - let (udp_server_stats_event_sender, 
_udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let client_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .into(); handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &udp_server_stats_event_sender, @@ -403,7 +422,7 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); + add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository).await; let response = announce_a_new_peer_using_ipv4(Arc::new(core_tracker_services), Arc::new(core_udp_tracker_services)).await; @@ -419,24 +438,31 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { + let client_socket_addr = sample_ipv4_socket_address(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let announce_request = AnnounceRequestBuilder::default().into(); + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - 
.expect_send_event() - .with(eq(server_statistics::event::Event::Udp4Request { - kind: UdpResponseKind::Announce, + .expect_send() + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + kind: UdpRequestKind::Announce { announce_request }, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_server_stats_event_sender: Arc<Option<Box<dyn server_statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let udp_server_stats_event_sender: crate::event::sender::Sender = + Some(Arc::new(udp_server_stats_event_sender_mock)); let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_announce( &core_udp_tracker_services.announce_service, - sample_ipv4_socket_address(), - &AnnounceRequestBuilder::default().into(), + client_socket_addr, + server_service_binding, + &announce_request, &core_tracker_services.core_config, &udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -451,12 +477,13 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_issue_time, - TorrentPeerBuilder, }; #[tokio::test] @@ -464,15 +491,17 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - let 
client_ip = Ipv4Addr::new(127, 0, 0, 1); + let client_ip = Ipv4Addr::LOCALHOST; let client_port = 8080; let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let client_socket_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -481,7 +510,8 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -492,12 +522,13 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let expected_peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .updated_on(peers[0].updated) .into(); @@ -510,7 +541,7 @@ mod tests { mod using_ipv6 { use std::future; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ @@ -521,19 +552,23 @@ mod tests { use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use bittorrent_udp_tracker_core::event::bus::EventBus; + use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::announce::AnnounceService; use mockall::predicate::eq; use torrust_tracker_configuration::Core; + use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, - sample_issue_time, MockUdpServerStatsEventSender, TorrentPeerBuilder, + sample_issue_time, MockUdpServerStatsEventSender, }; - use crate::statistics as server_statistics; - use crate::statistics::event::UdpResponseKind; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -546,10 +581,12 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + 
.with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -558,7 +595,8 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -569,10 +607,11 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let expected_peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .updated_on(peers[0].updated) .into(); @@ -588,15 +627,18 @@ mod tests { let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .into(); let response = handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -637,10 +679,12 @@ mod tests { let remote_client_port = 8081; let peer_address = "126.0.0.1".parse().unwrap(); - 
let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + let client_socket_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -649,7 +693,8 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_service.udp_server_stats_event_sender, @@ -660,26 +705,28 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; // When using IPv6 the tracker converts the remote client ip into a IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { + async fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv4 = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - let _number_of_downloads_increased = - 
in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4, None); + in_memory_torrent_repository + .handle_announcement(&info_hash.0.into(), &peer_using_ipv4, None) + .await; } async fn announce_a_new_peer_using_ipv6( @@ -687,19 +734,28 @@ mod tests { announce_handler: Arc<AnnounceHandler>, whitelist_authorization: Arc<whitelist::authorization::WhitelistAuthorization>, ) -> Response { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let udp_core_broadcaster = Broadcaster::default(); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = core_event_bus.sender(); + + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let udp_server_stats_event_sender = server_event_bus.sender(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + + let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + 
.with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .into(); let announce_service = Arc::new(AnnounceService::new( @@ -710,7 +766,8 @@ mod tests { handle_announce( &announce_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &core_config, &udp_server_stats_event_sender, @@ -725,7 +782,7 @@ mod tests { let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); + add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository).await; let response = announce_a_new_peer_using_ipv6( core_tracker_services.core_config.clone(), @@ -745,29 +802,33 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + + let announce_request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) + .into(); + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() - .with(eq(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Announce, + .expect_send() + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + kind: UdpRequestKind::Announce { announce_request }, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_server_stats_event_sender: Arc<Option<Box<dyn server_statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); + 
.returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let udp_server_stats_event_sender: crate::event::sender::Sender = + Some(Arc::new(udp_server_stats_event_sender_mock)); let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); - let remote_addr = sample_ipv6_remote_addr(); - - let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_service_binding, &announce_request, &core_tracker_services.core_config, &udp_server_stats_event_sender, @@ -785,82 +846,107 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use bittorrent_udp_tracker_core::services::announce::AnnounceService; - use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; - use mockall::predicate::eq; + use bittorrent_udp_tracker_core::{self, event as core_event}; + use mockall::predicate::{self, eq}; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use 
crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ sample_cookie_valid_range, sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, TrackerConfigurationBuilder, }; - use crate::statistics as server_statistics; - use crate::statistics::event::UdpResponseKind; + use crate::tests::{announce_events_match, sample_peer}; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); + let loopback_ipv4 = Ipv4Addr::LOCALHOST; + let loopback_ipv6 = Ipv6Addr::LOCALHOST; + + let client_ip_v4 = loopback_ipv4; + let client_ip_v6 = loopback_ipv6; + let client_port = 8080; + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let mut announcement = sample_peer(); + announcement.peer_id = peer_id; + announcement.peer_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7e00, 1)), client_port); + + let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let mut server_socket_addr = config.udp_trackers.clone().unwrap()[0].bind_address; + if server_socket_addr.port() == 0 { + // Port 0 cannot be use in service binding + server_socket_addr.set_port(6969); + } + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let server_service_binding_clone = server_service_binding.clone(); + let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let 
db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock - .expect_send_event() - .with(eq(core_statistics::event::Event::Udp6Announce)) + .expect_send() + .with(predicate::function(move |event| { + let expected_event = core_event::Event::UdpAnnounce { + connection: core_event::ConnectionContext::new( + client_socket_addr, + server_service_binding.clone(), + ), + info_hash: info_hash.into(), + announcement, + }; + + announce_events_match(event, &expected_event) + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_core_stats_event_sender: Arc<Option<Box<dyn core_statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let udp_core_stats_event_sender: bittorrent_udp_tracker_core::event::sender::Sender = + Some(Arc::new(udp_core_stats_event_sender_mock)); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() - .with(eq(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Announce, + .expect_send() + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_service_binding_clone.clone()), + kind: UdpRequestKind::Announce { + announce_request: request, + }, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_server_stats_event_sender: Arc<Option<Box<dyn server_statistics::event::sender::Sender>>> = - 
Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let udp_server_stats_event_sender: crate::event::sender::Sender = + Some(Arc::new(udp_server_stats_event_sender_mock)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); - let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); - let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - - let client_ip_v4 = loopback_ipv4; - let client_ip_v6 = loopback_ipv6; - let client_port = 8080; - - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip_v4) - .with_port(client_port) - .into(); - let core_config = Arc::new(config.core.clone()); let announce_service = Arc::new(AnnounceService::new( @@ -871,7 +957,8 @@ mod tests { handle_announce( &announce_service, - remote_addr, + client_socket_addr, + server_service_binding_clone, &request, &core_config, &udp_server_stats_event_sender, @@ -880,7 +967,7 @@ mod tests { .await .unwrap(); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()).await; let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 93d3bb6f1..961189945 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -1,46 +1,39 @@ //! 
UDP tracker connect handler. -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, ConnectionId, Response}; use bittorrent_udp_tracker_core::services::connect::ConnectService; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; -use crate::statistics as server_statistics; -use crate::statistics::event::UdpResponseKind; +use crate::event::{ConnectionContext, Event, UdpRequestKind}; /// It handles the `Connect` request. #[instrument(fields(transaction_id), skip(connect_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( - remote_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request: &ConnectRequest, connect_service: &Arc<ConnectService>, - opt_udp_server_stats_event_sender: &Arc<Option<Box<dyn server_statistics::event::sender::Sender>>>, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_issue_time: f64, ) -> Response { tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); tracing::trace!("handle connect"); if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match remote_addr.ip() { - IpAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp4Request { - kind: UdpResponseKind::Connect, - }) - .await; - } - IpAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Connect, - }) - .await; - } - } + udp_server_stats_event_sender + .send(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + kind: UdpRequestKind::Connect, + }) + .await; } - let connection_id = connect_service.handle_connect(remote_addr, cookie_issue_time).await; + let connection_id = 
connect_service + .handle_connect(client_socket_addr, server_service_binding, cookie_issue_time) + .await; build_response(*request, connection_id) } @@ -60,21 +53,25 @@ mod tests { mod connect_request { use std::future; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use bittorrent_udp_tracker_core::connection_cookie::make; + use bittorrent_udp_tracker_core::event as core_event; + use bittorrent_udp_tracker_core::event::bus::EventBus; + use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; - use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::handle_connect; use crate::handlers::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, }; - use crate::statistics as server_statistics; - use crate::statistics::event::UdpResponseKind; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -84,12 +81,20 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + + let udp_core_broadcaster = 
Broadcaster::default(); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = core_event_bus.sender(); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); + + let udp_server_stats_event_sender = server_event_bus.sender(); let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), @@ -99,6 +104,7 @@ mod tests { let response = handle_connect( sample_ipv4_remote_addr(), + server_service_binding, &request, &connect_service, &udp_server_stats_event_sender, @@ -117,12 +123,20 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + + let udp_core_broadcaster = Broadcaster::default(); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = core_event_bus.sender(); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, 
+ udp_server_broadcaster.clone(), + )); + + let udp_server_stats_event_sender = server_event_bus.sender(); let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), @@ -132,6 +146,7 @@ mod tests { let response = handle_connect( sample_ipv4_remote_addr(), + server_service_binding, &request, &connect_service, &udp_server_stats_event_sender, @@ -150,12 +165,21 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + + let udp_core_broadcaster = Broadcaster::default(); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let udp_core_stats_event_sender = core_event_bus.sender(); + + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); + + let udp_server_stats_event_sender = server_event_bus.sender(); let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), @@ -165,6 +189,7 @@ mod tests { let response = handle_connect( sample_ipv6_remote_addr(), + server_service_binding, &request, &connect_service, &udp_server_stats_event_sender, @@ -183,32 +208,37 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let client_socket_addr 
= sample_ipv4_socket_address(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock - .expect_send_event() - .with(eq(core_statistics::event::Event::Udp4Connect)) + .expect_send() + .with(eq(core_event::Event::UdpConnect { + connection: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_core_stats_event_sender: Arc<Option<Box<dyn core_statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let udp_core_stats_event_sender: bittorrent_udp_tracker_core::event::sender::Sender = + Some(Arc::new(udp_core_stats_event_sender_mock)); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() - .with(eq(server_statistics::event::Event::Udp4Request { - kind: UdpResponseKind::Connect, + .expect_send() + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + kind: UdpRequestKind::Connect, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_server_stats_event_sender: Arc<Option<Box<dyn server_statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - - let client_socket_address = sample_ipv4_socket_address(); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let udp_server_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(udp_server_stats_event_sender_mock)); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); handle_connect( - 
client_socket_address, + client_socket_addr, + server_service_binding, &sample_connect_request(), &connect_service, &udp_server_stats_event_sender, @@ -219,30 +249,37 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock - .expect_send_event() - .with(eq(core_statistics::event::Event::Udp6Connect)) + .expect_send() + .with(eq(core_event::Event::UdpConnect { + connection: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_core_stats_event_sender: Arc<Option<Box<dyn core_statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let udp_core_stats_event_sender: bittorrent_udp_tracker_core::event::sender::Sender = + Some(Arc::new(udp_core_stats_event_sender_mock)); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() - .with(eq(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Connect, + .expect_send() + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + kind: UdpRequestKind::Connect, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_server_stats_event_sender: Arc<Option<Box<dyn server_statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); + .returning(|_| 
Box::pin(future::ready(Some(Ok(1))))); + let udp_server_stats_event_sender: crate::event::sender::Sender = Some(Arc::new(udp_server_stats_event_sender_mock)); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); handle_connect( - sample_ipv6_remote_addr(), + client_socket_addr, + server_service_binding, &sample_connect_request(), &connect_service, &udp_server_stats_event_sender, diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index e4bd382da..7fb4141b2 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -1,79 +1,82 @@ //! UDP tracker error handling. use std::net::SocketAddr; use std::ops::Range; -use std::sync::Arc; -use aquatic_udp_protocol::{ErrorResponse, RequestParseError, Response, TransactionId}; -use bittorrent_udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint}; +use aquatic_udp_protocol::{ErrorResponse, Response, TransactionId}; use bittorrent_udp_tracker_core::{self, UDP_TRACKER_LOG_TARGET}; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; use crate::error::Error; -use crate::statistics as server_statistics; +use crate::event::{ConnectionContext, Event, UdpRequestKind}; #[allow(clippy::too_many_arguments)] #[instrument(fields(transaction_id), skip(opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_error( - remote_addr: SocketAddr, - local_addr: SocketAddr, + req_kind: Option<UdpRequestKind>, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request_id: Uuid, - opt_udp_server_stats_event_sender: &Arc<Option<Box<dyn server_statistics::event::sender::Sender>>>, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_valid_range: Range<f64>, - e: &Error, - transaction_id: Option<TransactionId>, + error: 
&Error, + opt_transaction_id: Option<TransactionId>, ) -> Response { tracing::trace!("handle error"); - match transaction_id { + let server_socket_addr = server_service_binding.bind_address(); + + log_error(error, client_socket_addr, server_socket_addr, opt_transaction_id, request_id); + + trigger_udp_error_event( + error, + client_socket_addr, + server_service_binding, + opt_udp_server_stats_event_sender, + req_kind, + ) + .await; + + Response::from(ErrorResponse { + transaction_id: opt_transaction_id.unwrap_or(TransactionId(I32::new(0))), + message: error.to_string().into(), + }) +} + +fn log_error( + error: &Error, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, + opt_transaction_id: Option<TransactionId>, + request_id: Uuid, +) { + match opt_transaction_id { Some(transaction_id) => { let transaction_id = transaction_id.0.to_string(); - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, %transaction_id, "response error"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %error, %client_socket_addr, %server_socket_addr, %request_id, %transaction_id, "response error"); } None => { - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, "response error"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %error, %client_socket_addr, %server_socket_addr, %request_id, "response error"); } } +} - let e = if let Error::RequestParseError { request_parse_error } = e { - match request_parse_error { - RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } => { - if let Err(e) = check(connection_id, gen_remote_fingerprint(&remote_addr), cookie_valid_range) { - (e.to_string(), Some(*transaction_id)) - } else { - ((*err).to_string(), Some(*transaction_id)) - } - } - RequestParseError::Unsendable { err } => (err.to_string(), transaction_id), - } - } else { - (e.to_string(), transaction_id) - }; - - if e.1.is_some() { - if let 
Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp4Error) - .await; - } - SocketAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp6Error) - .await; - } - } - } +async fn trigger_udp_error_event( + error: &Error, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, + req_kind: Option<UdpRequestKind>, +) { + if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { + udp_server_stats_event_sender + .send(Event::UdpError { + context: ConnectionContext::new(client_socket_addr, server_service_binding), + kind: req_kind, + error: error.clone().into(), + }) + .await; } - - Response::from(ErrorResponse { - transaction_id: e.1.unwrap_or(TransactionId(I32::new(0))), - message: e.0.into(), - }) } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 165b307e0..add576a89 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -13,21 +13,22 @@ use announce::handle_announce; use aquatic_udp_protocol::{Request, Response, TransactionId}; use bittorrent_tracker_core::MAX_SCRAPE_TORRENTS; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; -use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use connect::handle_connect; use error::handle_error; use scrape::handle_scrape; use torrust_tracker_clock::clock::Time; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; use uuid::Uuid; use super::RawRequest; use crate::container::UdpTrackerServerContainer; use crate::error::Error; +use crate::event::UdpRequestKind; use crate::CurrentClock; #[derive(Debug, 
Clone, PartialEq)] -pub(super) struct CookieTimeValues { +pub struct CookieTimeValues { pub(super) issue_time: f64, pub(super) valid_range: Range<f64>, } @@ -58,9 +59,9 @@ pub(crate) async fn handle_packet( udp_request: RawRequest, udp_tracker_core_container: Arc<UdpTrackerCoreContainer>, udp_tracker_server_container: Arc<UdpTrackerServerContainer>, - local_addr: SocketAddr, + server_service_binding: ServiceBinding, cookie_time_values: CookieTimeValues, -) -> Response { +) -> (Response, Option<UdpRequestKind>) { let request_id = Uuid::new_v4(); tracing::Span::current().record("request_id", request_id.to_string()); @@ -68,58 +69,64 @@ pub(crate) async fn handle_packet( let start_time = Instant::now(); - let response = + let (response, opt_req_kind) = match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(Error::from) { Ok(request) => match handle_request( request, udp_request.from, + server_service_binding.clone(), udp_tracker_core_container.clone(), udp_tracker_server_container.clone(), cookie_time_values.clone(), ) .await { - Ok(response) => return response, - Err((error, transaction_id)) => { - if let Error::UdpAnnounceError { - source: UdpAnnounceError::ConnectionCookieError { .. }, - } = error - { - // code-review: should we include `RequestParseError` and `BadRequest`? 
- let mut ban_service = udp_tracker_core_container.ban_service.write().await; - ban_service.increase_counter(&udp_request.from.ip()); - } - - handle_error( + Ok((response, req_kid)) => return (response, Some(req_kid)), + Err((error, transaction_id, req_kind)) => { + let response = handle_error( + Some(req_kind.clone()), udp_request.from, - local_addr, + server_service_binding, request_id, - &udp_tracker_server_container.udp_server_stats_event_sender, + &udp_tracker_server_container.stats_event_sender, cookie_time_values.valid_range.clone(), &error, Some(transaction_id), ) - .await + .await; + + (response, Some(req_kind)) } }, Err(e) => { - handle_error( + // The request payload could not be parsed, so we handle it as an error. + + let opt_transaction_id = if let Error::InvalidRequest { request_parse_error } = e.clone() { + request_parse_error.opt_transaction_id + } else { + None + }; + + let response = handle_error( + None, udp_request.from, - local_addr, + server_service_binding, request_id, - &udp_tracker_server_container.udp_server_stats_event_sender, + &udp_tracker_server_container.stats_event_sender, cookie_time_values.valid_range.clone(), &e, - None, + opt_transaction_id, ) - .await + .await; + + (response, None) } }; let latency = start_time.elapsed(); tracing::trace!(?latency, "responded"); - response + (response, opt_req_kind) } /// It dispatches the request to the correct handler. @@ -129,49 +136,65 @@ pub(crate) async fn handle_packet( /// If a error happens in the `handle_request` function, it will just return the `ServerError`. 
#[instrument(skip( request, - remote_addr, + client_socket_addr, + server_service_binding, udp_tracker_core_container, udp_tracker_server_container, cookie_time_values ))] pub async fn handle_request( request: Request, - remote_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, udp_tracker_core_container: Arc<UdpTrackerCoreContainer>, udp_tracker_server_container: Arc<UdpTrackerServerContainer>, cookie_time_values: CookieTimeValues, -) -> Result<Response, (Error, TransactionId)> { +) -> Result<(Response, UdpRequestKind), (Error, TransactionId, UdpRequestKind)> { tracing::trace!("handle request"); match request { - Request::Connect(connect_request) => Ok(handle_connect( - remote_addr, - &connect_request, - &udp_tracker_core_container.connect_service, - &udp_tracker_server_container.udp_server_stats_event_sender, - cookie_time_values.issue_time, - ) - .await), + Request::Connect(connect_request) => Ok(( + handle_connect( + client_socket_addr, + server_service_binding, + &connect_request, + &udp_tracker_core_container.connect_service, + &udp_tracker_server_container.stats_event_sender, + cookie_time_values.issue_time, + ) + .await, + UdpRequestKind::Connect, + )), Request::Announce(announce_request) => { - handle_announce( + match handle_announce( &udp_tracker_core_container.announce_service, - remote_addr, + client_socket_addr, + server_service_binding, &announce_request, - &udp_tracker_core_container.core_config, - &udp_tracker_server_container.udp_server_stats_event_sender, + &udp_tracker_core_container.tracker_core_container.core_config, + &udp_tracker_server_container.stats_event_sender, cookie_time_values.valid_range, ) .await + { + Ok(response) => Ok((response, UdpRequestKind::Announce { announce_request })), + Err(err) => Err(err), + } } Request::Scrape(scrape_request) => { - handle_scrape( + match handle_scrape( &udp_tracker_core_container.scrape_service, - remote_addr, + client_socket_addr, + server_service_binding, 
&scrape_request, - &udp_tracker_server_container.udp_server_stats_event_sender, + &udp_tracker_server_container.stats_event_sender, cookie_time_values.valid_range, ) .await + { + Ok(response) => Ok((response, UdpRequestKind::Scrape)), + Err(err) => Err(err), + } } } } @@ -183,28 +206,28 @@ pub(crate) mod tests { use std::ops::Range; use std::sync::Arc; - use aquatic_udp_protocol::{NumberOfBytes, PeerId}; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::gen_remote_fingerprint; + use bittorrent_udp_tracker_core::event::bus::EventBus; + use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::announce::AnnounceService; use bittorrent_udp_tracker_core::services::scrape::ScrapeService; - use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; + use bittorrent_udp_tracker_core::{self, event as core_event}; use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::mpsc::error::SendError; - use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_events::sender::SendError; use torrust_tracker_test_helpers::configuration; - use crate::{statistics as 
server_statistics, CurrentClock}; + use crate::event as server_event; pub(crate) struct CoreTrackerServices { pub core_config: Arc<Core>, @@ -220,7 +243,7 @@ pub(crate) mod tests { } pub(crate) struct ServerUdpTrackerServices { - pub udp_server_stats_event_sender: Arc<Option<Box<dyn server_statistics::event::sender::Sender>>>, + pub udp_server_stats_event_sender: crate::event::sender::Sender, } fn default_testing_tracker_configuration() -> Configuration { @@ -250,21 +273,26 @@ pub(crate) mod tests { let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let udp_core_broadcaster = Broadcaster::default(); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = core_event_bus.sender(); + + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = 
crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let udp_server_stats_event_sender = server_event_bus.sender(); let announce_service = Arc::new(AnnounceService::new( announce_handler.clone(), @@ -312,11 +340,11 @@ pub(crate) mod tests { } pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) } fn sample_ipv6_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) } pub(crate) fn sample_issue_time() -> f64 { @@ -327,52 +355,6 @@ pub(crate) mod tests { sample_issue_time() - 10.0..sample_issue_time() + 10.0 } - #[derive(Debug, Default)] - pub(crate) struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - #[must_use] - pub fn new() -> Self { - Self { - peer: peer::Peer { - updated: CurrentClock::now(), - ..Default::default() - }, - } - } - - #[must_use] - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - #[must_use] - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { - self.peer.peer_id = peer_id; - self - } - - #[must_use] - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes::new(left); - self - } - - #[must_use] - pub fn updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - #[must_use] - pub fn into(self) -> peer::Peer { - self.peer - } - } - pub(crate) struct TrackerConfigurationBuilder { configuration: Configuration, } @@ -397,15 +379,19 @@ pub(crate) mod tests { mock! 
{ pub(crate) UdpCoreStatsEventSender {} - impl core_statistics::event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: core_statistics::event::Event) -> BoxFuture<'static,Option<Result<(),SendError<core_statistics::event::Event> > > > ; + impl torrust_tracker_events::sender::Sender for UdpCoreStatsEventSender { + type Event = core_event::Event; + + fn send(&self, event: core_event::Event) -> BoxFuture<'static,Option<Result<usize,SendError<core_event::Event> > > > ; } } mock! { pub(crate) UdpServerStatsEventSender {} - impl server_statistics::event::sender::Sender for UdpServerStatsEventSender { - fn send_event(&self, event: server_statistics::event::Event) -> BoxFuture<'static,Option<Result<(),SendError<server_statistics::event::Event> > > > ; + impl torrust_tracker_events::sender::Sender for UdpServerStatsEventSender { + type Event = server_event::Event; + + fn send(&self, event: server_event::Event) -> BoxFuture<'static,Option<Result<usize,SendError<server_event::Event> > > > ; } } } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index c385718a2..8bac05c1e 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -1,5 +1,5 @@ //! UDP tracker scrape handler. 
-use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::ops::Range; use std::sync::Arc; @@ -9,12 +9,12 @@ use aquatic_udp_protocol::{ use bittorrent_udp_tracker_core::services::scrape::ScrapeService; use bittorrent_udp_tracker_core::{self}; use torrust_tracker_primitives::core::ScrapeData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::error::Error; -use crate::statistics as server_statistics; -use crate::statistics::event::UdpResponseKind; +use crate::event::{ConnectionContext, Event, UdpRequestKind}; /// It handles the `Scrape` request. /// @@ -24,11 +24,12 @@ use crate::statistics::event::UdpResponseKind; #[instrument(fields(transaction_id, connection_id), skip(scrape_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_scrape( scrape_service: &Arc<ScrapeService>, - remote_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request: &ScrapeRequest, - opt_udp_server_stats_event_sender: &Arc<Option<Box<dyn server_statistics::event::sender::Sender>>>, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_valid_range: Range<f64>, -) -> Result<Response, (Error, TransactionId)> { +) -> Result<Response, (Error, TransactionId, UdpRequestKind)> { tracing::Span::current() .record("transaction_id", request.transaction_id.0.to_string()) .record("connection_id", request.connection_id.0.to_string()); @@ -36,28 +37,18 @@ pub async fn handle_scrape( tracing::trace!("handle scrape"); if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match remote_addr.ip() { - IpAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp4Request { - kind: UdpResponseKind::Scrape, - }) - .await; - } - IpAddr::V6(_) => { - udp_server_stats_event_sender - 
.send_event(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Scrape, - }) - .await; - } - } + udp_server_stats_event_sender + .send(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + kind: UdpRequestKind::Scrape, + }) + .await; } let scrape_data = scrape_service - .handle_scrape(remote_addr, request, cookie_valid_range) + .handle_scrape(client_socket_addr, server_service_binding, request, cookie_valid_range) .await - .map_err(|e| (e.into(), request.transaction_id))?; + .map_err(|e| (e.into(), request.transaction_id, UdpRequestKind::Scrape))?; Ok(build_response(request, &scrape_data)) } @@ -92,7 +83,7 @@ fn build_response(request: &ScrapeRequest, scrape_data: &ScrapeData) -> Response mod tests { mod scrape_request { - use std::net::SocketAddr; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ @@ -101,11 +92,16 @@ mod tests { }; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + use crate::event::bus::EventBus; + use crate::event::sender::Broadcaster; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, - sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, TorrentPeerBuilder, + sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, }; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { @@ -121,20 +117,23 @@ mod tests { let (_core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - let remote_addr = 
sample_ipv4_remote_addr(); + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let info_hash = InfoHash([0u8; 20]); let info_hashes = vec![info_hash]; let request = ScrapeRequest { - connection_id: make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap(), + connection_id: make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap(), transaction_id: TransactionId(0i32.into()), info_hashes, }; let response = handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -160,13 +159,15 @@ mod tests { ) { let peer_id = PeerId([255u8; 20]); - let peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(*remote_addr) - .with_number_of_bytes_left(0) + .with_bytes_left_to_download(0) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer, None); + in_memory_torrent_repository + .handle_announcement(&info_hash.0.into(), &peer, None) + .await; } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { @@ -183,24 +184,30 @@ mod tests { core_tracker_services: Arc<CoreTrackerServices>, core_udp_tracker_services: Arc<CoreUdpTrackerServices>, ) -> Response { - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let udp_server_broadcaster = Broadcaster::default(); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_server_broadcaster.clone())); + + let 
udp_server_stats_event_sender = event_bus.sender(); + + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); add_a_seeder( core_tracker_services.in_memory_torrent_repository.clone(), - &remote_addr, + &client_socket_addr, &info_hash, ) .await; - let request = build_scrape_request(&remote_addr, &info_hash); + let request = build_scrape_request(&client_socket_addr, &info_hash); handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -242,7 +249,10 @@ mod tests { } mod with_a_whitelisted_tracker { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::handlers::handle_scrape; use crate::handlers::scrape::tests::scrape_request::{ @@ -257,24 +267,28 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); - let remote_addr = sample_ipv4_remote_addr(); + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let info_hash = InfoHash([0u8; 20]); add_a_seeder( core_tracker_services.in_memory_torrent_repository.clone(), - &remote_addr, + &client_socket_addr, &info_hash, ) .await; core_tracker_services.in_memory_whitelist.add(&info_hash.0.into()).await; - let request = build_scrape_request(&remote_addr, 
&info_hash); + let request = build_scrape_request(&client_socket_addr, &info_hash); let torrent_stats = match_scrape_response( handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -298,22 +312,26 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); - let remote_addr = sample_ipv4_remote_addr(); + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let info_hash = InfoHash([0u8; 20]); add_a_seeder( core_tracker_services.in_memory_torrent_repository.clone(), - &remote_addr, + &client_socket_addr, &info_hash, ) .await; - let request = build_scrape_request(&remote_addr, &info_hash); + let request = build_scrape_request(&client_socket_addr, &info_hash); let torrent_stats = match_scrape_response( handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, + client_socket_addr, + server_service_binding, &request, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -342,40 +360,46 @@ mod tests { mod using_ipv4 { use std::future; + use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::sample_scrape_request; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, sample_ipv4_remote_addr, MockUdpServerStatsEventSender, }; - use crate::statistics as server_statistics; 
#[tokio::test] async fn should_send_the_upd4_scrape_event() { + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() - .with(eq(server_statistics::event::Event::Udp4Request { - kind: server_statistics::event::UdpResponseKind::Scrape, + .expect_send() + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + kind: UdpRequestKind::Scrape, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_server_stats_event_sender: Arc<Option<Box<dyn server_statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - - let remote_addr = sample_ipv4_remote_addr(); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let udp_server_stats_event_sender: crate::event::sender::Sender = + Some(Arc::new(udp_server_stats_event_sender_mock)); let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, - &sample_scrape_request(&remote_addr), + client_socket_addr, + server_service_binding, + &sample_scrape_request(&client_socket_addr), &udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -386,40 +410,46 @@ mod tests { mod using_ipv6 { use std::future; + use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::sample_scrape_request; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use 
crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, sample_ipv6_remote_addr, MockUdpServerStatsEventSender, }; - use crate::statistics as server_statistics; #[tokio::test] async fn should_send_the_upd6_scrape_event() { + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() - .with(eq(server_statistics::event::Event::Udp6Request { - kind: server_statistics::event::UdpResponseKind::Scrape, + .expect_send() + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + kind: UdpRequestKind::Scrape, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_server_stats_event_sender: Arc<Option<Box<dyn server_statistics::event::sender::Sender>>> = - Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - - let remote_addr = sample_ipv6_remote_addr(); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let udp_server_stats_event_sender: crate::event::sender::Sender = + Some(Arc::new(udp_server_stats_event_sender_mock)); let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, - &sample_scrape_request(&remote_addr), + client_socket_addr, + server_service_binding, + &sample_scrape_request(&client_socket_addr), &udp_server_stats_event_sender, sample_cookie_valid_range(), ) diff --git a/packages/udp-tracker-server/src/lib.rs 
b/packages/udp-tracker-server/src/lib.rs index 9e013bf81..58a3830e1 100644 --- a/packages/udp-tracker-server/src/lib.rs +++ b/packages/udp-tracker-server/src/lib.rs @@ -634,9 +634,11 @@ //! documentation by [Arvid Norberg](https://github.com/arvidn) was very //! supportive in the development of this documentation. Some descriptions were //! taken from the [libtorrent](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). +pub mod banning; pub mod container; pub mod environment; pub mod error; +pub mod event; pub mod handlers; pub mod server; pub mod statistics; @@ -672,3 +674,57 @@ pub struct RawRequest { payload: Vec<u8>, from: SocketAddr, } + +#[cfg(test)] +pub(crate) mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_udp_tracker_core::event::Event; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + + pub fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } + } + + #[must_use] + pub fn announce_events_match(event: &Event, expected_event: &Event) -> bool { + match (event, expected_event) { + ( + Event::UdpAnnounce { + connection, + info_hash, + announcement, + }, + Event::UdpAnnounce { + connection: expected_connection, + info_hash: expected_info_hash, + announcement: expected_announcement, + }, + ) => { + *connection == *expected_connection + && *info_hash == *expected_info_hash + && announcement.peer_id == expected_announcement.peer_id + && announcement.peer_addr == expected_announcement.peer_addr + // Events can't be compared due to the `updated` field. 
+ // The `announcement.uploaded` contains the current time + // when the test is executed. + // todo: mock time + //&& announcement.updated == expected_announcement.updated + && announcement.uploaded == expected_announcement.uploaded + && announcement.downloaded == expected_announcement.downloaded + && announcement.left == expected_announcement.left + && announcement.event == expected_announcement.event + } + _ => false, + } + } +} diff --git a/packages/udp-tracker-server/src/server/bound_socket.rs b/packages/udp-tracker-server/src/server/bound_socket.rs index 988bfb67f..6b81545d2 100644 --- a/packages/udp-tracker-server/src/server/bound_socket.rs +++ b/packages/udp-tracker-server/src/server/bound_socket.rs @@ -3,6 +3,7 @@ use std::net::SocketAddr; use std::ops::Deref; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use url::Url; /// Wrapper for Tokio [`UdpSocket`][`tokio::net::UdpSocket`] that is bound to a particular socket. @@ -47,6 +48,15 @@ impl BoundSocket { pub fn url(&self) -> Url { Url::parse(&format!("udp://{}", self.address())).expect("UDP socket address should be valid") } + + /// # Panics + /// + /// It should never panic because the conversion to a [`ServiceBinding`] + /// is infallible. 
+ #[must_use] + pub fn service_binding(&self) -> ServiceBinding { + ServiceBinding::new(Protocol::UDP, self.address()).expect("Conversion to ServiceBinding should not fail") + } } impl Deref for BoundSocket { diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index acd214ab0..4fd3a95d9 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -1,4 +1,4 @@ -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; @@ -13,17 +13,19 @@ use tokio::time::interval; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::ServiceHealthCheckJob; use torrust_server_lib::signals::{shutdown_signal_with_message, Halted, Started}; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use tracing::instrument; use super::request_buffer::ActiveRequests; use crate::container::UdpTrackerServerContainer; +use crate::event::{ConnectionContext, Event}; use crate::server::bound_socket::BoundSocket; use crate::server::processor::Processor; use crate::server::receiver::Receiver; -use crate::statistics; -const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600; +const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600 * 24; +const TYPE_STRING: &str = "udp_tracker"; /// A UDP server instance launcher. 
#[derive(Constructor)] pub struct Launcher; @@ -47,12 +49,12 @@ impl Launcher { ) { tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); - if udp_tracker_core_container.core_config.private { + if udp_tracker_core_container.tracker_core_container.core_config.private { tracing::error!("udp services cannot be used for private trackers"); panic!("it should not use udp if using authentication"); } - let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) + let socket = tokio::time::timeout(Duration::from_secs(5), BoundSocket::new(bind_to)) .await .expect("it should bind to the socket within five seconds"); @@ -64,6 +66,7 @@ impl Launcher { } }; + let service_binding = bound_socket.service_binding().clone(); let address = bound_socket.address(); let local_udp_url = bound_socket.url().to_string(); @@ -88,7 +91,10 @@ impl Launcher { }; tx_start - .send(Started { address }) + .send(Started { + service_binding, + address, + }) .expect("the UDP Tracker service should not be dropped"); tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (started)"); @@ -110,14 +116,15 @@ impl Launcher { } #[must_use] - #[instrument(skip(binding))] - pub fn check(binding: &SocketAddr) -> ServiceHealthCheckJob { - let binding = *binding; - let info = format!("checking the udp tracker health check at: {binding}"); + #[instrument(skip(service_binding))] + pub fn check(service_binding: &ServiceBinding) -> ServiceHealthCheckJob { + let info = format!("checking the udp tracker health check at: {}", service_binding.bind_address()); + + let service_binding_clone = service_binding.clone(); - let job = tokio::spawn(async move { check(&binding).await }); + let job = tokio::spawn(async move { check(&service_binding_clone).await }); - ServiceHealthCheckJob::new(binding, info, job) + ServiceHealthCheckJob::new(service_binding.clone(), info, TYPE_STRING.to_string(), job) } #[instrument(skip(receiver, 
udp_tracker_core_container, udp_tracker_server_container))] @@ -129,9 +136,12 @@ impl Launcher { ) { let active_requests = &mut ActiveRequests::default(); - let addr = receiver.bound_socket_address(); + let server_socket_addr = receiver.bound_socket_address(); - let local_addr = format!("udp://{addr}"); + let server_service_binding = + ServiceBinding::new(Protocol::UDP, server_socket_addr).expect("Bound socket to service binding should not fail"); + + let local_addr = server_service_binding.clone().to_string(); let cookie_lifetime = cookie_lifetime.as_secs_f64(); @@ -149,6 +159,9 @@ impl Launcher { }); loop { + let server_service_binding = + ServiceBinding::new(Protocol::UDP, server_socket_addr).expect("Bound socket to service binding should not fail"); + if let Some(req) = { tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server (wait for request)"); receiver.next().await @@ -167,30 +180,24 @@ impl Launcher { } }; - if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.udp_server_stats_event_sender.as_deref() - { - match req.from.ip() { - IpAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(statistics::event::Event::Udp4IncomingRequest) - .await; - } - IpAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(statistics::event::Event::Udp6IncomingRequest) - .await; - } - } + let client_socket_addr = req.from; + + if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.stats_event_sender.as_deref() { + udp_server_stats_event_sender + .send(Event::UdpRequestReceived { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + }) + .await; } if udp_tracker_core_container.ban_service.read().await.is_banned(&req.from.ip()) { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop continue: (banned ip)"); - if let Some(udp_server_stats_event_sender) = - udp_tracker_server_container.udp_server_stats_event_sender.as_deref() - { + 
if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(statistics::event::Event::UdpRequestBanned) + .send(Event::UdpRequestBanned { + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + }) .await; } @@ -226,11 +233,11 @@ impl Launcher { if old_request_aborted { // Evicted task from active requests buffer was aborted. - if let Some(udp_server_stats_event_sender) = - udp_tracker_server_container.udp_server_stats_event_sender.as_deref() - { + if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(statistics::event::Event::UdpRequestAborted) + .send(Event::UdpRequestAborted { + context: ConnectionContext::new(client_socket_addr, server_service_binding), + }) .await; } } diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 44b543571..dd6ba633d 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -1,5 +1,5 @@ use std::io::Cursor; -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; @@ -7,57 +7,75 @@ use aquatic_udp_protocol::Response; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::{self}; use tokio::time::Instant; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::container::UdpTrackerServerContainer; +use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::CookieTimeValues; -use crate::{handlers, statistics, RawRequest}; +use crate::{handlers, RawRequest}; pub struct Processor { socket: Arc<BoundSocket>, udp_tracker_core_container: 
Arc<UdpTrackerCoreContainer>, udp_tracker_server_container: Arc<UdpTrackerServerContainer>, cookie_lifetime: f64, + server_service_binding: ServiceBinding, } impl Processor { + /// # Panics + /// + /// It will panic if a bound socket address port is 0. It should never + /// happen. pub fn new( socket: Arc<BoundSocket>, udp_tracker_core_container: Arc<UdpTrackerCoreContainer>, udp_tracker_server_container: Arc<UdpTrackerServerContainer>, cookie_lifetime: f64, ) -> Self { + let server_service_binding = + ServiceBinding::new(Protocol::UDP, socket.address()).expect("Bound socket port shouldn't be 0"); + Self { socket, udp_tracker_core_container, udp_tracker_server_container, cookie_lifetime, + server_service_binding, } } #[instrument(skip(self, request))] pub async fn process_request(self, request: RawRequest) { - let from = request.from; + let client_socket_addr = request.from; let start_time = Instant::now(); - let response = handlers::handle_packet( + let (response, opt_req_kind) = handlers::handle_packet( request, self.udp_tracker_core_container.clone(), self.udp_tracker_server_container.clone(), - self.socket.address(), + self.server_service_binding.clone(), CookieTimeValues::new(self.cookie_lifetime), ) .await; let elapsed_time = start_time.elapsed(); - self.send_response(from, response, elapsed_time).await; + self.send_response(client_socket_addr, response, opt_req_kind, elapsed_time) + .await; } #[instrument(skip(self))] - async fn send_response(self, target: SocketAddr, response: Response, req_processing_time: Duration) { + async fn send_response( + self, + client_socket_addr: SocketAddr, + response: Response, + opt_req_kind: Option<UdpRequestKind>, + req_processing_time: Duration, + ) { tracing::debug!("send response"); let response_type = match &response { @@ -69,10 +87,15 @@ impl Processor { }; let udp_response_kind = match &response { - Response::Connect(_) => statistics::event::UdpResponseKind::Connect, - Response::AnnounceIpv4(_) | 
Response::AnnounceIpv6(_) => statistics::event::UdpResponseKind::Announce, - Response::Scrape(_) => statistics::event::UdpResponseKind::Scrape, - Response::Error(_e) => statistics::event::UdpResponseKind::Error, + Response::Error(_e) => event::UdpResponseKind::Error { opt_req_kind: None }, + _ => { + if let Some(req_kind) = opt_req_kind { + event::UdpResponseKind::Ok { req_kind } + } else { + // code-review: this case should never happen. + event::UdpResponseKind::Error { opt_req_kind } + } + } }; let mut writer = Cursor::new(Vec::with_capacity(200)); @@ -82,7 +105,7 @@ impl Processor { let bytes_count = writer.get_ref().len(); let payload = writer.get_ref(); - let () = match self.send_packet(&target, payload).await { + let () = match self.send_packet(&client_socket_addr, payload).await { Ok(sent_bytes) => { if tracing::event_enabled!(Level::TRACE) { tracing::debug!(%bytes_count, %sent_bytes, ?payload, "sent {response_type}"); @@ -91,26 +114,15 @@ impl Processor { } if let Some(udp_server_stats_event_sender) = - self.udp_tracker_server_container.udp_server_stats_event_sender.as_deref() + self.udp_tracker_server_container.stats_event_sender.as_deref() { - match target.ip() { - IpAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(statistics::event::Event::Udp4Response { - kind: udp_response_kind, - req_processing_time, - }) - .await; - } - IpAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(statistics::event::Event::Udp6Response { - kind: udp_response_kind, - req_processing_time, - }) - .await; - } - } + udp_server_stats_event_sender + .send(Event::UdpResponseSent { + context: ConnectionContext::new(client_socket_addr, self.server_service_binding), + kind: udp_response_kind, + req_processing_time, + }) + .await; } } Err(error) => tracing::warn!(%bytes_count, %error, ?payload, "failed to send"), diff --git a/packages/udp-tracker-server/src/server/states.rs b/packages/udp-tracker-server/src/server/states.rs index 4d1c97167..4ad059095 100644 --- 
a/packages/udp-tracker-server/src/server/states.rs +++ b/packages/udp-tracker-server/src/server/states.rs @@ -83,9 +83,12 @@ impl Server<Stopped> { rx_halt, ); - let local_addr = rx_start.await.expect("it should be able to start the service").address; + let started = rx_start.await.expect("it should be able to start the service"); - form.send(ServiceRegistration::new(local_addr, Launcher::check)) + let service_binding = started.service_binding; + let local_addr = started.address; + + form.send(ServiceRegistration::new(service_binding, Launcher::check)) .expect("it should be able to send service registration"); let running_udp_server: Server<Running> = Server { diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs deleted file mode 100644 index b3b86e20a..000000000 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ /dev/null @@ -1,190 +0,0 @@ -use crate::statistics::event::{Event, UdpResponseKind}; -use crate::statistics::repository::Repository; - -pub async fn handle_event(event: Event, stats_repository: &Repository) { - match event { - // UDP - Event::UdpRequestAborted => { - stats_repository.increase_udp_requests_aborted().await; - } - Event::UdpRequestBanned => { - stats_repository.increase_udp_requests_banned().await; - } - - // UDP4 - Event::Udp4IncomingRequest => { - stats_repository.increase_udp4_requests().await; - } - Event::Udp4Request { kind } => match kind { - UdpResponseKind::Connect => { - stats_repository.increase_udp4_connections().await; - } - UdpResponseKind::Announce => { - stats_repository.increase_udp4_announces().await; - } - UdpResponseKind::Scrape => { - stats_repository.increase_udp4_scrapes().await; - } - UdpResponseKind::Error => {} - }, - Event::Udp4Response { - kind, - req_processing_time, - } => { - stats_repository.increase_udp4_responses().await; - - match kind { - UdpResponseKind::Connect => { - stats_repository - 
.recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Announce => { - stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Scrape => { - stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Error => {} - } - } - Event::Udp4Error => { - stats_repository.increase_udp4_errors().await; - } - - // UDP6 - Event::Udp6IncomingRequest => { - stats_repository.increase_udp6_requests().await; - } - Event::Udp6Request { kind } => match kind { - UdpResponseKind::Connect => { - stats_repository.increase_udp6_connections().await; - } - UdpResponseKind::Announce => { - stats_repository.increase_udp6_announces().await; - } - UdpResponseKind::Scrape => { - stats_repository.increase_udp6_scrapes().await; - } - UdpResponseKind::Error => {} - }, - Event::Udp6Response { - kind: _, - req_processing_time: _, - } => { - stats_repository.increase_udp6_responses().await; - } - Event::Udp6Error => { - stats_repository.increase_udp6_errors().await; - } - } - - tracing::debug!("stats: {:?}", stats_repository.get_stats().await); -} - -#[cfg(test)] -mod tests { - use crate::statistics::event::handler::handle_event; - use crate::statistics::event::Event; - use crate::statistics::repository::Repository; - - #[tokio::test] - async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { - let stats_repository = Repository::new(); - - handle_event(Event::UdpRequestAborted, &stats_repository).await; - let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted, 1); - } - #[tokio::test] - async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { - let stats_repository = Repository::new(); - - handle_event(Event::UdpRequestBanned, &stats_repository).await; - let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned, 
1); - } - - #[tokio::test] - async fn should_increase_the_udp4_requests_counter_when_it_receives_a_udp4_request_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp4IncomingRequest, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_requests, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { - let stats_repository = Repository::new(); - - handle_event( - Event::Udp4Response { - kind: crate::statistics::event::UdpResponseKind::Announce, - req_processing_time: std::time::Duration::from_secs(1), - }, - &stats_repository, - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_responses, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp4Error, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_errors_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_requests_counter_when_it_receives_a_udp6_request_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp6IncomingRequest, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_requests, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { - let stats_repository = Repository::new(); - - handle_event( - Event::Udp6Response { - kind: crate::statistics::event::UdpResponseKind::Announce, - req_processing_time: std::time::Duration::from_secs(1), - }, - &stats_repository, - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_responses, 1); - } - #[tokio::test] - async fn should_increase_the_udp6_errors_counter_when_it_receives_a_udp6_error_event() { - 
let stats_repository = Repository::new(); - - handle_event(Event::Udp6Error, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_errors_handled, 1); - } -} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs new file mode 100644 index 000000000..63e480ca5 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -0,0 +1,142 @@ +use aquatic_udp_protocol::PeerClient; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ConnectionContext, ErrorKind, UdpRequestKind}; +use crate::statistics::repository::Repository; +use crate::statistics::{UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL, UDP_TRACKER_SERVER_ERRORS_TOTAL}; + +pub async fn handle_event( + connection_context: ConnectionContext, + opt_udp_request_kind: Option<UdpRequestKind>, + error_kind: ErrorKind, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { + update_extendable_metrics(&connection_context, opt_udp_request_kind, error_kind, repository, now).await; +} + +async fn update_extendable_metrics( + connection_context: &ConnectionContext, + opt_udp_request_kind: Option<UdpRequestKind>, + error_kind: ErrorKind, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { + update_all_errors_counter(connection_context, opt_udp_request_kind.clone(), repository, now).await; + update_connection_id_errors_counter(opt_udp_request_kind, error_kind, repository, now).await; +} + +async fn update_all_errors_counter( + connection_context: &ConnectionContext, + opt_udp_request_kind: Option<UdpRequestKind>, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { + let mut label_set = LabelSet::from(connection_context.clone()); + + if let Some(kind) = opt_udp_request_kind.clone() { + 
label_set.upsert(label_name!("request_kind"), kind.to_string().into()); + } + + match repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + } +} + +async fn update_connection_id_errors_counter( + opt_udp_request_kind: Option<UdpRequestKind>, + error_kind: ErrorKind, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { + if let ErrorKind::ConnectionCookie(_) = error_kind { + if let Some(UdpRequestKind::Announce { announce_request }) = opt_udp_request_kind { + let (client_software_name, client_software_version) = extract_name_and_version(&announce_request.peer_id.client()); + + let label_set = LabelSet::from([ + (label_name!("client_software_name"), client_software_name.into()), + (label_name!("client_software_version"), client_software_version.into()), + ]); + + match repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; + } + } +} + +fn extract_name_and_version(peer_client: &PeerClient) -> (String, String) { + match peer_client { + PeerClient::BitTorrent(compact_string) => ("BitTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::Deluge(compact_string) => ("Deluge".to_string(), compact_string.as_str().to_owned()), + PeerClient::LibTorrentRakshasa(compact_string) => ("lt (rakshasa)".to_string(), compact_string.as_str().to_owned()), + PeerClient::LibTorrentRasterbar(compact_string) => ("lt (rasterbar)".to_string(), compact_string.as_str().to_owned()), + PeerClient::QBitTorrent(compact_string) => ("QBitTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::Transmission(compact_string) => ("Transmission".to_string(), compact_string.as_str().to_owned()), + PeerClient::UTorrent(compact_string) => ("µTorrent".to_string(), 
compact_string.as_str().to_owned()), + PeerClient::UTorrentEmbedded(compact_string) => ("µTorrent Emb.".to_string(), compact_string.as_str().to_owned()), + PeerClient::UTorrentMac(compact_string) => ("µTorrent Mac".to_string(), compact_string.as_str().to_owned()), + PeerClient::UTorrentWeb(compact_string) => ("µTorrent Web".to_string(), compact_string.as_str().to_owned()), + PeerClient::Vuze(compact_string) => ("Vuze".to_string(), compact_string.as_str().to_owned()), + PeerClient::WebTorrent(compact_string) => ("WebTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::WebTorrentDesktop(compact_string) => ("WebTorrent Desktop".to_string(), compact_string.as_str().to_owned()), + PeerClient::Mainline(compact_string) => ("Mainline".to_string(), compact_string.as_str().to_owned()), + PeerClient::OtherWithPrefixAndVersion { prefix, version } => { + (format!("Other ({})", prefix.as_str()), version.as_str().to_owned()) + } + PeerClient::OtherWithPrefix(compact_string) => (format!("Other ({compact_string})"), String::new()), + PeerClient::Other => ("Other".to_string(), String::new()), + _ => ("Unknown".to_string(), String::new()), + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::error::ErrorKind; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpError { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 
196)), 6969), + ) + .unwrap(), + ), + kind: None, + error: ErrorKind::RequestParse("Invalid request format".to_string()), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_errors_total(), 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/mod.rs b/packages/udp-tracker-server/src/statistics/event/handler/mod.rs new file mode 100644 index 000000000..9e7f5cd47 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/mod.rs @@ -0,0 +1,40 @@ +mod error; +mod request_aborted; +mod request_accepted; +mod request_banned; +mod request_received; +mod response_sent; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::Event; +use crate::statistics::repository::Repository; + +pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + match event { + Event::UdpRequestAborted { context } => { + request_aborted::handle_event(context, stats_repository, now).await; + } + Event::UdpRequestBanned { context } => { + request_banned::handle_event(context, stats_repository, now).await; + } + Event::UdpRequestReceived { context } => { + request_received::handle_event(context, stats_repository, now).await; + } + Event::UdpRequestAccepted { context, kind } => { + request_accepted::handle_event(context, kind, stats_repository, now).await; + } + Event::UdpResponseSent { + context, + kind, + req_processing_time, + } => { + response_sent::handle_event(context, kind, req_processing_time, stats_repository, now).await; + } + Event::UdpError { context, kind, error } => { + error::handle_event(context, kind, error, stats_repository, now).await; + } + } + + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs 
new file mode 100644 index 000000000..f340fe51a --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs @@ -0,0 +1,82 @@ +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::ConnectionContext; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL; + +pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpRequestAborted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp_requests_aborted_total(), 1); + } + + #[tokio::test] + async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { + let stats_repository = Repository::new(); + 
+ handle_event( + Event::UdpRequestAborted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_aborted_total(), 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs new file mode 100644 index 000000000..33971926e --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -0,0 +1,200 @@ +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ConnectionContext, UdpRequestKind}; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL; + +pub async fn handle_event( + context: ConnectionContext, + kind: UdpRequestKind, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + let mut label_set = LabelSet::from(context); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) + .await + { + Ok(()) => { + tracing::debug!("Successfully increased the counter for UDP requests accepted: {}", label_set); + } + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use 
crate::event::{ConnectionContext, Event}; + use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_udp4_connect_requests_counter_when_it_receives_a_udp4_request_event_of_connect_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Connect, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_connect_requests_accepted_total(), 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_announce_requests_counter_when_it_receives_a_udp4_request_event_of_announce_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_announce_requests_accepted_total(), 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_scrape_requests_counter_when_it_receives_a_udp4_request_event_of_scrape_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Scrape, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_scrape_requests_accepted_total(), 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_connect_requests_counter_when_it_receives_a_udp6_request_event_of_connect_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Connect, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_connect_requests_accepted_total(), 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_announce_requests_counter_when_it_receives_a_udp6_request_event_of_announce_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_announce_requests_accepted_total(), 1); + } + + #[tokio::test] + async fn 
should_increase_the_udp6_scrape_requests_counter_when_it_receives_a_udp6_request_event_of_scrape_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Scrape, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_scrape_requests_accepted_total(), 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs new file mode 100644 index 000000000..10f6cad88 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs @@ -0,0 +1,82 @@ +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::ConnectionContext; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL; + +pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + 
use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpRequestBanned { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp_requests_banned_total(), 1); + } + + #[tokio::test] + async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpRequestBanned { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_banned_total(), 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs new file mode 100644 index 000000000..148b9d8da --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs @@ -0,0 +1,59 @@ +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::ConnectionContext; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL; + +pub async fn handle_event(context: ConnectionContext, 
stats_repository: &Repository, now: DurationSinceUnixEpoch) { + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpRequestReceived { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_requests_received_total(), 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs new file mode 100644 index 000000000..b1a046b5b --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -0,0 +1,141 @@ +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ConnectionContext, UdpRequestKind, UdpResponseKind}; +use crate::statistics::repository::Repository; +use 
crate::statistics::UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL; + +pub async fn handle_event( + context: ConnectionContext, + kind: UdpResponseKind, + req_processing_time: std::time::Duration, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + let (result_label_value, kind_label_value) = match kind { + UdpResponseKind::Ok { req_kind } => match req_kind { + UdpRequestKind::Connect => { + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + + let _new_avg = stats_repository + .recalculate_udp_avg_processing_time_ns(req_processing_time, &label_set, now) + .await; + + (LabelValue::new("ok"), UdpRequestKind::Connect.into()) + } + UdpRequestKind::Announce { announce_request } => { + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + + let _new_avg = stats_repository + .recalculate_udp_avg_processing_time_ns(req_processing_time, &label_set, now) + .await; + + (LabelValue::new("ok"), UdpRequestKind::Announce { announce_request }.into()) + } + UdpRequestKind::Scrape => { + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + + let _new_avg = stats_repository + .recalculate_udp_avg_processing_time_ns(req_processing_time, &label_set, now) + .await; + + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) + } + }, + UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), + }; + + // Increase the number of responses sent + let mut label_set = LabelSet::from(context); + if result_label_value == LabelValue::new("ok") { + label_set.upsert(label_name!("request_kind"), kind_label_value); + } + label_set.upsert(label_name!("result"), result_label_value); + match stats_repository + 
.increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpResponseSent { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpResponseKind::Ok { + req_kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, + }, + req_processing_time: std::time::Duration::from_secs(1), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_responses_sent_total(), 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::UdpResponseSent { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: 
crate::event::UdpResponseKind::Ok { + req_kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, + }, + req_processing_time: std::time::Duration::from_secs(1), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_responses_sent_total(), 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index f1a2e25de..caaf5a2bc 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,11 +1,59 @@ -use tokio::sync::mpsc; +use std::sync::Arc; + +use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; use super::handler::handle_event; -use super::Event; +use crate::event::receiver::Receiver; use crate::statistics::repository::Repository; +use crate::CurrentClock; + +#[must_use] +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc<Repository>, +) -> JoinHandle<()> { + let repository_clone = repository.clone(); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, cancellation_token, repository_clone).await; + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc<Repository>) { + loop { + tokio::select! 
{ + biased; + + () = cancellation_token.cancelled() => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down UDP tracker server event listener."); + break; + } -pub async fn dispatch_events(mut receiver: mpsc::Receiver<Event>, stats_repository: Repository) { - while let Some(event) = receiver.recv().await { - handle_event(event, &stats_repository).await; + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server statistics receiver closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server statistics receiver lagged by {} events.", n); + } + } + } + } + } + } } } diff --git a/packages/udp-tracker-server/src/statistics/event/mod.rs b/packages/udp-tracker-server/src/statistics/event/mod.rs index 6a48b9449..dae683398 100644 --- a/packages/udp-tracker-server/src/statistics/event/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/mod.rs @@ -1,51 +1,2 @@ -use std::time::Duration; - pub mod handler; pub mod listener; -pub mod sender; - -/// An statistics event. It is used to collect tracker metrics. -/// -/// - `Tcp` prefix means the event was triggered by the HTTP tracker -/// - `Udp` prefix means the event was triggered by the UDP tracker -/// - `4` or `6` prefixes means the IP version used by the peer -/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` -/// -/// > NOTE: HTTP trackers do not use `connection` requests. -#[derive(Debug, PartialEq, Eq)] -pub enum Event { - // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } - // Attributes are enums too. 
- UdpRequestAborted, - UdpRequestBanned, - - // UDP4 - Udp4IncomingRequest, - Udp4Request { - kind: UdpResponseKind, - }, - Udp4Response { - kind: UdpResponseKind, - req_processing_time: Duration, - }, - Udp4Error, - - // UDP6 - Udp6IncomingRequest, - Udp6Request { - kind: UdpResponseKind, - }, - Udp6Response { - kind: UdpResponseKind, - req_processing_time: Duration, - }, - Udp6Error, -} - -#[derive(Debug, PartialEq, Eq)] -pub enum UdpResponseKind { - Connect, - Announce, - Scrape, - Error, -} diff --git a/packages/udp-tracker-server/src/statistics/event/sender.rs b/packages/udp-tracker-server/src/statistics/event/sender.rs deleted file mode 100644 index ca4b4e210..000000000 --- a/packages/udp-tracker-server/src/statistics/event/sender.rs +++ /dev/null @@ -1,29 +0,0 @@ -use futures::future::BoxFuture; -use futures::FutureExt; -#[cfg(test)] -use mockall::{automock, predicate::str}; -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::SendError; - -use super::Event; - -/// A trait to allow sending statistics events -#[cfg_attr(test, automock)] -pub trait Sender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option<Result<(), SendError<Event>>>>; -} - -/// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. -/// -/// It uses a channel sender to send the statistic events. 
The channel is created by a -/// [`statistics::Keeper`](crate::statistics::keeper::Keeper) -#[allow(clippy::module_name_repetitions)] -pub struct ChannelSender { - pub(crate) sender: mpsc::Sender<Event>, -} - -impl Sender for ChannelSender { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option<Result<(), SendError<Event>>>> { - async move { Some(self.sender.send(event).await) }.boxed() - } -} diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs deleted file mode 100644 index ae80e7970..000000000 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ /dev/null @@ -1,77 +0,0 @@ -use tokio::sync::mpsc; - -use super::event::listener::dispatch_events; -use super::event::sender::{ChannelSender, Sender}; -use super::event::Event; -use super::repository::Repository; - -const CHANNEL_BUFFER_SIZE: usize = 65_535; - -/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). -/// -/// It actively listen to new statistics events. When it receives a new event -/// it accordingly increases the counters. 
-pub struct Keeper { - pub repository: Repository, -} - -impl Default for Keeper { - fn default() -> Self { - Self::new() - } -} - -impl Keeper { - #[must_use] - pub fn new() -> Self { - Self { - repository: Repository::new(), - } - } - - #[must_use] - pub fn new_active_instance() -> (Box<dyn Sender>, Repository) { - let mut stats_tracker = Self::new(); - - let stats_event_sender = stats_tracker.run_event_listener(); - - (stats_event_sender, stats_tracker.repository) - } - - pub fn run_event_listener(&mut self) -> Box<dyn Sender> { - let (sender, receiver) = mpsc::channel::<Event>(CHANNEL_BUFFER_SIZE); - - let stats_repository = self.repository.clone(); - - tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); - - Box::new(ChannelSender { sender }) - } -} - -#[cfg(test)] -mod tests { - use crate::statistics::event::Event; - use crate::statistics::keeper::Keeper; - use crate::statistics::metrics::Metrics; - - #[tokio::test] - async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::new(); - - let stats = stats_tracker.repository.get_stats().await; - - assert_eq!(stats.udp4_requests, Metrics::default().udp4_requests); - } - - #[tokio::test] - async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = Keeper::new(); - - let event_sender = stats_tracker.run_event_listener(); - - let result = event_sender.send_event(Event::Udp4IncomingRequest).await; - - assert!(result.is_some()); - } -} diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index cce618d74..e167dc5ae 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -1,60 +1,1315 @@ +use std::time::Duration; + +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use 
torrust_tracker_metrics::metric_collection::aggregate::avg::Avg; +use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::statistics::{ + UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, + UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, + UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, + UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, +}; + /// Metrics collected by the UDP tracker server. -#[derive(Debug, PartialEq, Default)] +#[derive(Debug, PartialEq, Default, Serialize)] pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increase_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } +} + +impl Metrics { + #[allow(clippy::cast_precision_loss)] + pub fn recalculate_udp_avg_processing_time_ns( + &mut self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { + self.increment_udp_processed_requests_total(label_set, now); + + let processed_requests_total = self.udp_processed_requests_total(label_set) as f64; + let previous_avg = self.udp_avg_processing_time_ns(label_set); + let req_processing_time = req_processing_time.as_nanos() as f64; + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / processed_requests_total; + + tracing::debug!( + "Recalculated UDP average processing time for labels {}: {} ns (previous: {} ns, req_processing_time: {} ns, request_processed_total: {})", + label_set, + new_avg, + previous_avg, + req_processing_time, + processed_requests_total + ); + + self.update_udp_avg_processing_time_ns(new_avg, label_set, now); + + new_avg + } + + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + fn udp_avg_processing_time_ns(&self, label_set: &LabelSet) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + label_set, + ) + .unwrap_or_default() as u64 + } + + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_request_accepted_total(&self, label_set: &LabelSet) -> u64 { + self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), label_set) + .unwrap_or_default() as u64 + } + + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + fn 
udp_processed_requests_total(&self, label_set: &LabelSet) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + label_set, + ) + .unwrap_or_default() as u64 + } + + fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { + tracing::debug!( + "Updating average processing time metric to {} ns for label set {}", + new_avg, + label_set, + ); + + match self.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + label_set, + new_avg, + now, + ) { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + } + + fn increment_udp_processed_requests_total(&mut self, label_set: &LabelSet, now: DurationSinceUnixEpoch) { + tracing::debug!("Incrementing processed requests total for label set {}", label_set,); + + match self.increase_counter( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + label_set, + now, + ) { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increment counter: {}", err), + } + } + // UDP /// Total number of UDP (UDP tracker) requests aborted. - pub udp_requests_aborted: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_requests_aborted_total(&self) -> u64 { + self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) requests banned. - pub udp_requests_banned: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_requests_banned_total(&self) -> u64 { + self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() as u64 + } /// Total number of banned IPs. 
- pub udp_banned_ips_total: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_banned_ips_total(&self) -> u64 { + self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() as u64 + } - /// Average rounded time spent processing UDP connect requests. - pub udp_avg_connect_processing_time_ns: u64, + /// Average processing time for UDP connect requests across all servers (in nanoseconds). + /// This calculates the average of all gauge samples for connect requests. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_connect_processing_time_ns_averaged(&self) -> u64 { + self.metric_collection + .avg( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "connect")].into(), + ) + .unwrap_or(0.0) as u64 + } - /// Average rounded time spent processing UDP announce requests. - pub udp_avg_announce_processing_time_ns: u64, + /// Average processing time for UDP announce requests across all servers (in nanoseconds). + /// This calculates the average of all gauge samples for announce requests. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_announce_processing_time_ns_averaged(&self) -> u64 { + self.metric_collection + .avg( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "announce")].into(), + ) + .unwrap_or(0.0) as u64 + } - /// Average rounded time spent processing UDP scrape requests. - pub udp_avg_scrape_processing_time_ns: u64, + /// Average processing time for UDP scrape requests across all servers (in nanoseconds). + /// This calculates the average of all gauge samples for scrape requests. 
+ #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_scrape_processing_time_ns_averaged(&self) -> u64 { + self.metric_collection + .avg( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "scrape")].into(), + ) + .unwrap_or(0.0) as u64 + } // UDPv4 /// Total number of UDP (UDP tracker) requests from IPv4 peers. - pub udp4_requests: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_requests_received_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) connections from IPv4 peers. - pub udp4_connections_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_connect_requests_accepted_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. - pub udp4_announces_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_announce_requests_accepted_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. 
- pub udp4_scrapes_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_scrape_requests_accepted_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) responses from IPv4 peers. - pub udp4_responses: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_responses_sent_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. - pub udp4_errors_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_errors_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() as u64 + } // UDPv6 /// Total number of UDP (UDP tracker) requests from IPv6 peers. - pub udp6_requests: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_requests_received_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. 
- pub udp6_connections_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_connect_requests_accepted_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. - pub udp6_announces_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_announce_requests_accepted_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. - pub udp6_scrapes_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_scrape_requests_accepted_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) responses from IPv6 peers. - pub udp6_responses: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_responses_sent_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() as u64 + } /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. 
- pub udp6_errors_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_errors_total(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() as u64 + } +} + +#[cfg(test)] +mod tests { + use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_name; + + use super::*; + use crate::statistics::{ + UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, + UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, + UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, + UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, + }; + use crate::CurrentClock; + + #[test] + fn it_should_implement_default() { + let metrics = Metrics::default(); + // MetricCollection starts with empty collections + assert_eq!(metrics, Metrics::default()); + } + + #[test] + fn it_should_implement_debug() { + let metrics = Metrics::default(); + let debug_string = format!("{metrics:?}"); + assert!(debug_string.contains("Metrics")); + assert!(debug_string.contains("metric_collection")); + } + + #[test] + fn it_should_implement_partial_eq() { + let metrics1 = Metrics::default(); + let metrics2 = Metrics::default(); + assert_eq!(metrics1, metrics2); + } + + #[test] + fn it_should_increase_counter_metric() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_increase_counter_metric_with_labels() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = 
LabelSet::from([("server_binding_address_ip_family", "inet")]); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_set_gauge_metric() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 42.0, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_set_gauge_metric_with_labels() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect")]); + + let result = metrics.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 1000.0, + now, + ); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_return_zero_for_udp_processed_requests_total_when_no_data() { + let metrics = Metrics::default(); + let labels = LabelSet::from([("request_kind", "connect")]); + assert_eq!(metrics.udp_processed_requests_total(&labels), 0); + } + + #[test] + fn it_should_increment_processed_requests_total() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect")]); + + // Directly increment the counter using the public method + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + &labels, + now, + ) + .unwrap(); + + assert_eq!(metrics.udp_processed_requests_total(&labels), 1); + } + + mod udp_general_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp_requests_aborted_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_requests_aborted_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp_requests_aborted() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = 
LabelSet::empty(); + + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .unwrap(); + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .unwrap(); + + assert_eq!(metrics.udp_requests_aborted_total(), 2); + } + + #[test] + fn it_should_return_zero_for_udp_requests_banned_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_requests_banned_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp_requests_banned() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp_requests_banned_total(), 3); + } + + #[test] + fn it_should_return_zero_for_udp_banned_ips_total_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_banned_ips_total(), 0); + } + + #[test] + fn it_should_return_gauge_value_for_udp_banned_ips_total() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 10.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 10); + } + } + + mod udpv4_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp4_requests_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_requests_received_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_requests() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + for _ in 0..5 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_requests_received_total(), 5); 
+ } + + #[test] + fn it_should_return_zero_for_udp4_connections_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_connections_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 3); + } + + #[test] + fn it_should_return_zero_for_udp4_announces_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_announces_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + + for _ in 0..7 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 7); + } + + #[test] + fn it_should_return_zero_for_udp4_scrapes_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_scrape_requests_accepted_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_scrapes_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")]); + + for _ in 0..4 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_scrape_requests_accepted_total(), 4); + } + + #[test] + fn 
it_should_return_zero_for_udp4_responses_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_responses_sent_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_responses() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + for _ in 0..6 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_responses_sent_total(), 6); + } + + #[test] + fn it_should_return_zero_for_udp4_errors_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_errors_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_errors_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + for _ in 0..2 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_errors_total(), 2); + } + } + + mod udpv6_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp6_requests_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_requests_received_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_requests() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + for _ in 0..8 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_requests_received_total(), 8); + } + + #[test] + fn it_should_return_zero_for_udp6_connections_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_connect_requests_accepted_total(), 0); + } + + #[test] + fn 
it_should_return_sum_of_udp6_connections_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")]); + + for _ in 0..4 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_connect_requests_accepted_total(), 4); + } + + #[test] + fn it_should_return_zero_for_udp6_announces_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_announce_requests_accepted_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_announces_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")]); + + for _ in 0..9 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_announce_requests_accepted_total(), 9); + } + + #[test] + fn it_should_return_zero_for_udp6_scrapes_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_scrape_requests_accepted_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_scrapes_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")]); + + for _ in 0..6 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_scrape_requests_accepted_total(), 6); + } + + #[test] + fn it_should_return_zero_for_udp6_responses_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_responses_sent_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_responses() { + let mut metrics = 
Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + for _ in 0..11 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_responses_sent_total(), 11); + } + + #[test] + fn it_should_return_zero_for_udp6_errors_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_errors_total(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_errors_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_errors_total(), 3); + } + } + + mod combined_metrics { + use super::*; + + #[test] + fn it_should_distinguish_between_ipv4_and_ipv6_metrics() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + // Add different counts for IPv4 and IPv6 + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &ipv4_labels, now) + .unwrap(); + } + + for _ in 0..7 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &ipv6_labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_requests_received_total(), 3); + assert_eq!(metrics.udp6_requests_received_total(), 7); + } + + #[test] + fn it_should_distinguish_between_different_request_kinds() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let connect_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + let 
announce_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + let scrape_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")]); + + // Add different counts for different request kinds + for _ in 0..2 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &connect_labels, + now, + ) + .unwrap(); + } + + for _ in 0..5 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &announce_labels, + now, + ) + .unwrap(); + } + + for _ in 0..1 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &scrape_labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 2); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 5); + assert_eq!(metrics.udp4_scrape_requests_accepted_total(), 1); + } + + #[test] + fn it_should_handle_mixed_ipv4_and_ipv6_for_different_request_kinds() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let ipv4_connect_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + let ipv6_connect_labels = + LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")]); + let ipv4_announce_labels = + LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + let ipv6_announce_labels = + LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")]); + + // Add mixed IPv4/IPv6 counts + for _ in 0..3 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &ipv4_connect_labels, + now, + ) + .unwrap(); + } + + for _ in 0..2 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &ipv6_connect_labels, + now, + ) + .unwrap(); + } + + for _ in 0..4 { + metrics + 
.increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &ipv4_announce_labels, + now, + ) + .unwrap(); + } + + for _ in 0..6 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &ipv6_announce_labels, + now, + ) + .unwrap(); + } + + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 3); + assert_eq!(metrics.udp6_connect_requests_accepted_total(), 2); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 4); + assert_eq!(metrics.udp6_announce_requests_accepted_total(), 6); + } + } + + mod edge_cases { + use super::*; + + #[test] + fn it_should_handle_large_counter_values() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Add a large number of increments + for _ in 0..1000 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp_requests_aborted_total(), 1000); + } + + #[test] + fn it_should_handle_large_gauge_values() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set a large gauge value + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 999_999.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 999_999); + } + + #[test] + fn it_should_handle_zero_gauge_values() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 0.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 0); + } + + #[test] + fn it_should_overwrite_gauge_values_when_set_multiple_times() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set initial value + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), 
&labels, 50.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 50); + + // Overwrite with new value + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 75.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 75); + } + + #[test] + fn it_should_handle_empty_label_sets() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let empty_labels = LabelSet::empty(); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &empty_labels, now); + + assert!(result.is_ok()); + assert_eq!(metrics.udp_requests_aborted_total(), 1); + } + + #[test] + fn it_should_handle_multiple_labels_on_same_metric() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let labels1 = LabelSet::from([("server_binding_address_ip_family", "inet")]); + let labels2 = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + // Add to same metric with different labels + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels1, now) + .unwrap(); + } + + for _ in 0..5 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels2, now) + .unwrap(); + } + + // Should return labeled sums correctly + assert_eq!(metrics.udp4_requests_received_total(), 3); + assert_eq!(metrics.udp6_requests_received_total(), 5); + } + } + + mod error_handling { + use super::*; + + #[test] + fn it_should_return_ok_result_for_valid_counter_operations() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_return_ok_result_for_valid_gauge_operations() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = 
LabelSet::empty(); + + let result = metrics.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 42.0, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_handle_unknown_metric_names_gracefully() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // This should still work as metrics are created on demand + let result = metrics.increase_counter(&metric_name!("unknown_metric"), &labels, now); + + assert!(result.is_ok()); + } + } + + mod averaged_processing_time_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp_avg_connect_processing_time_ns_averaged_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 0); + } + + #[test] + fn it_should_return_averaged_value_for_udp_avg_connect_processing_time_ns_averaged() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "connect"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "connect"), ("server_id", "server2")]); + + // Set different gauge values for connect requests from different servers + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 1000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 2000.0, + now, + ) + .unwrap(); + + // Should return the average: (1000 + 2000) / 2 = 1500 + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1500); + } + + #[test] + fn it_should_return_zero_for_udp_avg_announce_processing_time_ns_averaged_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_announce_processing_time_ns_averaged(), 0); + } + + #[test] + fn it_should_return_averaged_value_for_udp_avg_announce_processing_time_ns_averaged() { + let mut metrics = 
Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "announce"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "announce"), ("server_id", "server2")]); + let labels3 = LabelSet::from([("request_kind", "announce"), ("server_id", "server3")]); + + // Set different gauge values for announce requests from different servers + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 1500.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 2500.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels3, + 3000.0, + now, + ) + .unwrap(); + + // Should return the average: (1500 + 2500 + 3000) / 3 = 2333 (truncated) + assert_eq!(metrics.udp_avg_announce_processing_time_ns_averaged(), 2333); + } + + #[test] + fn it_should_return_zero_for_udp_avg_scrape_processing_time_ns_averaged_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_scrape_processing_time_ns_averaged(), 0); + } + + #[test] + fn it_should_return_averaged_value_for_udp_avg_scrape_processing_time_ns_averaged() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "scrape"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "scrape"), ("server_id", "server2")]); + + // Set different gauge values for scrape requests from different servers + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 500.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 1500.0, + now, + ) + .unwrap(); + + // Should return the average: (500 + 1500) / 2 = 1000 + 
assert_eq!(metrics.udp_avg_scrape_processing_time_ns_averaged(), 1000); + } + + #[test] + fn it_should_handle_fractional_averages_with_truncation() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "connect"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "connect"), ("server_id", "server2")]); + let labels3 = LabelSet::from([("request_kind", "connect"), ("server_id", "server3")]); + + // Set values that will result in a fractional average + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 1000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 1001.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels3, + 1001.0, + now, + ) + .unwrap(); + + // Should return the average: (1000 + 1001 + 1001) / 3 = 1000.666... 
→ 1000 (truncated) + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1000); + } + + #[test] + fn it_should_only_average_matching_request_kinds() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + // Set values for different request kinds with the same server_id + let connect_labels = LabelSet::from([("request_kind", "connect"), ("server_id", "server1")]); + let announce_labels = LabelSet::from([("request_kind", "announce"), ("server_id", "server1")]); + let scrape_labels = LabelSet::from([("request_kind", "scrape"), ("server_id", "server1")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &connect_labels, + 1000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &announce_labels, + 2000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &scrape_labels, + 3000.0, + now, + ) + .unwrap(); + + // Each function should only return the value for its specific request kind + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1000); + assert_eq!(metrics.udp_avg_announce_processing_time_ns_averaged(), 2000); + assert_eq!(metrics.udp_avg_scrape_processing_time_ns_averaged(), 3000); + } + + #[test] + fn it_should_handle_single_server_averaged_metrics() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect"), ("server_id", "single_server")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 1234.0, + now, + ) + .unwrap(); + + // With only one server, the average should be the same as the single value + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1234); + } + } } diff --git a/packages/udp-tracker-server/src/statistics/mod.rs 
b/packages/udp-tracker-server/src/statistics/mod.rs index 939a41061..6bd35b9a1 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -1,6 +1,90 @@ pub mod event; -pub mod keeper; pub mod metrics; pub mod repository; pub mod services; -pub mod setup; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::unit::Unit; + +pub const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_requests_banned_total"; +pub const UDP_TRACKER_SERVER_IPS_BANNED_TOTAL: &str = "udp_tracker_server_ips_banned_total"; +pub const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_server_requests_received_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; +pub const UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; +pub const UDP_TRACKER_SERVER_ERRORS_TOTAL: &str = "udp_tracker_server_errors_total"; +pub const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS: &str = "udp_tracker_server_performance_avg_processing_time_ns"; +pub const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL: &str = + "udp_tracker_server_performance_avg_processed_requests_total"; + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP requests aborted")), + ); + + metrics.metric_collection.describe_counter( + 
&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP requests banned")), + ); + + metrics.metric_collection.describe_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of IPs banned from UDP requests")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of requests with connection ID errors")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP requests received")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP requests accepted")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP responses sent")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of errors processing UDP requests")), + ); + + metrics.metric_collection.describe_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + Some(Unit::Nanoseconds), + Some(MetricDescription::new("Average time to process a UDP request in nanoseconds")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new( + "Total number of UDP requests processed for the average performance metrics", + )), + ); + + metrics +} diff --git a/packages/udp-tracker-server/src/statistics/repository.rs 
b/packages/udp-tracker-server/src/statistics/repository.rs index 22e793036..c4c995b8a 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -2,7 +2,12 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use super::describe_metrics; use super::metrics::Metrics; /// A repository for the tracker metrics. @@ -21,7 +26,7 @@ impl Repository { #[must_use] pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(Metrics::default())), + stats: Arc::new(RwLock::new(describe_metrics())), } } @@ -29,145 +34,742 @@ impl Repository { self.stats.read().await } - pub async fn increase_udp_requests_aborted(&self) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. + pub async fn increase_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_aborted += 1; - drop(stats_lock); - } - pub async fn increase_udp_requests_banned(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_banned += 1; - drop(stats_lock); - } + let result = stats_lock.increase_counter(metric_name, labels, now); - pub async fn increase_udp4_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_requests += 1; drop(stats_lock); + + result } - pub async fn increase_udp4_connections(&self) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. 
+ pub async fn set_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.udp4_connections_handled += 1; + + let result = stats_lock.set_gauge(metric_name, labels, value, now); + drop(stats_lock); + + result } - pub async fn increase_udp4_announces(&self) { + pub async fn recalculate_udp_avg_processing_time_ns( + &self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { let mut stats_lock = self.stats.write().await; - stats_lock.udp4_announces_handled += 1; + + let new_avg = stats_lock.recalculate_udp_avg_processing_time_ns(req_processing_time, label_set, now); + drop(stats_lock); + + new_avg } +} - pub async fn increase_udp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_scrapes_handled += 1; - drop(stats_lock); +#[cfg(test)] +mod tests { + use core::f64; + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; + use torrust_tracker_metrics::metric_name; + + use super::*; + use crate::statistics::*; + use crate::CurrentClock; + + #[test] + fn it_should_implement_default() { + let repo = Repository::default(); + assert!(!std::ptr::eq(&repo.stats, &Repository::new().stats)); } - pub async fn increase_udp4_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_responses += 1; - drop(stats_lock); + #[test] + fn it_should_be_cloneable() { + let repo = Repository::new(); + let cloned_repo = repo.clone(); + assert!(!std::ptr::eq(&repo.stats, &cloned_repo.stats)); } - pub async fn increase_udp4_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_errors_handled += 1; - drop(stats_lock); + #[tokio::test] + async fn it_should_be_initialized_with_described_metrics() { + let repo = Repository::new(); + let 
stats = repo.get_stats().await; + + // Check that the described metrics are present + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL))); + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL))); + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS))); } - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) { - let mut stats_lock = self.stats.write().await; + #[tokio::test] + async fn it_should_return_a_read_guard_to_metrics() { + let repo = Repository::new(); + let stats = repo.get_stats().await; - let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_connections_handled = (stats_lock.udp4_connections_handled + stats_lock.udp6_connections_handled) as f64; + // Should be able to read metrics through the guard + assert_eq!(stats.udp_requests_aborted_total(), 0); + assert_eq!(stats.udp_requests_banned_total(), 0); + } - let previous_avg = stats_lock.udp_avg_connect_processing_time_ns; + #[tokio::test] + async fn 
it_should_allow_increasing_a_counter_metric_successfully() { + let repo = Repository::new(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; + // Increase a counter metric + let result = repo + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .await; - stats_lock.udp_avg_connect_processing_time_ns = new_avg.ceil() as u64; + assert!(result.is_ok()); - drop(stats_lock); + // Verify the counter was incremented + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted_total(), 1); } - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) { - let mut stats_lock = self.stats.write().await; + #[tokio::test] + async fn it_should_allow_increasing_a_counter_multiple_times() { + let repo = Repository::new(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Increase counter multiple times + for _ in 0..5 { + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .await + .unwrap(); + } - let req_processing_time = req_processing_time.as_nanos() as f64; + // Verify the counter was incremented correctly + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted_total(), 5); + } - let udp_announces_handled = (stats_lock.udp4_announces_handled + stats_lock.udp6_announces_handled) as f64; + #[tokio::test] + async fn it_should_allow_increasing_a_counter_with_different_labels() { + let repo = Repository::new(); + let now = CurrentClock::now(); - let previous_avg = stats_lock.udp_avg_announce_processing_time_ns; + let labels_ipv4 = LabelSet::from([("server_binding_address_ip_family", 
"inet")]); + let labels_ipv6 = LabelSet::from([("server_binding_address_ip_family", "inet6")]); - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; + // Increase counters with different labels + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels_ipv4, now) + .await + .unwrap(); - stats_lock.udp_avg_announce_processing_time_ns = new_avg.ceil() as u64; + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels_ipv6, now) + .await + .unwrap(); - drop(stats_lock); + // Verify both labeled metrics + let stats = repo.get_stats().await; + assert_eq!(stats.udp4_requests_received_total(), 1); + assert_eq!(stats.udp6_requests_received_total(), 1); } - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) { - let mut stats_lock = self.stats.write().await; + #[tokio::test] + async fn it_should_set_a_gauge_metric_successfully() { + let repo = Repository::new(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); - let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled + stats_lock.udp6_scrapes_handled) as f64; + // Set a gauge metric + let result = repo + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 42.0, now) + .await; - let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns; + assert!(result.is_ok()); - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; + // Verify the gauge was set + let stats = repo.get_stats().await; + assert_eq!(stats.udp_banned_ips_total(), 42); + } - 
stats_lock.udp_avg_scrape_processing_time_ns = new_avg.ceil() as u64; + #[tokio::test] + async fn it_should_overwrite_previous_value_when_setting_a_gauge_with_a_previous_value() { + let repo = Repository::new(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set gauge to initial value + repo.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 10.0, now) + .await + .unwrap(); + + // Overwrite with new value + repo.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 25.0, now) + .await + .unwrap(); + + // Verify the gauge has the new value + let stats = repo.get_stats().await; + assert_eq!(stats.udp_banned_ips_total(), 25); + } - drop(stats_lock); + #[tokio::test] + async fn it_should_allow_setting_a_gauge_with_different_labels() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + let labels_connect = LabelSet::from([("request_kind", "connect")]); + let labels_announce = LabelSet::from([("request_kind", "announce")]); + + // Set gauges with different labels + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels_connect, + 1000.0, + now, + ) + .await + .unwrap(); + + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels_announce, + 2000.0, + now, + ) + .await + .unwrap(); + + // Verify both labeled metrics + let stats = repo.get_stats().await; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_avg_connect_processing_time_ns = stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "connect")].into(), + ) + .unwrap_or_default() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_avg_announce_processing_time_ns = stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", 
"announce")].into(), + ) + .unwrap_or_default() as u64; + + assert_eq!(udp_avg_connect_processing_time_ns, 1000); + assert_eq!(udp_avg_announce_processing_time_ns, 2000); } - pub async fn increase_udp6_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_requests += 1; - drop(stats_lock); + #[tokio::test] + async fn it_should_recalculate_the_udp_average_connect_processing_time_in_nanoseconds_using_moving_average() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set initial average to 1000ns + let connect_labels = LabelSet::from([("request_kind", "connect")]); + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &connect_labels, + 1000.0, + now, + ) + .await + .unwrap(); + + // Calculate new average with processing time of 2000ns + // This will increment the processed requests counter from 0 to 1 + let processing_time = Duration::from_micros(2); + let new_avg = repo + .recalculate_udp_avg_processing_time_ns(processing_time, &connect_labels, now) + .await; + + // Moving average: previous_avg + (new_value - previous_avg) / processed_requests_total + // With processed_requests_total = 1 (incremented during the call): + // 1000 + (2000 - 1000) / 1 = 1000 + 1000 = 2000 + let expected_avg = 1000.0 + (2000.0 - 1000.0) / 1.0; + assert!( + (new_avg - expected_avg).abs() < 0.01, + "Expected {expected_avg}, got {new_avg}" + ); } - pub async fn increase_udp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_connections_handled += 1; - drop(stats_lock); + #[tokio::test] + async fn it_should_recalculate_the_udp_average_announce_processing_time_in_nanoseconds_using_moving_average() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set initial average to 500ns + let announce_labels = LabelSet::from([("request_kind", "announce")]); + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + 
&announce_labels, + 500.0, + now, + ) + .await + .unwrap(); + + // Calculate new average with processing time of 1500ns + // This will increment the processed requests counter from 0 to 1 + let processing_time = Duration::from_nanos(1500); + let new_avg = repo + .recalculate_udp_avg_processing_time_ns(processing_time, &announce_labels, now) + .await; + + // Moving average: previous_avg + (new_value - previous_avg) / processed_requests_total + // With processed_requests_total = 1 (incremented during the call): + // 500 + (1500 - 500) / 1 = 500 + 1000 = 1500 + let expected_avg = 500.0 + (1500.0 - 500.0) / 1.0; + assert!( + (new_avg - expected_avg).abs() < 0.01, + "Expected {expected_avg}, got {new_avg}" + ); } - pub async fn increase_udp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_announces_handled += 1; - drop(stats_lock); + #[tokio::test] + async fn it_should_recalculate_the_udp_average_scrape_processing_time_in_nanoseconds_using_moving_average() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set initial average to 800ns + let scrape_labels = LabelSet::from([("request_kind", "scrape")]); + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &scrape_labels, + 800.0, + now, + ) + .await + .unwrap(); + + // Calculate new average with processing time of 1200ns + // This will increment the processed requests counter from 0 to 1 + let processing_time = Duration::from_nanos(1200); + let new_avg = repo + .recalculate_udp_avg_processing_time_ns(processing_time, &scrape_labels, now) + .await; + + // Moving average: previous_avg + (new_value - previous_avg) / processed_requests_total + // With processed_requests_total = 1 (incremented during the call): + // 800 + (1200 - 800) / 1 = 800 + 400 = 1200 + let expected_avg = 800.0 + (1200.0 - 800.0) / 1.0; + assert!( + (new_avg - expected_avg).abs() < 0.01, + "Expected {expected_avg}, got {new_avg}" + ); } - pub async fn 
increase_udp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_scrapes_handled += 1; - drop(stats_lock); + #[tokio::test] + async fn recalculate_average_methods_should_handle_zero_connections_gracefully() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Test with zero connections (should not panic, should handle division by zero) + let processing_time = Duration::from_micros(1); + + let connect_labels = LabelSet::from([("request_kind", "connect")]); + let connect_avg = repo + .recalculate_udp_avg_processing_time_ns(processing_time, &connect_labels, now) + .await; + + let announce_labels = LabelSet::from([("request_kind", "announce")]); + let announce_avg = repo + .recalculate_udp_avg_processing_time_ns(processing_time, &announce_labels, now) + .await; + + let scrape_labels = LabelSet::from([("request_kind", "scrape")]); + let scrape_avg = repo + .recalculate_udp_avg_processing_time_ns(processing_time, &scrape_labels, now) + .await; + + // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 + // This should handle the division by zero case gracefully + assert!((connect_avg - 1000.0).abs() < f64::EPSILON); + assert!((announce_avg - 1000.0).abs() < f64::EPSILON); + assert!((scrape_avg - 1000.0).abs() < f64::EPSILON); } - pub async fn increase_udp6_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_responses += 1; - drop(stats_lock); + #[tokio::test] + async fn it_should_handle_concurrent_access() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Spawn multiple concurrent tasks + let mut handles = vec![]; + + for i in 0..10 { + let repo_clone = repo.clone(); + let handle = tokio::spawn(async move { + for _ in 0..5 { + repo_clone + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::empty(), + now, + ) + .await + .unwrap(); + } + i + }); + handles.push(handle); + } + + // Wait for all tasks to 
complete + for handle in handles { + handle.await.unwrap(); + } + + // Verify all increments were properly recorded + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted_total(), 50); // 10 tasks * 5 increments each } - pub async fn increase_udp6_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_errors_handled += 1; - drop(stats_lock); + #[tokio::test] + async fn it_should_handle_large_processing_times() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set up a connection + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + + // Test with very large processing time + let large_duration = Duration::from_secs(1); // 1 second = 1,000,000,000 ns + let connect_labels = LabelSet::from([("request_kind", "connect")]); + let new_avg = repo + .recalculate_udp_avg_processing_time_ns(large_duration, &connect_labels, now) + .await; + + // Should handle large numbers without overflow + assert!(new_avg > 0.0); + assert!(new_avg.is_finite()); + } + + #[tokio::test] + async fn it_should_maintain_consistency_across_operations() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Perform a series of operations + repo.increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::empty(), + now, + ) + .await + .unwrap(); + + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + &LabelSet::empty(), + 10.0, + now, + ) + .await + .unwrap(); + + repo.increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + &LabelSet::empty(), + now, + ) + .await + .unwrap(); + + // Check final state + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted_total(), 1); + assert_eq!(stats.udp_banned_ips_total(), 10); + 
assert_eq!(stats.udp_requests_banned_total(), 1); + } + + #[tokio::test] + async fn it_should_handle_error_cases_gracefully() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Test with invalid metric name (this should still work as metrics are created dynamically) + let result = repo + .increase_counter(&metric_name!("non_existent_metric"), &LabelSet::empty(), now) + .await; + + // Should succeed as metrics are created on demand + assert!(result.is_ok()); + + // Test with NaN value for gauge + let result = repo + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + &LabelSet::empty(), + f64::NAN, + now, + ) + .await; + + // Should handle NaN values + assert!(result.is_ok()); + } + + mod race_conditions { + + use core::f64; + use std::time::Duration; + + use tokio::task::JoinHandle; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_name; + + use super::*; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_handle_race_conditions_when_updating_udp_performance_metrics_in_parallel() { + const REQUESTS_PER_SERVER: usize = 100; + + // ** Set up test data and environment ** + + let repo = Repository::new(); + let now = CurrentClock::now(); + + let server1_labels = create_server_metric_labels("6868"); + let server2_labels = create_server_metric_labels("6969"); + + // ** Execute concurrent metric updates ** + + // Spawn concurrent tasks for server 1 with processing times [1000, 2000, 3000, 4000, 5000] ns + let server1_handles = spawn_server_tasks(&repo, &server1_labels, 1000, now, REQUESTS_PER_SERVER); + + // Spawn concurrent tasks for server 2 with processing times [2000, 3000, 4000, 5000, 6000] ns + let server2_handles = spawn_server_tasks(&repo, &server2_labels, 2000, now, REQUESTS_PER_SERVER); + + // Wait for both servers' results + let (server1_results, server2_results) = tokio::join!( + collect_concurrent_task_results(server1_handles), + 
collect_concurrent_task_results(server2_handles) + ); + + // ** Verify results and metrics ** + + // Verify correctness of concurrent operations + assert_server_results_are_valid(&server1_results, "Server 1", REQUESTS_PER_SERVER); + assert_server_results_are_valid(&server2_results, "Server 2", REQUESTS_PER_SERVER); + + let stats = repo.get_stats().await; + + // Verify each server's metrics individually + let server1_avg = assert_server_metrics_are_correct(&stats, &server1_labels, "Server 1", REQUESTS_PER_SERVER, 3000.0); + let server2_avg = assert_server_metrics_are_correct(&stats, &server2_labels, "Server 2", REQUESTS_PER_SERVER, 4000.0); + + // Verify relationship between servers + assert_server_metrics_relationship(server1_avg, server2_avg); + + // Verify each server's result consistency individually + assert_server_result_matches_stored_average(&server1_results, &stats, &server1_labels, "Server 1"); + assert_server_result_matches_stored_average(&server2_results, &stats, &server2_labels, "Server 2"); + + // Verify metric collection integrity + assert_metric_collection_integrity(&stats); + } + + // Test helper functions to hide implementation details + + fn create_server_metric_labels(port: &str) -> LabelSet { + LabelSet::from([ + ("request_kind", "connect"), + ("server_binding_address_ip_family", "inet"), + ("server_port", port), + ]) + } + + fn spawn_server_tasks( + repo: &Repository, + labels: &LabelSet, + base_processing_time_ns: usize, + now: DurationSinceUnixEpoch, + requests_per_server: usize, + ) -> Vec<JoinHandle<f64>> { + let mut handles = vec![]; + + for i in 0..requests_per_server { + let repo_clone = repo.clone(); + let labels_clone = labels.clone(); + let handle = tokio::spawn(async move { + let processing_time_ns = base_processing_time_ns + (i % 5) * 1000; + let processing_time = Duration::from_nanos(processing_time_ns as u64); + repo_clone + .recalculate_udp_avg_processing_time_ns(processing_time, &labels_clone, now) + .await + }); + 
handles.push(handle); + } + + handles + } + + async fn collect_concurrent_task_results(handles: Vec<tokio::task::JoinHandle<f64>>) -> Vec<f64> { + let mut server_results = Vec::new(); + + for handle in handles { + let result = handle.await.unwrap(); + server_results.push(result); + } + + server_results + } + + fn assert_server_results_are_valid(results: &[f64], server_name: &str, expected_count: usize) { + // Verify all tasks completed + assert_eq!( + results.len(), + expected_count, + "{server_name} should have {expected_count} results" + ); + + // Verify all results are valid numbers + for result in results { + assert!(result.is_finite(), "{server_name} result should be finite: {result}"); + assert!(*result > 0.0, "{server_name} result should be positive: {result}"); + } + } + + fn assert_server_metrics_are_correct( + stats: &Metrics, + labels: &LabelSet, + server_name: &str, + expected_request_count: usize, + expected_avg_ns: f64, + ) -> f64 { + // Verify request count + let processed_requests = get_processed_requests_count(stats, labels); + assert_eq!( + processed_requests, expected_request_count as u64, + "{server_name} should have processed {expected_request_count} requests" + ); + + // Verify average processing time is within expected range + let avg_processing_time = get_average_processing_time(stats, labels); + assert!( + (avg_processing_time - expected_avg_ns).abs() < 50.0, + "{server_name} average should be ~{expected_avg_ns}ns (±50ns), got {avg_processing_time}ns" + ); + + avg_processing_time + } + + fn assert_server_metrics_relationship(server1_avg: f64, server2_avg: f64) { + const MIN_DIFFERENCE_NS: f64 = 950.0; + + assert_averages_are_significantly_different(server1_avg, server2_avg, MIN_DIFFERENCE_NS); + assert_server_ordering_is_correct(server1_avg, server2_avg); + } + + fn assert_averages_are_significantly_different(avg1: f64, avg2: f64, min_difference: f64) { + let difference = (avg1 - avg2).abs(); + assert!( + difference > min_difference, + 
"Server averages should differ by more than {min_difference}ns, but difference was {difference}ns" + ); + } + + fn assert_server_ordering_is_correct(server1_avg: f64, server2_avg: f64) { + // Server 2 should have higher average since it has higher processing times [2000-6000] vs [1000-5000] + assert!( + server2_avg > server1_avg, + "Server 2 average ({server2_avg}ns) should be higher than Server 1 ({server1_avg}ns) due to higher processing time ranges" + ); + } + + fn assert_server_result_matches_stored_average(results: &[f64], stats: &Metrics, labels: &LabelSet, server_name: &str) { + let final_avg = get_average_processing_time(stats, labels); + let last_result = results.last().copied().unwrap(); + + assert!( + (last_result - final_avg).abs() <= f64::EPSILON, + "{server_name} last result ({last_result}) should match final average ({final_avg}) exactly" + ); + } + + fn assert_metric_collection_integrity(stats: &Metrics) { + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL))); + } + + fn get_processed_requests_count(stats: &Metrics, labels: &LabelSet) -> u64 { + stats + .metric_collection + .get_counter_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + labels, + ) + .unwrap() + .value() + } + + fn get_average_processing_time(stats: &Metrics, labels: &LabelSet) -> f64 { + stats + .metric_collection + .get_gauge_value(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), labels) + .unwrap() + .value() + } } } diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index a16685077..0eac01270 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -39,9 +39,7 @@ use 
std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_udp_tracker_core::services::banning::BanService; -use tokio::sync::RwLock; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -52,7 +50,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: AggregateActiveSwarmMetadata, /// Application level metrics. Usage statistics/metrics. /// @@ -63,37 +61,15 @@ pub struct TrackerMetrics { /// It returns all the [`TrackerMetrics`] pub async fn get_metrics( in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, - ban_service: Arc<RwLock<BanService>>, stats_repository: Arc<Repository>, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { - // UDP - udp_requests_aborted: stats.udp_requests_aborted, - udp_requests_banned: stats.udp_requests_banned, - udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: stats.udp_avg_scrape_processing_time_ns, - // UDPv4 - udp4_requests: stats.udp4_requests, - udp4_connections_handled: stats.udp4_connections_handled, - udp4_announces_handled: stats.udp4_announces_handled, - udp4_scrapes_handled: 
stats.udp4_scrapes_handled, - udp4_responses: stats.udp4_responses, - udp4_errors_handled: stats.udp4_errors_handled, - // UDPv6 - udp6_requests: stats.udp6_requests, - udp6_connections_handled: stats.udp6_connections_handled, - udp6_announces_handled: stats.udp6_announces_handled, - udp6_scrapes_handled: stats.udp6_scrapes_handled, - udp6_responses: stats.udp6_responses, - udp6_errors_handled: stats.udp6_errors_handled, + metric_collection: stats.metric_collection.clone(), }, } } @@ -104,43 +80,25 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; - use bittorrent_udp_tracker_core::services::banning::BanService; - use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; - use tokio::sync::RwLock; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - use crate::statistics; + use crate::statistics::describe_metrics; + use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; - pub fn tracker_configuration() -> Configuration { - configuration::ephemeral() - } - #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let config = tracker_configuration(); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let (_udp_server_stats_event_sender, udp_server_stats_repository) = - statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_server_stats_repository = Arc::new(udp_server_stats_repository); + let stats_repository = Arc::new(Repository::new()); - let tracker_metrics = get_metrics( - in_memory_torrent_repository.clone(), - ban_service.clone(), 
- udp_server_stats_repository.clone(), - ) - .await; + let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), stats_repository.clone()).await; assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), - protocol_metrics: statistics::metrics::Metrics::default(), + torrents_metrics: AggregateActiveSwarmMetadata::default(), + protocol_metrics: describe_metrics(), } ); } diff --git a/packages/udp-tracker-server/src/statistics/setup.rs b/packages/udp-tracker-server/src/statistics/setup.rs deleted file mode 100644 index d3114a75e..000000000 --- a/packages/udp-tracker-server/src/statistics/setup.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! Setup for the tracker statistics. -//! -//! The [`factory`] function builds the structs needed for handling the tracker metrics. -use crate::statistics; - -/// It builds the structs needed for handling the tracker metrics. -/// -/// It returns: -/// -/// - An statistics event [`Sender`](crate::statistics::event::sender::Sender) that allows you to send events related to statistics. -/// - An statistics [`Repository`](crate::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. -/// -/// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics -/// events are sent are received but not dispatched to the handler. 
-#[must_use] -pub fn factory( - tracker_usage_statistics: bool, -) -> ( - Option<Box<dyn statistics::event::sender::Sender>>, - statistics::repository::Repository, -) { - let mut stats_event_sender = None; - - let mut stats_tracker = statistics::keeper::Keeper::new(); - - if tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_event_listener()); - } - - (stats_event_sender, stats_tracker.repository) -} - -#[cfg(test)] -mod test { - use super::factory; - - #[tokio::test] - async fn should_not_send_any_event_when_statistics_are_disabled() { - let tracker_usage_statistics = false; - - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); - - assert!(stats_event_sender.is_none()); - } - - #[tokio::test] - async fn should_send_events_when_statistics_are_enabled() { - let tracker_usage_statistics = true; - - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); - - assert!(stats_event_sender.is_some()); - } -} diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index 4cb23621d..350f3b8eb 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -32,7 +32,7 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac match response { Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), + _ => panic!("error connecting to udp server {response:?}"), } } @@ -59,7 +59,9 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req let response = Response::parse_bytes(&response, true).unwrap(); - assert_eq!(get_error_response_message(&response).unwrap(), "Protocol identifier missing"); + assert!(get_error_response_message(&response) + .unwrap() + .contains("Protocol identifier missing")); env.stop().await; } @@ -165,7 +167,7 @@ mod 
receiving_an_announce_request { bytes_uploaded: NumberOfBytes(0i64.into()), bytes_left: NumberOfBytes(0i64.into()), event: AnnounceEvent::Started.into(), - ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + ip_address: Ipv4Addr::UNSPECIFIED.into(), key: PeerKey::new(0i32), peers_wanted: NumberOfPeers(1i32.into()), port: Port(port.into()), @@ -249,7 +251,7 @@ mod receiving_an_announce_request { let transaction_id = tx_id.0.to_string(); assert!( - logs_contains_a_line_with(&["ERROR", "UDP TRACKER", &transaction_id.to_string()]), + logs_contains_a_line_with(&["ERROR", "UDP TRACKER", &transaction_id]), "Expected logs to contain: ERROR ... UDP TRACKER ... transaction_id={transaction_id}" ); } @@ -268,10 +270,10 @@ mod receiving_an_announce_request { let udp_requests_banned_before = env .container .udp_tracker_server_container - .udp_server_stats_repository + .stats_repository .get_stats() .await - .udp_requests_banned; + .udp_requests_banned_total(); // This should return a timeout error match client.send(announce_request.into()).await { @@ -284,10 +286,10 @@ mod receiving_an_announce_request { let udp_requests_banned_after = env .container .udp_tracker_server_container - .udp_server_stats_repository + .stats_repository .get_stats() .await - .udp_requests_banned; + .udp_requests_banned_total(); let udp_banned_ips_total_after = ban_service.read().await.get_banned_ips_total(); // UDP counter for banned requests should be increased by 1 diff --git a/project-words.txt b/project-words.txt new file mode 100644 index 000000000..9458ebbf3 --- /dev/null +++ b/project-words.txt @@ -0,0 +1,271 @@ +Addrs +adduser +alekitto +analyse +appuser +Arvid +ASMS +asyn +autoclean +AUTOINCREMENT +autolinks +automock +Avicora +Azureus +backlinks +bdecode +bencode +bencoded +bencoding +behaviour +beps +binascii +binstall +Bitflu +bools +Bragilevsky +bufs +buildid +Buildx +byteorder +callgrind +camino +canonicalize +canonicalized +certbot +chrono +Cinstrument +ciphertext +clippy +cloneable +codecov 
+codegen +commiter +completei +Condvar +connectionless +Containerfile +conv +curr +cvar +cyclomatic +Cyberneering +dashmap +datagram +datetime +debuginfo +Deque +Dijke +distroless +dockerhub +downloadedi +dtolnay +elif +endianness +Eray +filesd +flamegraph +formatjson +Freebox +Frostegård +gecos +Gibibytes +Grcov +hasher +healthcheck +heaptrack +hexlify +hlocalhost +Hydranode +hyperthread +Icelake +iiiiiiiiiiiiiiiiiiiid +imdl +impls +incompletei +infohash +infohashes +infoschema +Intermodal +intervali +Joakim +kallsyms +Karatay +kcachegrind +kexec +keyout +Kibibytes +kptr +lcov +leecher +leechers +libsqlite +libtorrent +libz +LOGNAME +Lphant +matchmakes +Mebibytes +metainfo +middlewares +misresolved +mockall +multimap +myacicontext +ñaca +Naim +nanos +newkey +nextest +nocapture +nologin +nonroot +Norberg +numwant +nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7 +obra +oneshot +ostr +Pando +peekable +peerlist +penalise +programatik +proot +proto +Quickstart +Radeon +Rakshasa +Rasterbar +realpath +reannounce +Registar +repomix +repr +reqs +reqwest +rerequests +ringbuf +ringsize +rngs +rosegment +routable +rstest +rusqlite +rustc +RUSTDOCFLAGS +RUSTFLAGS +rustfmt +Rustls +Ryzen +Seedable +serde +Shareaza +sharktorrent +SHLVL +skiplist +slowloris +socketaddr +specialised +sqllite +subsec +Swatinem +Swiftbit +taiki +tdyne +Tebibytes +tempfile +testcontainers +Tera +thiserror +tlsv +toplevel +Torrentstorm +torrust +torrustracker +trackerid +Trackon +typenum +udpv +Unamed +underflows +Unsendable +untuple +uroot +usize +Vagaa +valgrind +Vitaly +vmlinux +Vuze +Weidendorfer +Werror +whitespaces +Xacrimon +XBTT +Xdebug +Xeon +Xtorrent +Xunlei +xxxxxxxxxxxxxxxxxxxxd +yyyyyyyyyyyyyyyyyyyyd +zerocopy +Aideq +autoremove +CALLSITE +Dihc +Dmqcd +QJSF +Glrg +Irwe +Uninit +Unparker +eventfd +fastrand +fdbased +fdget +fput +iiiiiiiiiiiiiiiippe +iiiiiiiiiiiiiiiipp +iiiiiiiiiiiiiiip +iipp +iiiipp +jdbe +ksys +llist +mmap +mprotect +nonblocking +peersld +pkey +porti +prealloc +println +shellcheck 
+sockfd +subkey +sysmalloc +sysret +timespec +toki +torru +ttwu +uninit +unparked +unsync +vtable +wakelist +wakeup +actix +iterationsadd +josecelano +mysqladmin +setgroups +taplo +trixie +adrs +Agentic +agentskills +frontmatter +MSRV +rustup diff --git a/scripts/install-git-hooks.sh b/scripts/install-git-hooks.sh new file mode 100755 index 000000000..478377791 --- /dev/null +++ b/scripts/install-git-hooks.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# Install project Git hooks from .githooks/ into .git/hooks/. +# +# Usage: +# ./scripts/install-git-hooks.sh +# +# Run once after cloning the repository. Re-run to update hooks after +# they change. + +set -euo pipefail + +REPO_ROOT="$(git rev-parse --show-toplevel)" +HOOKS_SRC="${REPO_ROOT}/.githooks" +HOOKS_DST="$(git rev-parse --git-path hooks)" +mkdir -p "${HOOKS_DST}" + +if [ ! -d "${HOOKS_SRC}" ]; then + echo "ERROR: .githooks/ directory not found at ${HOOKS_SRC}" + exit 1 +fi + +installed=0 + +for hook in "${HOOKS_SRC}"/*; do + hook_name="$(basename "${hook}")" + dest="${HOOKS_DST}/${hook_name}" + + cp "${hook}" "${dest}" + chmod +x "${dest}" + + echo "Installed: ${hook_name} → .git/hooks/${hook_name}" + installed=$((installed + 1)) +done + +echo "" +echo "==========================================" +echo "SUCCESS: ${installed} hook(s) installed." +echo "==========================================" diff --git a/scripts/pre-commit.sh b/scripts/pre-commit.sh new file mode 100755 index 000000000..c360ad6b6 --- /dev/null +++ b/scripts/pre-commit.sh @@ -0,0 +1,83 @@ +#!/bin/bash +# Pre-commit verification script +# Run all mandatory checks before committing changes. +# +# Usage: +# ./scripts/pre-commit.sh +# +# Expected runtime: ~3 minutes on a modern developer machine. +# AI agents: set a per-command timeout of at least 5 minutes before invoking this script. +# +# All steps must pass (exit 0) before committing. 
+ +set -euo pipefail + +# ============================================================================ +# STEPS +# ============================================================================ +# Each step: "description|success_message|command" + +declare -a STEPS=( + "Checking for unused dependencies (cargo machete)|No unused dependencies found|cargo machete" + "Running all linters|All linters passed|linter all" + "Running documentation tests|Documentation tests passed|cargo test --doc --workspace" + "Running all tests|All tests passed|cargo test --tests --benches --examples --workspace --all-targets --all-features" +) + +# ============================================================================ +# HELPER FUNCTIONS +# ============================================================================ + +format_time() { + local total_seconds=$1 + local minutes=$((total_seconds / 60)) + local seconds=$((total_seconds % 60)) + if [ "$minutes" -gt 0 ]; then + echo "${minutes}m ${seconds}s" + else + echo "${seconds}s" + fi +} + +run_step() { + local step_number=$1 + local total_steps=$2 + local description=$3 + local success_message=$4 + local command=$5 + + echo "[Step ${step_number}/${total_steps}] ${description}..." + + local step_start=$SECONDS + local -a cmd_array + read -ra cmd_array <<< "${command}" + "${cmd_array[@]}" + local step_elapsed=$((SECONDS - step_start)) + + echo "PASSED: ${success_message} ($(format_time "${step_elapsed}"))" + echo +} + +trap 'echo ""; echo "=========================================="; echo "FAILED: Pre-commit checks failed!"; echo "Fix the errors above before committing."; echo "=========================================="; exit 1' ERR + +# ============================================================================ +# MAIN +# ============================================================================ + +TOTAL_START=$SECONDS +TOTAL_STEPS=${#STEPS[@]} + +echo "Running pre-commit checks..." 
+echo + +for i in "${!STEPS[@]}"; do + IFS='|' read -r description success_message command <<< "${STEPS[$i]}" + run_step $((i + 1)) "${TOTAL_STEPS}" "${description}" "${success_message}" "${command}" +done + +TOTAL_ELAPSED=$((SECONDS - TOTAL_START)) +echo "==========================================" +echo "SUCCESS: All pre-commit checks passed! ($(format_time "${TOTAL_ELAPSED}"))" +echo "==========================================" +echo +echo "You can now safely stage and commit your changes." diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index 865ea224e..33fcf713a 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -12,6 +12,9 @@ private = false [core.database] driver = "mysql" +# If the MySQL password includes reserved URL characters (for example + or /), +# percent-encode it in the DSN password component. +# Example: password a+b/c -> a%2Bb%2Fc path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" # Uncomment to enable services diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 96addaf87..17a73a1d2 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -7,14 +7,30 @@ schema_version = "2.0.0" threshold = "info" [core] +inactive_peer_cleanup_interval = 120 listed = false private = false +[core.tracker_policy] +max_peer_timeout = 60 +persistent_torrent_completed_stat = true +remove_peerless_torrents = true + +[[udp_trackers]] +bind_address = "0.0.0.0:6868" +tracker_usage_statistics = true + [[udp_trackers]] bind_address = "0.0.0.0:6969" +tracker_usage_statistics = true [[http_trackers]] bind_address = "0.0.0.0:7070" +tracker_usage_statistics = true + +[[http_trackers]] +bind_address = "0.0.0.0:7171" +tracker_usage_statistics = true [http_api] bind_address = 
"0.0.0.0:1212" diff --git a/src/AGENTS.md b/src/AGENTS.md new file mode 100644 index 000000000..88296f152 --- /dev/null +++ b/src/AGENTS.md @@ -0,0 +1,109 @@ +# `src/` — Binary and Library Entry Points + +This directory contains only the top-level wiring of the application: the binary entry points, +the bootstrap sequence, and the dependency-injection container. All domain logic lives in +`packages/`; this directory merely assembles and launches it. + +## File Map + +| Path | Purpose | +| --------------------------- | ------------------------------------------------------------------------------------------------------------------------- | +| `main.rs` | Binary entry point. Calls `app::run()`, waits for Ctrl-C, then cancels jobs and waits for graceful shutdown. | +| `lib.rs` | Library crate root and crate-level documentation. Re-exports the public API used by integration tests and other binaries. | +| `app.rs` | `run()` and `start()` — orchestrates the full startup sequence (setup → load data from DB → start jobs). | +| `container.rs` | `AppContainer` — dependency-injection struct that holds `Arc`-wrapped instances of every per-layer container. | +| `bootstrap/app.rs` | `setup()` — loads config, validates it, initializes logging and global services, builds `AppContainer`. | +| `bootstrap/config.rs` | `initialize_configuration()` — reads config from the environment / file. | +| `bootstrap/jobs/` | One module per service: each module exposes a starter function called from `app::start_jobs`. | +| `bootstrap/jobs/manager.rs` | `JobManager` — collects `JoinHandle`s, owns the `CancellationToken`, and drives graceful shutdown. | +| `bin/e2e_tests_runner.rs` | Binary that runs E2E tests by delegating to `src/console/ci/`. | +| `bin/http_health_check.rs` | Minimal HTTP health-check binary used inside containers (avoids curl/wget dependency). | +| `bin/profiling.rs` | Binary for Valgrind / kcachegrind profiling sessions. 
| +| `console/` | Internal console apps (`ci/e2e`, `profiling`) used by the extra binaries above. | + +## Bootstrap Flow + +```text +main() + └─ app::run() + ├─ bootstrap::app::setup() + │ ├─ bootstrap::config::initialize_configuration() ← reads TOML / env vars + │ ├─ configuration.validate() ← panics on invalid config + │ ├─ initialize_global_services() ← logging, crypto seed + │ └─ AppContainer::initialize(&configuration) ← builds all containers + │ + └─ app::start(&config, &app_container) + ├─ load_data_from_database() ← peer keys, whitelist, metrics + └─ start_jobs() + ├─ start_swarm_coordination_registry_event_listener + ├─ start_tracker_core_event_listener + ├─ start_http_core_event_listener + ├─ start_udp_core_event_listener + ├─ start_udp_server_stats_event_listener + ├─ start_udp_server_banning_event_listener + ├─ start_the_udp_instances ← one job per configured UDP bind address + ├─ start_the_http_instances ← one job per configured HTTP bind address + ├─ start_torrent_cleanup + ├─ start_peers_inactivity_update + ├─ start_the_http_api + └─ start_health_check_api ← always started +``` + +Shutdown (`main`): receives `Ctrl-C` → calls `jobs.cancel()` (fires the `CancellationToken`) → +waits up to 10 seconds for all `JoinHandle`s to complete. + +## `AppContainer` + +`AppContainer` (`container.rs`) is a plain struct — not a framework, not a trait object tree. 
+It holds one `Arc<…Container>` per architectural layer: + +| Field | Layer / Package | +| ------------------------------------------------------------------------------------------------ | -------------------------------------------------------- | +| `registar` | `server-lib` — tracks active server socket registrations | +| `swarm_coordination_registry_container` | `swarm-coordination-registry` | +| `tracker_core_container` | `tracker-core` | +| `http_tracker_core_services` / `http_tracker_instance_containers` | `http-tracker-core` | +| `udp_tracker_core_services` / `udp_tracker_server_container` / `udp_tracker_instance_containers` | `udp-tracker-core` / `udp-tracker-server` | + +`AppContainer::initialize` is the only place where domain containers are constructed. +Every `bootstrap/jobs/` starter receives an `&Arc<AppContainer>` and pulls out exactly what it +needs — no globals, no lazy statics for domain objects. + +## `JobManager` + +`JobManager` (`bootstrap/jobs/manager.rs`) is a thin wrapper around a `Vec<Job>` (each `Job` +holds a name + `JoinHandle<()>`) and a shared `CancellationToken`: + +- `push(name, handle)` — registers a job. +- `push_opt(name, handle)` — convenience for jobs that may be disabled. +- `cancel()` — fires the token; all jobs that own a clone of it will observe cancellation. +- `wait_for_all(timeout)` — joins all handles with a timeout, logging warnings for any that + exceed it. + +## Adding a New Service + +When wiring a new server or background task, follow this checklist in order: + +1. **Package** — add the new crate under `packages/` with the appropriate layer prefix. +2. **Container field** — add an `Arc<NewServiceContainer>` field to `AppContainer` and + initialize it inside `AppContainer::initialize`. +3. **Job launcher** — create `src/bootstrap/jobs/new_service.rs` and register it in + `src/bootstrap/jobs/mod.rs`. +4. **Wire into `app::start_jobs`** — call the new starter function and push its handle to + `job_manager`. +5. 
**Graceful shutdown** — ensure the new service listens for the `CancellationToken` passed + from `JobManager`. +6. **Config guard** — if the service is optional, gate the starter behind the appropriate + config field and use `push_opt`. + +## Key Rules for This Directory + +- **No domain logic here.** This directory is pure wiring. Business rules belong in `packages/`. +- **No globals for domain objects.** All state flows through `AppContainer`. +- **Startup errors panic.** `bootstrap::app::setup()` panics on invalid config or a bad crypto + seed — this is intentional (fail fast before binding ports). +- **Health check always starts.** The health-check API job is unconditional — do not gate it + behind a config flag. +- **`lib.rs` is the integration-test surface.** Integration tests import + `torrust_tracker_lib::…`. Keep the public API in `lib.rs` stable; avoid leaking internal + bootstrap details. diff --git a/src/app.rs b/src/app.rs index 60e907a88..2149a6d4c 100644 --- a/src/app.rs +++ b/src/app.rs @@ -23,14 +23,30 @@ //! - Tracker REST API: the tracker API can be enabled/disabled. 
use std::sync::Arc; -use tokio::task::JoinHandle; -use torrust_server_lib::registar::Registar; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::{Configuration, HttpTracker, UdpTracker}; use tracing::instrument; -use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::bootstrap::jobs::manager::JobManager; +use crate::bootstrap::jobs::{ + self, activity_metrics_updater, health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker, +}; +use crate::bootstrap::{self}; use crate::container::AppContainer; +use crate::CurrentClock; +pub async fn run() -> (Arc<AppContainer>, JobManager) { + let (config, app_container) = bootstrap::app::setup(); + + let app_container = Arc::new(app_container); + + let jobs = start(&config, &app_container).await; + + (app_container, jobs) +} + +/// Starts the tracker application. +/// /// # Panics /// /// Will panic if: @@ -38,103 +54,243 @@ use crate::container::AppContainer; /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. 
#[instrument(skip(config, app_container))] -pub async fn start(config: &Configuration, app_container: &Arc<AppContainer>) -> Vec<JoinHandle<()>> { +pub async fn start(config: &Configuration, app_container: &Arc<AppContainer>) -> JobManager { + warn_if_no_services_enabled(config); + + load_data_from_database(config, app_container).await; + + start_jobs(config, app_container).await +} + +async fn load_data_from_database(config: &Configuration, app_container: &Arc<AppContainer>) { + load_peer_keys(config, app_container).await; + load_whitelisted_torrents(config, app_container).await; + load_torrent_metrics(config, app_container).await; +} + +async fn start_jobs(config: &Configuration, app_container: &Arc<AppContainer>) -> JobManager { + let mut job_manager = JobManager::new(); + + start_swarm_coordination_registry_event_listener(config, app_container, &mut job_manager); + start_tracker_core_event_listener(config, app_container, &mut job_manager); + start_http_core_event_listener(config, app_container, &mut job_manager); + start_udp_core_event_listener(config, app_container, &mut job_manager); + start_udp_server_stats_event_listener(config, app_container, &mut job_manager); + start_udp_server_banning_event_listener(app_container, &mut job_manager); + + start_the_udp_instances(config, app_container, &mut job_manager).await; + start_the_http_instances(config, app_container, &mut job_manager).await; + + start_torrent_cleanup(config, app_container, &mut job_manager); + start_peers_inactivity_update(config, app_container, &mut job_manager); + + start_the_http_api(config, app_container, &mut job_manager).await; + start_health_check_api(config, app_container, &mut job_manager).await; + + job_manager +} + +fn warn_if_no_services_enabled(config: &Configuration) { if config.http_api.is_none() && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) && (config.http_trackers.is_none() || config.http_trackers.as_ref().map_or(true, 
std::vec::Vec::is_empty)) { tracing::warn!("No services enabled in configuration"); } +} - let mut jobs: Vec<JoinHandle<()>> = Vec::new(); - - let registar = Registar::default(); - - // Load peer keys +async fn load_peer_keys(config: &Configuration, app_container: &Arc<AppContainer>) { if config.core.private { app_container + .tracker_core_container .keys_handler .load_peer_keys_from_database() .await .expect("Could not retrieve keys from database."); } +} - // Load whitelisted torrents +async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc<AppContainer>) { if config.core.listed { app_container + .tracker_core_container .whitelist_manager .load_whitelist_from_database() .await .expect("Could not load whitelist from database."); } +} + +async fn load_torrent_metrics(config: &Configuration, app_container: &Arc<AppContainer>) { + if config.core.tracker_policy.persistent_torrent_completed_stat { + bittorrent_tracker_core::statistics::persisted::load_persisted_metrics( + &app_container.tracker_core_container.stats_repository, + &app_container.tracker_core_container.db_downloads_metric_repository, + CurrentClock::now(), + ) + .await + .expect("Could not load persisted metrics from database."); + } +} + +fn start_swarm_coordination_registry_event_listener( + config: &Configuration, + app_container: &Arc<AppContainer>, + job_manager: &mut JobManager, +) { + job_manager.push_opt( + "swarm_coordination_registry_event_listener", + jobs::torrent_repository::start_event_listener(config, app_container, job_manager.new_cancellation_token()), + ); +} + +fn start_tracker_core_event_listener(config: &Configuration, app_container: &Arc<AppContainer>, job_manager: &mut JobManager) { + job_manager.push_opt( + "tracker_core_event_listener", + jobs::tracker_core::start_event_listener(config, app_container, job_manager.new_cancellation_token()), + ); +} + +fn start_http_core_event_listener(config: &Configuration, app_container: &Arc<AppContainer>, job_manager: 
&mut JobManager) { + job_manager.push_opt( + "http_core_event_listener", + jobs::http_tracker_core::start_event_listener(config, app_container, job_manager.new_cancellation_token()), + ); +} + +fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc<AppContainer>, job_manager: &mut JobManager) { + job_manager.push_opt( + "udp_core_event_listener", + jobs::udp_tracker_core::start_event_listener(config, app_container, job_manager.new_cancellation_token()), + ); +} + +fn start_udp_server_stats_event_listener( + config: &Configuration, + app_container: &Arc<AppContainer>, + job_manager: &mut JobManager, +) { + job_manager.push_opt( + "udp_server_stats_event_listener", + jobs::udp_tracker_server::start_stats_event_listener(config, app_container, job_manager.new_cancellation_token()), + ); +} + +fn start_udp_server_banning_event_listener(app_container: &Arc<AppContainer>, job_manager: &mut JobManager) { + job_manager.push( + "udp_server_banning_event_listener", + jobs::udp_tracker_server::start_banning_event_listener(app_container, job_manager.new_cancellation_token()), + ); +} - // Start the UDP blocks +async fn start_the_udp_instances(config: &Configuration, app_container: &Arc<AppContainer>, job_manager: &mut JobManager) { if let Some(udp_trackers) = &config.udp_trackers { - for udp_tracker_config in udp_trackers { + for (idx, udp_tracker_config) in udp_trackers.iter().enumerate() { if config.core.private { tracing::warn!( "Could not start UDP tracker on: {} while in private mode. 
UDP is not safe for private trackers!", udp_tracker_config.bind_address ); } else { - let udp_tracker_config = Arc::new(udp_tracker_config.clone()); - let udp_tracker_container = Arc::new(app_container.udp_tracker_container(&udp_tracker_config)); - let udp_tracker_server_container = Arc::new(app_container.udp_tracker_server_container()); - - jobs.push( - udp_tracker::start_job(udp_tracker_container, udp_tracker_server_container, registar.give_form()).await, - ); + start_udp_instance(idx, udp_tracker_config, app_container, job_manager).await; } } } else { tracing::info!("No UDP blocks in configuration"); } +} + +async fn start_udp_instance( + idx: usize, + udp_tracker_config: &UdpTracker, + app_container: &Arc<AppContainer>, + job_manager: &mut JobManager, +) { + let udp_tracker_container = app_container + .udp_tracker_container(udp_tracker_config.bind_address) + .expect("Could not create UDP tracker container"); + let udp_tracker_server_container = app_container.udp_tracker_server_container(); + + let handle = udp_tracker::start_job( + udp_tracker_container, + udp_tracker_server_container, + app_container.registar.give_form(), + ) + .await; + + job_manager.push(format!("udp_instance_{}_{}", idx, udp_tracker_config.bind_address), handle); +} - // Start the HTTP blocks +async fn start_the_http_instances(config: &Configuration, app_container: &Arc<AppContainer>, job_manager: &mut JobManager) { if let Some(http_trackers) = &config.http_trackers { - for http_tracker_config in http_trackers { - let http_tracker_config = Arc::new(http_tracker_config.clone()); - let http_tracker_container = Arc::new(app_container.http_tracker_container(&http_tracker_config)); - - if let Some(job) = http_tracker::start_job( - http_tracker_container, - registar.give_form(), - torrust_axum_http_tracker_server::Version::V1, - ) - .await - { - jobs.push(job); - } + for (idx, http_tracker_config) in http_trackers.iter().enumerate() { + start_http_instance(idx, http_tracker_config, app_container, 
job_manager).await; } } else { tracing::info!("No HTTP blocks in configuration"); } +} + +async fn start_http_instance( + idx: usize, + http_tracker_config: &HttpTracker, + app_container: &Arc<AppContainer>, + job_manager: &mut JobManager, +) { + let http_tracker_container = app_container + .http_tracker_container(http_tracker_config.bind_address) + .expect("Could not create HTTP tracker container"); + + if let Some(handle) = http_tracker::start_job( + http_tracker_container, + app_container.registar.give_form(), + torrust_axum_http_tracker_server::Version::V1, + ) + .await + { + job_manager.push(format!("http_instance_{}_{}", idx, http_tracker_config.bind_address), handle); + } +} - // Start HTTP API +async fn start_the_http_api(config: &Configuration, app_container: &Arc<AppContainer>, job_manager: &mut JobManager) { if let Some(http_api_config) = &config.http_api { let http_api_config = Arc::new(http_api_config.clone()); - let http_api_container = Arc::new(app_container.tracker_http_api_container(&http_api_config)); + let http_api_container = app_container.tracker_http_api_container(&http_api_config); if let Some(job) = tracker_apis::start_job( http_api_container, - registar.give_form(), + app_container.registar.give_form(), torrust_axum_rest_tracker_api_server::Version::V1, ) .await { - jobs.push(job); + job_manager.push("http_api", job); } } else { tracing::info!("No API block in configuration"); } +} - // Start runners to remove torrents without peers, every interval +fn start_torrent_cleanup(config: &Configuration, app_container: &Arc<AppContainer>, job_manager: &mut JobManager) { if config.core.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(&config.core, &app_container.torrents_manager)); + let handle = torrent_cleanup::start_job(&config.core, &app_container.tracker_core_container.torrents_manager); + + job_manager.push("torrent_cleanup", handle); + } +} + +fn start_peers_inactivity_update(config: &Configuration, app_container: 
&Arc<AppContainer>, job_manager: &mut JobManager) { + if config.core.tracker_usage_statistics { + let handle = activity_metrics_updater::start_job(config, app_container); + + job_manager.push("peers_inactivity_update", handle); + } else { + tracing::info!("Peers inactivity update job is disabled."); } +} - // Start Health Check API - jobs.push(health_check_api::start_job(&config.health_check_api, registar.entries()).await); +async fn start_health_check_api(config: &Configuration, app_container: &Arc<AppContainer>, job_manager: &mut JobManager) { + let handle = health_check_api::start_job(&config.health_check_api, app_container.registar.entries()).await; - jobs + job_manager.push("health_check_api", handle); } diff --git a/src/bootstrap/jobs/activity_metrics_updater.rs b/src/bootstrap/jobs/activity_metrics_updater.rs new file mode 100644 index 000000000..9bbdc3f9b --- /dev/null +++ b/src/bootstrap/jobs/activity_metrics_updater.rs @@ -0,0 +1,27 @@ +//! Job that runs a task on intervals to update peers' activity metrics. +use std::sync::Arc; +use std::time::Duration; + +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; +use crate::CurrentClock; + +#[must_use] +pub fn start_job(config: &Configuration, app_container: &Arc<AppContainer>) -> JoinHandle<()> { + torrust_tracker_swarm_coordination_registry::statistics::activity_metrics_updater::start_job( + &app_container.swarm_coordination_registry_container.swarms.clone(), + &app_container.swarm_coordination_registry_container.stats_repository.clone(), + peer_inactivity_cutoff_timestamp(config.core.tracker_policy.max_peer_timeout), + ) +} + +/// Returns the timestamp of the cutoff for inactive peers. +/// +/// Peers that has not been updated for more than `max_peer_timeout` seconds are +/// considered inactive. 
+fn peer_inactivity_cutoff_timestamp(max_peer_timeout: u32) -> Duration { + CurrentClock::now_sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default() +} diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index 5d342a7f0..7c529fadd 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -3,7 +3,7 @@ //! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) //! function starts the Health Check REST API. //! -//! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) +//! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) //! function spawns a new asynchronous task, that tasks is the "**launcher**". //! The "**launcher**" starts the actual server and sends a message back //! to the main application. diff --git a/src/bootstrap/jobs/http_tracker_core.rs b/src/bootstrap/jobs/http_tracker_core.rs new file mode 100644 index 000000000..ab71b9a0f --- /dev/null +++ b/src/bootstrap/jobs/http_tracker_core.rs @@ -0,0 +1,26 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc<AppContainer>, + cancellation_token: CancellationToken, +) -> Option<JoinHandle<()>> { + if config.core.tracker_usage_statistics { + let job = bittorrent_http_tracker_core::statistics::event::listener::run_event_listener( + app_container.http_tracker_core_services.event_bus.receiver(), + cancellation_token, + &app_container.http_tracker_core_services.stats_repository, + ); + + Some(job) + } else { + tracing::info!("HTTP tracker core event listener job is disabled."); + None + } +} diff --git a/src/bootstrap/jobs/manager.rs b/src/bootstrap/jobs/manager.rs new file mode 100644 index 
000000000..565cd7b73 --- /dev/null +++ b/src/bootstrap/jobs/manager.rs @@ -0,0 +1,119 @@ +use std::time::Duration; + +use tokio::task::JoinHandle; +use tokio::time::timeout; +use tokio_util::sync::CancellationToken; +use tracing::{info, warn}; + +/// Represents a named background job. +#[derive(Debug)] +pub struct Job { + name: String, + handle: JoinHandle<()>, +} + +impl Job { + pub fn new<N: Into<String>>(name: N, handle: JoinHandle<()>) -> Self { + Self { + name: name.into(), + handle, + } + } +} + +/// Manages multiple background jobs. +#[derive(Debug, Default)] +pub struct JobManager { + jobs: Vec<Job>, + cancellation_token: CancellationToken, +} + +impl JobManager { + #[must_use] + pub fn new() -> Self { + Self { + jobs: Vec::new(), + cancellation_token: CancellationToken::new(), + } + } + + pub fn push<N: Into<String>>(&mut self, name: N, handle: JoinHandle<()>) { + self.jobs.push(Job::new(name, handle)); + } + + pub fn push_opt<N: Into<String>>(&mut self, name: N, handle: Option<JoinHandle<()>>) { + if let Some(handle) = handle { + self.push(name, handle); + } + } + + #[must_use] + pub fn new_cancellation_token(&self) -> CancellationToken { + self.cancellation_token.clone() + } + + /// Cancels all jobs using the shared cancellation token. + /// + /// Notice that this does not cancel the jobs immediately, but rather + /// signals them to stop. The jobs themselves must handle the cancellation + /// token appropriately. + /// + /// Notice jobs might be pushed into the manager without a cancellation + /// token, so this method will not cancel those jobs. Some tasks might + /// decide to listen for CTRL+c signal directly, or implement their own + /// cancellation logic. + pub fn cancel(&self) { + self.cancellation_token.cancel(); + } + + /// Waits sequentially for all jobs to complete, with a graceful timeout per + /// job. + pub async fn wait_for_all(mut self, grace_period: Duration) { + for job in self.jobs.drain(..) 
{ + let name = job.name.clone(); + + info!(job = %name, "Waiting for job to finish (timeout of {} seconds) ...", grace_period.as_secs()); + + if let Ok(result) = timeout(grace_period, job.handle).await { + if let Err(e) = result { + warn!(job = %name, "Job return an error: {:?}", e); + } else { + info!(job = %name, "Job completed gracefully"); + } + } else { + warn!(job = %name, "Job did not complete in time"); + } + } + } +} + +#[cfg(test)] +mod tests { + use tokio::time::Duration; + + use super::*; + + #[tokio::test] + async fn it_should_wait_for_all_jobs_to_finish() { + let mut manager = JobManager::new(); + + manager.push("job1", tokio::spawn(async {})); + manager.push("job2", tokio::spawn(async {})); + + manager.wait_for_all(Duration::from_secs(1)).await; + } + + #[tokio::test] + async fn it_should_log_when_a_job_panics() { + let mut manager = JobManager::new(); + + manager.push( + "panic_job", + tokio::spawn(async { + panic!("expected panic"); + }), + ); + + manager.wait_for_all(Duration::from_secs(1)).await; + } +} diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 8c85ba45b..0e9c912af 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -6,8 +6,15 @@ //! 2. Launch all the application services as concurrent jobs. //! //! This modules contains all the functions needed to start those jobs. 
+pub mod activity_metrics_updater; pub mod health_check_api; pub mod http_tracker; +pub mod http_tracker_core; +pub mod manager; pub mod torrent_cleanup; +pub mod torrent_repository; pub mod tracker_apis; +pub mod tracker_core; pub mod udp_tracker; +pub mod udp_tracker_core; +pub mod udp_tracker_server; diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 7085aa7e2..8a3a71a44 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -28,6 +28,7 @@ use tracing::instrument; pub fn start_job(config: &Core, torrents_manager: &Arc<TorrentsManager>) -> JoinHandle<()> { let weak_torrents_manager = std::sync::Arc::downgrade(torrents_manager); let interval = config.inactive_peer_cleanup_interval; + let interval_in_secs = interval; tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); @@ -37,15 +38,15 @@ pub fn start_job(config: &Core, torrents_manager: &Arc<TorrentsManager>) -> Join loop { tokio::select! 
{ _ = tokio::signal::ctrl_c() => { - tracing::info!("Stopping torrent cleanup job.."); + tracing::info!("Stopping torrent cleanup job ..."); break; } _ = interval.tick() => { if let Some(torrents_manager) = weak_torrents_manager.upgrade() { let start_time = Utc::now().time(); - tracing::info!("Cleaning up torrents.."); - torrents_manager.cleanup_torrents(); - tracing::info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); + tracing::info!("Cleaning up torrents (executed every {} secs) ...", interval_in_secs); + torrents_manager.cleanup_torrents().await; + tracing::info!("Cleaned up torrents in: {} ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; } diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs new file mode 100644 index 000000000..e49323735 --- /dev/null +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -0,0 +1,26 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc<AppContainer>, + cancellation_token: CancellationToken, +) -> Option<JoinHandle<()>> { + if config.core.tracker_usage_statistics { + let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( + app_container.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token, + &app_container.swarm_coordination_registry_container.stats_repository, + ); + + Some(job) + } else { + tracing::info!("Torrent repository package event listener job is disabled."); + None + } +} diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index d152e853f..9f3964c20 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -7,7 +7,7 @@ //! > versions. 
API consumers can choose which version to use. The API version is //! > part of the URL, for example: `http://localhost:1212/api/v1/stats`. //! -//! The [`tracker_apis::start_job`](crate::bootstrap::jobs::tracker_apis::start_job) +//! The [`tracker_apis::start_job`](crate::bootstrap::jobs::tracker_apis::start_job) //! function spawns a new asynchronous task, that tasks is the "**launcher**". //! The "**launcher**" starts the actual server and sends a message back //! to the main application. The main application waits until receives diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs new file mode 100644 index 000000000..d881f4cd2 --- /dev/null +++ b/src/bootstrap/jobs/tracker_core.rs @@ -0,0 +1,32 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc<AppContainer>, + cancellation_token: CancellationToken, +) -> Option<JoinHandle<()>> { + if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { + let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( + app_container.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token, + &app_container.tracker_core_container.stats_repository, + &app_container.tracker_core_container.db_downloads_metric_repository, + app_container + .tracker_core_container + .core_config + .tracker_policy + .persistent_torrent_completed_stat, + ); + + Some(job) + } else { + tracing::info!("Tracker core event listener job is disabled."); + None + } +} diff --git a/src/bootstrap/jobs/udp_tracker_core.rs b/src/bootstrap/jobs/udp_tracker_core.rs new file mode 100644 index 000000000..dd7e8c165 --- /dev/null +++ b/src/bootstrap/jobs/udp_tracker_core.rs @@ -0,0 +1,25 @@ +use std::sync::Arc; + +use 
tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc<AppContainer>, + cancellation_token: CancellationToken, +) -> Option<JoinHandle<()>> { + if config.core.tracker_usage_statistics { + let job = bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( + app_container.udp_tracker_core_services.event_bus.receiver(), + cancellation_token, + &app_container.udp_tracker_core_services.stats_repository, + ); + Some(job) + } else { + tracing::info!("UDP tracker core event listener job is disabled."); + None + } +} diff --git a/src/bootstrap/jobs/udp_tracker_server.rs b/src/bootstrap/jobs/udp_tracker_server.rs new file mode 100644 index 000000000..fc6df9c16 --- /dev/null +++ b/src/bootstrap/jobs/udp_tracker_server.rs @@ -0,0 +1,35 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_stats_event_listener( + config: &Configuration, + app_container: &Arc<AppContainer>, + cancellation_token: CancellationToken, +) -> Option<JoinHandle<()>> { + if config.core.tracker_usage_statistics { + let job = torrust_udp_tracker_server::statistics::event::listener::run_event_listener( + app_container.udp_tracker_server_container.event_bus.receiver(), + cancellation_token, + &app_container.udp_tracker_server_container.stats_repository, + ); + Some(job) + } else { + tracing::info!("UDP tracker server event listener job is disabled."); + None + } +} + +#[must_use] +pub fn start_banning_event_listener(app_container: &Arc<AppContainer>, cancellation_token: CancellationToken) -> JoinHandle<()> { + torrust_udp_tracker_server::banning::event::listener::run_event_listener( + app_container.udp_tracker_server_container.event_bus.receiver(), + 
cancellation_token, + &app_container.udp_tracker_core_services.ban_service, + &app_container.udp_tracker_server_container.stats_repository, + ) +} diff --git a/src/console/ci/e2e/docker.rs b/src/console/ci/e2e/docker.rs index ce2b1aa99..89d258d2c 100644 --- a/src/console/ci/e2e/docker.rs +++ b/src/console/ci/e2e/docker.rs @@ -82,7 +82,7 @@ impl Docker { let mut port_args: Vec<String> = vec![]; for port in &options.ports { port_args.push("--publish".to_string()); - port_args.push(port.to_string()); + port_args.push(port.clone()); } let args = [initial_args, env_var_args, port_args, [image.to_string()].to_vec()].concat(); diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index c406fa7a5..e8b6b3b8f 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -31,8 +31,8 @@ impl RunningServices { /// 2024-06-10T16:07:39.990303Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 /// 2024-06-10T16:07:39.990439Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 /// 2024-06-10T16:07:39.990448Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled - /// 2024-06-10T16:07:39.990563Z INFO API: Starting on http://127.0.0.1:1212 - /// 2024-06-10T16:07:39.990565Z INFO API: Started on http://127.0.0.1:1212 + /// 2024-06-10T16:07:39.990563Z INFO API: Starting on: http://127.0.0.1:1212 + /// 2024-06-10T16:07:39.990565Z INFO API: Started on: http://127.0.0.1:1212 /// 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 /// 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 /// ``` @@ -122,8 +122,8 @@ mod tests { 2024-06-10T16:07:39.990303Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 2024-06-10T16:07:39.990439Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 2024-06-10T16:07:39.990448Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled - 2024-06-10T16:07:39.990563Z INFO API: Starting on http://127.0.0.1:1212 - 2024-06-10T16:07:39.990565Z 
INFO API: Started on http://127.0.0.1:1212 + 2024-06-10T16:07:39.990563Z INFO API: Starting on: http://127.0.0.1:1212 + 2024-06-10T16:07:39.990565Z INFO API: Started on: http://127.0.0.1:1212 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 "; diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index 624878c70..6275c144b 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -77,7 +77,7 @@ pub fn run() -> anyhow::Result<()> { // Besides, if we don't use port 0 we should get the port numbers from the tracker configuration. // We could not use docker, but the intention was to create E2E tests including containerization. let options = RunOptions { - env_vars: vec![("TORRUST_TRACKER_CONFIG_TOML".to_string(), tracker_config.to_string())], + env_vars: vec![("TORRUST_TRACKER_CONFIG_TOML".to_string(), tracker_config.clone())], ports: vec![ "6969:6969/udp".to_string(), "7070:7070/tcp".to_string(), diff --git a/src/console/ci/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs index a3845c103..1a7717a41 100644 --- a/src/console/ci/e2e/tracker_container.rs +++ b/src/console/ci/e2e/tracker_container.rs @@ -1,7 +1,7 @@ use std::time::Duration; use rand::distr::Alphanumeric; -use rand::Rng; +use rand::RngExt; use super::docker::{RunOptions, RunningContainer}; use super::logs_parser::RunningServices; diff --git a/src/console/profiling.rs b/src/console/profiling.rs index f3829c073..df44f4009 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -157,12 +157,11 @@ //! kcachegrind callgrind.out //! 
``` use std::env; -use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; -use crate::{app, bootstrap}; +use crate::app; pub async fn run() { // Parse command line arguments @@ -180,11 +179,7 @@ pub async fn run() { return; }; - let (config, app_container) = bootstrap::app::setup(); - - let app_container = Arc::new(app_container); - - let jobs = app::start(&config, &app_container).await; + let (_app_container, jobs) = app::run().await; // Run the tracker for a fixed duration let run_duration = sleep(Duration::from_secs(duration_secs)); @@ -194,9 +189,9 @@ pub async fn run() { tracing::info!("Torrust timed shutdown.."); }, _ = tokio::signal::ctrl_c() => { - tracing::info!("Torrust shutting down via Ctrl+C ..."); - // Await for all jobs to shutdown - futures::future::join_all(jobs).await; + tracing::info!("Torrust tracker shutting down via Ctrl+C ..."); + + jobs.wait_for_all(Duration::from_secs(10)).await; } } diff --git a/src/container.rs b/src/container.rs index 07c30d604..7112a54e8 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,216 +1,209 @@ +use std::collections::HashMap; +use std::net::SocketAddr; use std::sync::Arc; -use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; -use bittorrent_http_tracker_core::services::announce::AnnounceService; -use bittorrent_http_tracker_core::services::scrape::ScrapeService; -use bittorrent_tracker_core::announce_handler::AnnounceHandler; -use bittorrent_tracker_core::authentication::handler::KeysHandler; -use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_http_tracker_core::container::{HttpTrackerCoreContainer, HttpTrackerCoreServices}; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_tracker_core::databases::Database; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::torrent::manager::TorrentsManager; -use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use bittorrent_tracker_core::whitelist; -use bittorrent_tracker_core::whitelist::manager::WhitelistManager; -use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; -use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; -use bittorrent_udp_tracker_core::services::banning::BanService; -use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; -use tokio::sync::RwLock; +use bittorrent_udp_tracker_core::container::{UdpTrackerCoreContainer, UdpTrackerCoreServices}; +use bittorrent_udp_tracker_core::{self}; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; -use torrust_tracker_configuration::{Configuration, Core, HttpApi, HttpTracker, UdpTracker}; +use torrust_server_lib::registar::Registar; +use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; -/* todo: remove duplicate code. 
+#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("There is not a HTTP tracker server instance bound to the socket address: {bind_address}")] + MissingHttpTrackerCoreContainer { bind_address: SocketAddr }, - Use containers from packages as AppContainer fields: + #[error("There is not a UDP tracker server instance bound to the socket address: {bind_address}")] + MissingUdpTrackerCoreContainer { bind_address: SocketAddr }, +} - - bittorrent_tracker_core::container::TrackerCoreContainer - - bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer - - bittorrent_http_tracker_core::container::HttpTrackerCoreContainer - - torrust_udp_tracker_server::container::UdpTrackerServerContainer +pub struct AppContainer { + // Configuration + pub http_api_config: Arc<Option<HttpApi>>, - Container initialization is duplicated. -*/ + // Registar + pub registar: Arc<Registar>, -pub struct AppContainer { - // Tracker Core Services - pub core_config: Arc<Core>, - pub database: Arc<Box<dyn Database>>, - pub announce_handler: Arc<AnnounceHandler>, - pub scrape_handler: Arc<ScrapeHandler>, - pub keys_handler: Arc<KeysHandler>, - pub authentication_service: Arc<AuthenticationService>, - pub in_memory_whitelist: Arc<InMemoryWhitelist>, - pub whitelist_authorization: Arc<whitelist::authorization::WhitelistAuthorization>, - pub whitelist_manager: Arc<WhitelistManager>, - pub in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, - pub db_torrent_repository: Arc<DatabasePersistentTorrentRepository>, - pub torrents_manager: Arc<TorrentsManager>, - - // UDP Tracker Core Services - pub udp_core_stats_event_sender: Arc<Option<Box<dyn bittorrent_udp_tracker_core::statistics::event::sender::Sender>>>, - pub udp_core_stats_repository: Arc<bittorrent_udp_tracker_core::statistics::repository::Repository>, - pub udp_ban_service: Arc<RwLock<BanService>>, - pub udp_connect_service: Arc<bittorrent_udp_tracker_core::services::connect::ConnectService>, - pub 
udp_announce_service: Arc<bittorrent_udp_tracker_core::services::announce::AnnounceService>, - pub udp_scrape_service: Arc<bittorrent_udp_tracker_core::services::scrape::ScrapeService>, - - // HTTP Tracker Core Services - pub http_stats_event_sender: Arc<Option<Box<dyn bittorrent_http_tracker_core::statistics::event::sender::Sender>>>, - pub http_stats_repository: Arc<bittorrent_http_tracker_core::statistics::repository::Repository>, - pub http_announce_service: Arc<bittorrent_http_tracker_core::services::announce::AnnounceService>, - pub http_scrape_service: Arc<bittorrent_http_tracker_core::services::scrape::ScrapeService>, - - // UDP Tracker Server Services - pub udp_server_stats_event_sender: Arc<Option<Box<dyn torrust_udp_tracker_server::statistics::event::sender::Sender>>>, - pub udp_server_stats_repository: Arc<torrust_udp_tracker_server::statistics::repository::Repository>, + // Swarm Coordination Registry Container + pub swarm_coordination_registry_container: Arc<SwarmCoordinationRegistryContainer>, + + // Core + pub tracker_core_container: Arc<TrackerCoreContainer>, + + // HTTP + pub http_tracker_core_services: Arc<HttpTrackerCoreServices>, + pub http_tracker_instance_containers: Arc<HashMap<SocketAddr, Arc<HttpTrackerCoreContainer>>>, + + // UDP + pub udp_tracker_core_services: Arc<UdpTrackerCoreServices>, + pub udp_tracker_server_container: Arc<UdpTrackerServerContainer>, + pub udp_tracker_instance_containers: Arc<HashMap<SocketAddr, Arc<UdpTrackerCoreContainer>>>, } impl AppContainer { - #[instrument(skip())] + #[instrument(skip(configuration))] pub fn initialize(configuration: &Configuration) -> AppContainer { + // Configuration + let core_config = Arc::new(configuration.core.clone()); - let tracker_core_container = TrackerCoreContainer::initialize(&core_config); - - // HTTP Tracker Core Services - let (http_stats_event_sender, http_stats_repository) = - 
bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let http_stats_repository = Arc::new(http_stats_repository); - let http_announce_service = Arc::new(AnnounceService::new( - tracker_core_container.core_config.clone(), - tracker_core_container.announce_handler.clone(), - tracker_core_container.authentication_service.clone(), - tracker_core_container.whitelist_authorization.clone(), - http_stats_event_sender.clone(), - )); - let http_scrape_service = Arc::new(ScrapeService::new( - tracker_core_container.core_config.clone(), - tracker_core_container.scrape_handler.clone(), - tracker_core_container.authentication_service.clone(), - http_stats_event_sender.clone(), - )); + let http_api_config = Arc::new(configuration.http_api.clone()); - // UDP Tracker Core Services - let (udp_core_stats_event_sender, udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let udp_core_stats_repository = Arc::new(udp_core_stats_repository); - let udp_ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let udp_connect_service = Arc::new(bittorrent_udp_tracker_core::services::connect::ConnectService::new( - udp_core_stats_event_sender.clone(), - )); - let udp_announce_service = Arc::new(bittorrent_udp_tracker_core::services::announce::AnnounceService::new( - tracker_core_container.announce_handler.clone(), - tracker_core_container.whitelist_authorization.clone(), - udp_core_stats_event_sender.clone(), + // Registar + + let registar = Arc::new(Registar::default()); + + // Swarm Coordination Registry Container + + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + core_config.tracker_usage_statistics.into(), )); - let 
udp_scrape_service = Arc::new(bittorrent_udp_tracker_core::services::scrape::ScrapeService::new( - tracker_core_container.scrape_handler.clone(), - udp_core_stats_event_sender.clone(), + + // Core + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &swarm_coordination_registry_container, )); - // UDP Tracker Server Services - let (udp_server_stats_event_sender, udp_server_stats_repository) = - torrust_udp_tracker_server::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); - let udp_server_stats_repository = Arc::new(udp_server_stats_repository); + // HTTP + + let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(&tracker_core_container); + + let http_tracker_instance_containers = Self::initialize_http_tracker_instance_containers( + configuration, + &tracker_core_container, + &http_tracker_core_services, + ); + + // UDP + + let udp_tracker_core_services = UdpTrackerCoreServices::initialize_from(&tracker_core_container); + + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); + + let udp_tracker_instance_containers = + Self::initialize_udp_tracker_instance_containers(configuration, &tracker_core_container, &udp_tracker_core_services); AppContainer { - // Tracker Core Services - core_config, - database: tracker_core_container.database, - announce_handler: tracker_core_container.announce_handler, - scrape_handler: tracker_core_container.scrape_handler, - keys_handler: tracker_core_container.keys_handler, - authentication_service: tracker_core_container.authentication_service, - in_memory_whitelist: tracker_core_container.in_memory_whitelist, - whitelist_authorization: tracker_core_container.whitelist_authorization, - whitelist_manager: tracker_core_container.whitelist_manager, - in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository, - 
db_torrent_repository: tracker_core_container.db_torrent_repository, - torrents_manager: tracker_core_container.torrents_manager, - - // UDP Tracker Core Services - udp_core_stats_event_sender, - udp_core_stats_repository, - udp_ban_service, - udp_connect_service, - udp_announce_service, - udp_scrape_service, - - // HTTP Tracker Core Services - http_stats_event_sender, - http_stats_repository, - http_announce_service, - http_scrape_service, - - // UDP Tracker Server Services - udp_server_stats_event_sender, - udp_server_stats_repository, + // Configuration + http_api_config, + + // Registar + registar, + + // Swarm Coordination Registry Container + swarm_coordination_registry_container, + + // Core + tracker_core_container, + + // HTTP + http_tracker_core_services, + http_tracker_instance_containers, + + // UDP + udp_tracker_core_services, + udp_tracker_server_container, + udp_tracker_instance_containers, } } #[must_use] - pub fn http_tracker_container(&self, http_tracker_config: &Arc<HttpTracker>) -> HttpTrackerCoreContainer { - HttpTrackerCoreContainer { - core_config: self.core_config.clone(), - announce_handler: self.announce_handler.clone(), - scrape_handler: self.scrape_handler.clone(), - whitelist_authorization: self.whitelist_authorization.clone(), - authentication_service: self.authentication_service.clone(), - - http_tracker_config: http_tracker_config.clone(), - http_stats_event_sender: self.http_stats_event_sender.clone(), - http_stats_repository: self.http_stats_repository.clone(), - announce_service: self.http_announce_service.clone(), - scrape_service: self.http_scrape_service.clone(), + pub fn udp_tracker_server_container(&self) -> Arc<UdpTrackerServerContainer> { + self.udp_tracker_server_container.clone() + } + + /// # Errors + /// + /// Return an error if there is no HTTP tracker server instance bound to the + /// socket address. 
+ pub fn http_tracker_container(&self, bind_address: SocketAddr) -> Result<Arc<HttpTrackerCoreContainer>, Error> { + match self.http_tracker_instance_containers.get(&bind_address) { + Some(http_tracker_container) => Ok(http_tracker_container.clone()), + None => Err(Error::MissingHttpTrackerCoreContainer { bind_address }), } } - #[must_use] - pub fn udp_tracker_container(&self, udp_tracker_config: &Arc<UdpTracker>) -> UdpTrackerCoreContainer { - UdpTrackerCoreContainer { - core_config: self.core_config.clone(), - announce_handler: self.announce_handler.clone(), - scrape_handler: self.scrape_handler.clone(), - whitelist_authorization: self.whitelist_authorization.clone(), - - udp_tracker_config: udp_tracker_config.clone(), - udp_core_stats_event_sender: self.udp_core_stats_event_sender.clone(), - udp_core_stats_repository: self.udp_core_stats_repository.clone(), - ban_service: self.udp_ban_service.clone(), - connect_service: self.udp_connect_service.clone(), - announce_service: self.udp_announce_service.clone(), - scrape_service: self.udp_scrape_service.clone(), + /// # Errors + /// + /// Return an error if there is no UDP tracker server instance bound to the + /// socket address. 
+ pub fn udp_tracker_container(&self, bind_address: SocketAddr) -> Result<Arc<UdpTrackerCoreContainer>, Error> { + match self.udp_tracker_instance_containers.get(&bind_address) { + Some(udp_tracker_container) => Ok(udp_tracker_container.clone()), + None => Err(Error::MissingUdpTrackerCoreContainer { bind_address }), } } #[must_use] - pub fn tracker_http_api_container(&self, http_api_config: &Arc<HttpApi>) -> TrackerHttpApiCoreContainer { + pub fn tracker_http_api_container(&self, http_api_config: &Arc<HttpApi>) -> Arc<TrackerHttpApiCoreContainer> { TrackerHttpApiCoreContainer { http_api_config: http_api_config.clone(), - core_config: self.core_config.clone(), - in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), - keys_handler: self.keys_handler.clone(), - whitelist_manager: self.whitelist_manager.clone(), - ban_service: self.udp_ban_service.clone(), - http_stats_repository: self.http_stats_repository.clone(), - udp_core_stats_repository: self.udp_core_stats_repository.clone(), - udp_server_stats_repository: self.udp_server_stats_repository.clone(), + + swarm_coordination_registry_container: self.swarm_coordination_registry_container.clone(), + + tracker_core_container: self.tracker_core_container.clone(), + + http_stats_repository: self.http_tracker_core_services.stats_repository.clone(), + + ban_service: self.udp_tracker_core_services.ban_service.clone(), + udp_core_stats_repository: self.udp_tracker_core_services.stats_repository.clone(), + udp_server_stats_repository: self.udp_tracker_server_container.stats_repository.clone(), } + .into() } #[must_use] - pub fn udp_tracker_server_container(&self) -> UdpTrackerServerContainer { - UdpTrackerServerContainer { - udp_server_stats_event_sender: self.udp_server_stats_event_sender.clone(), - udp_server_stats_repository: self.udp_server_stats_repository.clone(), + fn initialize_http_tracker_instance_containers( + configuration: &Configuration, + tracker_core_container: &Arc<TrackerCoreContainer>, + 
http_tracker_core_services: &Arc<HttpTrackerCoreServices>, + ) -> Arc<HashMap<SocketAddr, Arc<HttpTrackerCoreContainer>>> { + let mut http_tracker_instance_containers = HashMap::new(); + + if let Some(http_trackers) = &configuration.http_trackers { + for http_tracker_config in http_trackers { + http_tracker_instance_containers.insert( + http_tracker_config.bind_address, + HttpTrackerCoreContainer::initialize_from_services( + tracker_core_container, + http_tracker_core_services, + &Arc::new(http_tracker_config.clone()), + ), + ); + } } + + Arc::new(http_tracker_instance_containers) + } + + #[must_use] + fn initialize_udp_tracker_instance_containers( + configuration: &Configuration, + tracker_core_container: &Arc<TrackerCoreContainer>, + udp_tracker_core_services: &Arc<UdpTrackerCoreServices>, + ) -> Arc<HashMap<SocketAddr, Arc<UdpTrackerCoreContainer>>> { + let mut udp_tracker_instance_containers = HashMap::new(); + + if let Some(udp_trackers) = &configuration.udp_trackers { + for udp_tracker_config in udp_trackers { + udp_tracker_instance_containers.insert( + udp_tracker_config.bind_address, + UdpTrackerCoreContainer::initialize_from_services( + tracker_core_container, + udp_tracker_core_services, + &Arc::new(udp_tracker_config.clone()), + ), + ); + } + } + + Arc::new(udp_tracker_instance_containers) } } diff --git a/src/lib.rs b/src/lib.rs index 0aaf34fe4..791c0d928 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -88,6 +88,12 @@ //! //! The tracker has some system dependencies: //! +//! First, you need to install the build tools: +//! +//! ```text +//! sudo apt-get install build-essential +//! ``` +//! //! Since we are using the `openssl` crate with the [vendored feature](https://docs.rs/openssl/latest/openssl/#vendored), //! enabled, you will need to install the following dependencies: //! @@ -138,7 +144,6 @@ //! ```text //! git clone https://github.com/torrust/torrust-tracker.git \ //! && cd torrust-tracker \ -//! && cargo build --release \ //! 
&& mkdir -p ./storage/tracker/etc \ //! && mkdir -p ./storage/tracker/lib/database \ //! && mkdir -p ./storage/tracker/lib/tls \ @@ -149,7 +154,7 @@ //! compile and after being compiled it will start running the tracker. //! //! ```text -//! cargo run +//! cargo run --release //! ``` //! //! ## Run with docker diff --git a/src/main.rs b/src/main.rs index 77f6e32a3..7012ecaa7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,23 +1,20 @@ -use std::sync::Arc; +use std::time::Duration; -use torrust_tracker_lib::{app, bootstrap}; +use torrust_tracker_lib::app; #[tokio::main] async fn main() { - let (config, app_container) = bootstrap::app::setup(); + let (_app_container, jobs) = app::run().await; - let app_container = Arc::new(app_container); - - let jobs = app::start(&config, &app_container).await; - - // handle the signals tokio::select! { _ = tokio::signal::ctrl_c() => { - tracing::info!("Torrust shutting down ..."); + tracing::info!("Torrust tracker shutting down ..."); + + jobs.cancel(); + + jobs.wait_for_all(Duration::from_secs(10)).await; - // Await for all jobs to shutdown - futures::future::join_all(jobs).await; - tracing::info!("Torrust successfully shutdown."); + tracing::info!("Torrust tracker successfully shutdown."); } } } diff --git a/tests/integration.rs b/tests/integration.rs index 6a139e047..92289c415 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -1,13 +1,14 @@ //! Scaffolding for integration tests. //! +//! Integration tests are used to test the interaction between multiple modules, +//! multiple running trackers, etc. Tests for one specific module should be in +//! the corresponding package. +//! //! ```text //! cargo test --test integration //! ``` mod servers; -// todo: there is only one test example that was copied from other package. -// We have to add tests for the whole app. - use torrust_tracker_clock::clock; /// This code needs to be copied into each crate. 
diff --git a/tests/servers/api/contract/mod.rs b/tests/servers/api/contract/mod.rs new file mode 100644 index 000000000..9d34677fc --- /dev/null +++ b/tests/servers/api/contract/mod.rs @@ -0,0 +1 @@ +pub mod stats; diff --git a/tests/servers/api/contract/stats/mod.rs b/tests/servers/api/contract/stats/mod.rs new file mode 100644 index 000000000..d50bc58a5 --- /dev/null +++ b/tests/servers/api/contract/stats/mod.rs @@ -0,0 +1,94 @@ +use std::env; +use std::str::FromStr as _; + +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_client::http::client::requests::announce::QueryBuilder; +use bittorrent_tracker_client::http::client::Client as HttpTrackerClient; +use reqwest::Url; +use serde::Deserialize; +use tokio::time::Duration; +use torrust_rest_tracker_api_client::connection_info::{ConnectionInfo, Origin}; +use torrust_rest_tracker_api_client::v1::client::Client as TrackerApiClient; +use torrust_tracker_lib::app; + +#[tokio::test] +async fn the_stats_api_endpoint_should_return_the_global_stats() { + // Logging must be OFF otherwise you will get the following error: + // `Unable to install global subscriber: SetGlobalDefaultError("a global default trace dispatcher has already been set")` + // That's because we can't initialize the logger twice. + // You can enable it if you run only this test. 
+ let config_with_two_http_trackers = r#" + [metadata] + app = "torrust-tracker" + purpose = "configuration" + schema_version = "2.0.0" + + [logging] + threshold = "off" + + [core] + listed = false + private = false + + [core.database] + driver = "sqlite3" + path = "./integration_tests_sqlite3.db" + + [[http_trackers]] + bind_address = "0.0.0.0:7272" + tracker_usage_statistics = true + + [[http_trackers]] + bind_address = "0.0.0.0:7373" + tracker_usage_statistics = true + + [http_api] + bind_address = "0.0.0.0:1414" + + [http_api.access_tokens] + admin = "MyAccessToken" + "#; + + env::set_var("TORRUST_TRACKER_CONFIG_TOML", config_with_two_http_trackers); + + let (_app_container, _jobs) = app::run().await; + + announce_to_tracker("http://127.0.0.1:7272").await; + announce_to_tracker("http://127.0.0.1:7373").await; + + let global_stats = get_tracker_statistics("http://127.0.0.1:1414", "MyAccessToken").await; + + assert_eq!(global_stats.tcp4_announces_handled, 2); +} + +/// Make a sample announce request to the tracker. +async fn announce_to_tracker(tracker_url: &str) { + let response = HttpTrackerClient::new(Url::parse(tracker_url).unwrap(), Duration::from_secs(1)) + .unwrap() + .announce( + &QueryBuilder::with_default_values() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) // DevSkim: ignore DS173237 + .query(), + ) + .await; + + assert!(response.is_ok()); +} + +/// Global statistics with only metrics relevant to the test. 
+#[derive(Deserialize)] +struct PartialGlobalStatistics { + tcp4_announces_handled: u64, +} + +async fn get_tracker_statistics(api_url: &str, token: &str) -> PartialGlobalStatistics { + let response = TrackerApiClient::new(ConnectionInfo::authenticated(Origin::new(api_url).unwrap(), token)) + .unwrap() + .get_tracker_statistics(None) + .await; + + response + .json::<PartialGlobalStatistics>() + .await + .expect("Failed to parse JSON response") +} diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs new file mode 100644 index 000000000..2943dbb50 --- /dev/null +++ b/tests/servers/api/mod.rs @@ -0,0 +1 @@ +pub mod contract; diff --git a/tests/servers/health_check_api.rs b/tests/servers/health_check_api.rs deleted file mode 100644 index 0e66014da..000000000 --- a/tests/servers/health_check_api.rs +++ /dev/null @@ -1,32 +0,0 @@ -use reqwest::Response; -use torrust_axum_health_check_api_server::environment::Started; -use torrust_axum_health_check_api_server::resources::{Report, Status}; -use torrust_server_lib::registar::Registar; -use torrust_tracker_test_helpers::{configuration, logging}; - -pub async fn get(path: &str) -> Response { - reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap() -} - -#[tokio::test] -async fn the_health_check_endpoint_should_return_status_ok_when_there_is_not_any_service_registered() { - logging::setup(); - - let configuration = configuration::ephemeral_with_no_services(); - - let env = Started::new(&configuration.health_check_api.into(), Registar::default()).await; - - let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 - - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - - let report = response - .json::<Report>() - .await - .expect("it should be able to get the report as json"); - - assert_eq!(report.status, Status::None); - - env.stop().await.expect("it should stop the 
service"); -} diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs index 7aeefeec4..e5fdf85ee 100644 --- a/tests/servers/mod.rs +++ b/tests/servers/mod.rs @@ -1 +1 @@ -pub mod health_check_api; +pub mod api;