diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 2cfff78853..e4964e8909 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -32,7 +32,6 @@
"mutantdino.resourcemonitor",
"oderwat.indent-rainbow",
"redhat.vscode-yaml",
- "spmeesseman.vscode-taskexplorer",
"ms-python.pylint",
"charliermarsh.ruff"
],
diff --git a/.devcontainer/docker-compose.extend.yml b/.devcontainer/docker-compose.extend.yml
index a92f42bc6d..ce1ce259fd 100644
--- a/.devcontainer/docker-compose.extend.yml
+++ b/.devcontainer/docker-compose.extend.yml
@@ -14,8 +14,8 @@ services:
network_mode: service:db
blobstore:
ports:
- - '9000'
- - '9001'
+ - '9000:9000'
+ - '9001:9001'
volumes:
datatracker-vscode-ext:
diff --git a/.github/workflows/build-base-app.yml b/.github/workflows/build-base-app.yml
index 4a4394fca0..35172aa299 100644
--- a/.github/workflows/build-base-app.yml
+++ b/.github/workflows/build-base-app.yml
@@ -18,7 +18,7 @@ jobs:
packages: write
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
with:
token: ${{ secrets.GH_COMMON_TOKEN }}
@@ -28,20 +28,20 @@ jobs:
echo "IMGVERSION=$CURDATE" >> $GITHUB_ENV
- name: Set up QEMU
- uses: docker/setup-qemu-action@v3
+ uses: docker/setup-qemu-action@v4
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@v4
- name: Login to GitHub Container Registry
- uses: docker/login-action@v3
+ uses: docker/login-action@v4
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker Build & Push
- uses: docker/build-push-action@v6
+ uses: docker/build-push-action@v7
env:
DOCKER_BUILD_SUMMARY: false
with:
@@ -60,7 +60,7 @@ jobs:
echo "${{ env.IMGVERSION }}" > dev/build/TARGET_BASE
- name: Commit CHANGELOG.md
- uses: stefanzweifel/git-auto-commit-action@v6
+ uses: stefanzweifel/git-auto-commit-action@v7
with:
branch: ${{ github.ref_name }}
commit_message: 'ci: update base image target version to ${{ env.IMGVERSION }}'
diff --git a/.github/workflows/build-devblobstore.yml b/.github/workflows/build-devblobstore.yml
index f49a11af19..14c4b1a135 100644
--- a/.github/workflows/build-devblobstore.yml
+++ b/.github/workflows/build-devblobstore.yml
@@ -20,20 +20,20 @@ jobs:
packages: write
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@v4
- name: Login to GitHub Container Registry
- uses: docker/login-action@v3
+ uses: docker/login-action@v4
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker Build & Push
- uses: docker/build-push-action@v6
+ uses: docker/build-push-action@v7
env:
DOCKER_BUILD_SUMMARY: false
with:
diff --git a/.github/workflows/build-mq-broker.yml b/.github/workflows/build-mq-broker.yml
index 4de861dbcd..b297e34b47 100644
--- a/.github/workflows/build-mq-broker.yml
+++ b/.github/workflows/build-mq-broker.yml
@@ -24,23 +24,32 @@ jobs:
packages: write
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Set up QEMU
- uses: docker/setup-qemu-action@v3
+ uses: docker/setup-qemu-action@v4
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@v4
- name: Login to GitHub Container Registry
- uses: docker/login-action@v3
+ uses: docker/login-action@v4
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Set RabbitMQ version
+ id: rabbitmq-version
+ run: |
+ if [[ "${{ inputs.rabbitmq_version }}" == "" ]]; then
+ echo "RABBITMQ_VERSION=3.13-alpine" >> $GITHUB_OUTPUT
+ else
+ echo "RABBITMQ_VERSION=${{ inputs.rabbitmq_version }}" >> $GITHUB_OUTPUT
+ fi
+
- name: Docker Build & Push
- uses: docker/build-push-action@v6
+ uses: docker/build-push-action@v7
env:
DOCKER_BUILD_SUMMARY: false
with:
@@ -48,7 +57,7 @@ jobs:
file: dev/mq/Dockerfile
platforms: linux/amd64,linux/arm64
push: true
- build-args: RABBITMQ_VERSION=${{ inputs.rabbitmq_version }}
+ build-args: RABBITMQ_VERSION=${{ steps.rabbitmq-version.outputs.RABBITMQ_VERSION }}
tags: |
- ghcr.io/ietf-tools/datatracker-mq:${{ inputs.rabbitmq_version }}
+ ghcr.io/ietf-tools/datatracker-mq:${{ steps.rabbitmq-version.outputs.RABBITMQ_VERSION }}
ghcr.io/ietf-tools/datatracker-mq:latest
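
The step added above defaults the RabbitMQ version when the `rabbitmq_version`
workflow input is empty, so the push no longer produces a blank
`datatracker-mq:` tag. A minimal Python sketch of the same fallback (the
function name and asserts are illustrative, not part of the patch):

```python
def resolve_rabbitmq_version(input_value: str, default: str = "3.13-alpine") -> str:
    """Mirror the "Set RabbitMQ version" step: fall back to the pinned default."""
    # An empty input previously yielded the tag "ghcr.io/ietf-tools/datatracker-mq:".
    return input_value if input_value.strip() else default

assert resolve_rabbitmq_version("") == "3.13-alpine"
assert resolve_rabbitmq_version("4.0-alpine") == "4.0-alpine"
```
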
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 5e91445202..49a0e5b53b 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -66,7 +66,7 @@ jobs:
base_image_version: ${{ steps.baseimgversion.outputs.base_image_version }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
with:
fetch-depth: 1
fetch-tags: false
@@ -98,7 +98,7 @@ jobs:
echo "IS_RELEASE=true" >> $GITHUB_ENV
- name: Create Draft Release
- uses: ncipollo/release-action@v1.18.0
+ uses: ncipollo/release-action@v1.21.0
if: ${{ github.ref_name == 'release' }}
with:
prerelease: true
@@ -164,7 +164,7 @@ jobs:
TARGET_BASE: ${{needs.prepare.outputs.base_image_version}}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
with:
fetch-depth: 1
fetch-tags: false
@@ -175,7 +175,7 @@ jobs:
node-version: 18.x
- name: Setup Python
- uses: actions/setup-python@v5
+ uses: actions/setup-python@v6
with:
python-version: "3.x"
@@ -186,7 +186,7 @@ jobs:
- name: Download Coverage Results
if: ${{ github.event.inputs.skiptests == 'false' || github.ref_name == 'release' }}
- uses: actions/download-artifact@v4.3.0
+ uses: actions/download-artifact@v8.0.1
with:
name: coverage
@@ -253,10 +253,10 @@ jobs:
EOL
- name: Setup Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@v4
- name: Login to GitHub Container Registry
- uses: docker/login-action@v3
+ uses: docker/login-action@v4
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -267,7 +267,7 @@ jobs:
run: echo "FEATURE_LATEST_TAG=$(echo $GITHUB_REF_NAME | tr / -)" >> $GITHUB_ENV
- name: Build Images
- uses: docker/build-push-action@v6
+ uses: docker/build-push-action@v7
env:
DOCKER_BUILD_SUMMARY: false
with:
@@ -291,7 +291,7 @@ jobs:
- name: Download Coverage Results
if: ${{ github.event.inputs.skiptests == 'false' || github.ref_name == 'release' }}
- uses: actions/download-artifact@v4.3.0
+ uses: actions/download-artifact@v8.0.1
with:
name: coverage
@@ -315,7 +315,7 @@ jobs:
histCoveragePath: historical-coverage.json
- name: Create Release
- uses: ncipollo/release-action@v1.18.0
+ uses: ncipollo/release-action@v1.21.0
if: ${{ env.SHOULD_DEPLOY == 'true' }}
with:
allowUpdates: true
@@ -328,7 +328,7 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Update Baseline Coverage
- uses: ncipollo/release-action@v1.18.0
+ uses: ncipollo/release-action@v1.21.0
if: ${{ github.event.inputs.updateCoverage == 'true' || github.ref_name == 'release' }}
with:
allowUpdates: true
@@ -341,7 +341,7 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Upload Build Artifacts
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v7
with:
name: release-${{ env.PKG_VERSION }}
path: /home/runner/work/release/release.tar.gz
@@ -360,7 +360,7 @@ jobs:
steps:
- name: Notify on Slack (Success)
if: ${{ !contains(join(needs.*.result, ','), 'failure') }}
- uses: slackapi/slack-github-action@v2
+ uses: slackapi/slack-github-action@v3
with:
token: ${{ secrets.SLACK_GH_BOT }}
method: chat.postMessage
@@ -375,7 +375,7 @@ jobs:
value: "Completed"
- name: Notify on Slack (Failure)
if: ${{ contains(join(needs.*.result, ','), 'failure') }}
- uses: slackapi/slack-github-action@v2
+ uses: slackapi/slack-github-action@v3
with:
token: ${{ secrets.SLACK_GH_BOT }}
method: chat.postMessage
@@ -403,7 +403,7 @@ jobs:
PKG_VERSION: ${{needs.prepare.outputs.pkg_version}}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
with:
ref: main
@@ -426,7 +426,7 @@ jobs:
token: ${{ secrets.GH_INFRA_K8S_TOKEN }}
inputs: '{ "app":"datatracker", "appVersion":"${{ env.PKG_VERSION }}", "remoteRef":"${{ github.sha }}", "namespace":"${{ env.DEPLOY_NAMESPACE }}", "disableDailyDbRefresh":${{ inputs.devNoDbRefresh }} }'
wait-for-completion: true
- wait-for-completion-timeout: 30m
+ wait-for-completion-timeout: 60m
wait-for-completion-interval: 30s
display-workflow-run-url: false
@@ -453,7 +453,7 @@ jobs:
token: ${{ secrets.GH_INFRA_K8S_TOKEN }}
inputs: '{ "environment":"${{ secrets.GHA_K8S_CLUSTER }}", "app":"datatracker", "manifest":"postgres", "forceRecreate":true, "restoreToLastFullSnapshot":true, "waitClusterReady":true }'
wait-for-completion: true
- wait-for-completion-timeout: 60m
+ wait-for-completion-timeout: 120m
wait-for-completion-interval: 20s
display-workflow-run-url: false
diff --git a/.github/workflows/ci-run-tests.yml b/.github/workflows/ci-run-tests.yml
index 278bd8af2f..5349f1ac7a 100644
--- a/.github/workflows/ci-run-tests.yml
+++ b/.github/workflows/ci-run-tests.yml
@@ -23,7 +23,7 @@ jobs:
base_image_version: ${{ steps.baseimgversion.outputs.base_image_version }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
with:
fetch-depth: 1
fetch-tags: false
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 3444c03b5e..bc20779ae6 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -26,12 +26,12 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
- name: Initialize CodeQL
- uses: github/codeql-action/init@v3
+ uses: github/codeql-action/init@v4
with:
languages: ${{ matrix.language }}
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v3
+ uses: github/codeql-action/analyze@v4
diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index 6d0683c471..e255b270ff 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4
with:
diff --git a/.github/workflows/dev-assets-sync-nightly.yml b/.github/workflows/dev-assets-sync-nightly.yml
index 4cfbf6365b..cd986f06f3 100644
--- a/.github/workflows/dev-assets-sync-nightly.yml
+++ b/.github/workflows/dev-assets-sync-nightly.yml
@@ -29,17 +29,17 @@ jobs:
contents: read
packages: write
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Login to GitHub Container Registry
- uses: docker/login-action@v3
+ uses: docker/login-action@v4
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker Build & Push
- uses: docker/build-push-action@v6
+ uses: docker/build-push-action@v7
env:
DOCKER_BUILD_SUMMARY: false
with:
diff --git a/.github/workflows/tests-az.yml b/.github/workflows/tests-az.yml
index 8553563a19..833ca89bef 100644
--- a/.github/workflows/tests-az.yml
+++ b/.github/workflows/tests-az.yml
@@ -38,7 +38,7 @@ jobs:
ssh-keyscan -t rsa $vminfo >> ~/.ssh/known_hosts
- name: Remote SSH into VM
- uses: appleboy/ssh-action@2ead5e36573f08b82fbfce1504f1a4b05a647c6f
+ uses: appleboy/ssh-action@0ff4204d59e8e51228ff73bce53f80d53301dee2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 836314bac0..ad2e35408d 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -32,7 +32,7 @@ jobs:
image: ghcr.io/ietf-tools/datatracker-devblobstore:latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Prepare for tests
run: |
@@ -68,14 +68,14 @@ jobs:
coverage xml
- name: Upload geckodriver.log
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v7
if: ${{ failure() }}
with:
name: geckodriverlog
path: geckodriver.log
- name: Upload Coverage Results to Codecov
- uses: codecov/codecov-action@v5
+ uses: codecov/codecov-action@v6
with:
disable_search: true
files: coverage.xml
@@ -87,7 +87,7 @@ jobs:
mv latest-coverage.json coverage.json
- name: Upload Coverage Results as Build Artifact
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v7
if: ${{ always() }}
with:
name: coverage
@@ -102,7 +102,7 @@ jobs:
project: [chromium, firefox]
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- uses: actions/setup-node@v6
with:
@@ -121,7 +121,7 @@ jobs:
npx playwright test --project=${{ matrix.project }}
- name: Upload Report
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v7
if: ${{ always() }}
continue-on-error: true
with:
@@ -130,6 +130,7 @@ jobs:
if-no-files-found: ignore
tests-playwright-legacy:
+ if: ${{ false }} # disabled until we sort out a suspected test runner issue
name: Playwright Legacy Tests
runs-on: ubuntu-latest
container: ghcr.io/ietf-tools/datatracker-app-base:${{ inputs.targetBaseVersion }}
@@ -143,7 +144,7 @@ jobs:
image: ghcr.io/ietf-tools/datatracker-db:latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Prepare for tests
run: |
@@ -180,7 +181,7 @@ jobs:
npx playwright test --project=${{ matrix.project }} -c playwright-legacy.config.js
- name: Upload Report
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v7
if: ${{ always() }}
continue-on-error: true
with:
diff --git a/.gitignore b/.gitignore
index 84bc800e3b..ccc7a46b08 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
.DS_store
datatracker.sublime-project
datatracker.sublime-workspace
+/.claude
/.coverage
/.factoryboy_random_state
/.mypy_cache
diff --git a/client/agenda/AgendaDetailsModal.vue b/client/agenda/AgendaDetailsModal.vue
index 2582bf2159..69c8ef8b53 100644
--- a/client/agenda/AgendaDetailsModal.vue
+++ b/client/agenda/AgendaDetailsModal.vue
@@ -274,6 +274,7 @@ async function fetchSessionMaterials () {
diff --git a/client/components/Polls.vue b/client/components/Polls.vue
index 30cc9e8f36..0846d4ed16 100644
--- a/client/components/Polls.vue
+++ b/client/components/Polls.vue
@@ -90,3 +90,21 @@ onMounted(() => {
})
+
diff --git a/dev/build/Dockerfile b/dev/build/Dockerfile
index d3b186e1f5..e57fecd5f2 100644
--- a/dev/build/Dockerfile
+++ b/dev/build/Dockerfile
@@ -1,4 +1,4 @@
-FROM ghcr.io/ietf-tools/datatracker-app-base:20250903T2216
+FROM ghcr.io/ietf-tools/datatracker-app-base:20260410T1557
LABEL maintainer="IETF Tools Team <tools-discuss@ietf.org>"
ENV DEBIAN_FRONTEND=noninteractive
diff --git a/dev/build/TARGET_BASE b/dev/build/TARGET_BASE
index 9d8427efdb..f430037c09 100644
--- a/dev/build/TARGET_BASE
+++ b/dev/build/TARGET_BASE
@@ -1 +1 @@
-20250903T2216
+20260410T1557
diff --git a/dev/build/gunicorn.conf.py b/dev/build/gunicorn.conf.py
index c54b24a054..9af4478685 100644
--- a/dev/build/gunicorn.conf.py
+++ b/dev/build/gunicorn.conf.py
@@ -135,21 +135,30 @@ def post_request(worker, req, environ, resp):
def post_fork(server, worker):
server.log.info("Worker spawned (pid: %s)", worker.pid)
- resource = Resource.create(attributes={
- "service.name": "datatracker",
- "service.version": ietf.__version__,
- "service.instance.id": worker.pid,
- "service.namespace": "datatracker",
- "deployment.environment.name": os.environ.get("DATATRACKER_SERVICE_ENV", "dev")
- })
-
- trace.set_tracer_provider(TracerProvider(resource=resource))
- otlp_exporter = OTLPSpanExporter(endpoint="https://heimdall-otlp.ietf.org/v1/traces")
-
- trace.get_tracer_provider().add_span_processor(BatchSpanProcessor(otlp_exporter))
-
- # Instrumentations
- DjangoInstrumentor().instrument()
- Psycopg2Instrumentor().instrument()
- PymemcacheInstrumentor().instrument()
- RequestsInstrumentor().instrument()
+ # Setting DATATRACKER_OPENTELEMETRY_ENABLE=all in the environment enables all
+ # OpenTelemetry instrumentations. Individual instrumentations can be selected
+ # with a space-separated list. See the code below for the available names.
+ telemetry_env = os.environ.get("DATATRACKER_OPENTELEMETRY_ENABLE", "").strip()
+ if telemetry_env != "":
+ enabled_telemetry = [tok.strip().lower() for tok in telemetry_env.split()]
+ resource = Resource.create(attributes={
+ "service.name": "datatracker",
+ "service.version": ietf.__version__,
+ "service.instance.id": worker.pid,
+ "service.namespace": "datatracker",
+ "deployment.environment.name": os.environ.get("DATATRACKER_SERVICE_ENV", "dev")
+ })
+ trace.set_tracer_provider(TracerProvider(resource=resource))
+ otlp_exporter = OTLPSpanExporter(endpoint="https://heimdall-otlp.ietf.org/v1/traces")
+
+ trace.get_tracer_provider().add_span_processor(BatchSpanProcessor(otlp_exporter))
+
+ # Instrumentations
+ if "all" in enabled_telemetry or "django" in enabled_telemetry:
+ DjangoInstrumentor().instrument()
+ if "all" in enabled_telemetry or "psycopg2" in enabled_telemetry:
+ Psycopg2Instrumentor().instrument()
+ if "all" in enabled_telemetry or "pymemcache" in enabled_telemetry:
+ PymemcacheInstrumentor().instrument()
+ if "all" in enabled_telemetry or "requests" in enabled_telemetry:
+ RequestsInstrumentor().instrument()
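
The rewritten post_fork hook makes OpenTelemetry opt-in via
DATATRACKER_OPENTELEMETRY_ENABLE instead of always instrumenting. A standalone
sketch of the parsing and gating above (print() stands in for the real
*Instrumentor().instrument() calls):

```python
import os

# Example value; unset or empty means telemetry stays off entirely.
os.environ["DATATRACKER_OPENTELEMETRY_ENABLE"] = "django requests"

telemetry_env = os.environ.get("DATATRACKER_OPENTELEMETRY_ENABLE", "").strip()
if telemetry_env:
    enabled = [tok.strip().lower() for tok in telemetry_env.split()]
    for name in ("django", "psycopg2", "pymemcache", "requests"):
        if "all" in enabled or name in enabled:
            print(f"would instrument: {name}")  # prints: django, requests
```
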
diff --git a/dev/build/migration-start.sh b/dev/build/migration-start.sh
index 901026e53b..578daf5cef 100644
--- a/dev/build/migration-start.sh
+++ b/dev/build/migration-start.sh
@@ -3,7 +3,11 @@
echo "Running Datatracker migrations..."
./ietf/manage.py migrate --settings=settings_local
-echo "Running Blobdb migrations ..."
-./ietf/manage.py migrate --settings=settings_local --database=blobdb
+# Check whether the blobdb database exists - inspectdb exits with a nonzero
+# (false) status if it does not.
+if ./ietf/manage.py inspectdb --database blobdb > /dev/null 2>&1; then
+ echo "Running Blobdb migrations ..."
+ ./ietf/manage.py migrate --settings=settings_local --database=blobdb
+fi
echo "Done!"
diff --git a/dev/deploy-to-container/package-lock.json b/dev/deploy-to-container/package-lock.json
index 0954ec9af4..5d5bef5604 100644
--- a/dev/deploy-to-container/package-lock.json
+++ b/dev/deploy-to-container/package-lock.json
@@ -6,12 +6,12 @@
"": {
"name": "deploy-to-container",
"dependencies": {
- "dockerode": "^4.0.6",
- "fs-extra": "^11.3.0",
- "nanoid": "5.1.5",
+ "dockerode": "^4.0.10",
+ "fs-extra": "^11.3.4",
+ "nanoid": "5.1.7",
"nanoid-dictionary": "5.0.0",
- "slugify": "1.6.6",
- "tar": "^7.4.3",
+ "slugify": "1.6.9",
+ "tar": "^7.5.13",
"yargs": "^17.7.2"
},
"engines": {
@@ -52,95 +52,6 @@
"node": ">=6"
}
},
- "node_modules/@isaacs/cliui": {
- "version": "8.0.2",
- "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
- "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
- "dependencies": {
- "string-width": "^5.1.2",
- "string-width-cjs": "npm:string-width@^4.2.0",
- "strip-ansi": "^7.0.1",
- "strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
- "wrap-ansi": "^8.1.0",
- "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@isaacs/cliui/node_modules/ansi-regex": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
- "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/ansi-regex?sponsor=1"
- }
- },
- "node_modules/@isaacs/cliui/node_modules/ansi-styles": {
- "version": "6.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
- "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/ansi-styles?sponsor=1"
- }
- },
- "node_modules/@isaacs/cliui/node_modules/emoji-regex": {
- "version": "9.2.2",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
- "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
- },
- "node_modules/@isaacs/cliui/node_modules/string-width": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
- "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
- "dependencies": {
- "eastasianwidth": "^0.2.0",
- "emoji-regex": "^9.2.2",
- "strip-ansi": "^7.0.1"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
- "node_modules/@isaacs/cliui/node_modules/strip-ansi": {
- "version": "7.1.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
- "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
- "dependencies": {
- "ansi-regex": "^6.0.1"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/strip-ansi?sponsor=1"
- }
- },
- "node_modules/@isaacs/cliui/node_modules/wrap-ansi": {
- "version": "8.1.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
- "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
- "dependencies": {
- "ansi-styles": "^6.1.0",
- "string-width": "^5.0.1",
- "strip-ansi": "^7.0.1"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
- }
- },
"node_modules/@isaacs/fs-minipass": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz",
@@ -161,15 +72,6 @@
"url": "https://opencollective.com/js-sdsl"
}
},
- "node_modules/@pkgjs/parseargs": {
- "version": "0.11.0",
- "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
- "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
- "optional": true,
- "engines": {
- "node": ">=14"
- }
- },
"node_modules/@protobufjs/aspromise": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
@@ -258,16 +160,10 @@
"version": "0.2.6",
"resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz",
"integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==",
- "license": "MIT",
"dependencies": {
"safer-buffer": "~2.1.0"
}
},
- "node_modules/balanced-match": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
- "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
- },
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
@@ -285,14 +181,12 @@
"type": "consulting",
"url": "https://feross.org/support"
}
- ],
- "license": "MIT"
+ ]
},
"node_modules/bcrypt-pbkdf": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
"integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==",
- "license": "BSD-3-Clause",
"dependencies": {
"tweetnacl": "^0.14.3"
}
@@ -301,21 +195,12 @@
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz",
"integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==",
- "license": "MIT",
"dependencies": {
"buffer": "^5.5.0",
"inherits": "^2.0.4",
"readable-stream": "^3.4.0"
}
},
- "node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
- "dependencies": {
- "balanced-match": "^1.0.0"
- }
- },
"node_modules/buffer": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz",
@@ -334,16 +219,15 @@
"url": "https://feross.org/support"
}
],
- "license": "MIT",
"dependencies": {
"base64-js": "^1.3.1",
"ieee754": "^1.1.13"
}
},
"node_modules/buildcheck": {
- "version": "0.0.6",
- "resolved": "https://registry.npmjs.org/buildcheck/-/buildcheck-0.0.6.tgz",
- "integrity": "sha512-8f9ZJCUXyT1M35Jx7MkBgmBMo3oHTTBIPLiY9xyL0pl3T5RwcPEY8cUHr5LBNfu/fk6c2T4DJZuVM/8ZZT2D2A==",
+ "version": "0.0.7",
+ "resolved": "https://registry.npmjs.org/buildcheck/-/buildcheck-0.0.7.tgz",
+ "integrity": "sha512-lHblz4ahamxpTmnsk+MNTRWsjYKv965MwOrSJyeD588rR3Jcu7swE+0wN5F+PbL5cjgu/9ObkhfzEPuofEMwLA==",
"optional": true,
"engines": {
"node": ">=10.0.0"
@@ -352,8 +236,7 @@
"node_modules/chownr": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
- "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==",
- "license": "ISC"
+ "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="
},
"node_modules/cliui": {
"version": "8.0.1",
@@ -398,24 +281,10 @@
"node": ">=10.0.0"
}
},
- "node_modules/cross-spawn": {
- "version": "7.0.3",
- "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
- "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
- "dependencies": {
- "path-key": "^3.1.0",
- "shebang-command": "^2.0.0",
- "which": "^2.0.1"
- },
- "engines": {
- "node": ">= 8"
- }
- },
"node_modules/debug": {
- "version": "4.4.0",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz",
- "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==",
- "license": "MIT",
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
"dependencies": {
"ms": "^2.1.3"
},
@@ -429,10 +298,9 @@
}
},
"node_modules/docker-modem": {
- "version": "5.0.6",
- "resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-5.0.6.tgz",
- "integrity": "sha512-ens7BiayssQz/uAxGzH8zGXCtiV24rRWXdjNha5V4zSOcxmAZsfGVm/PPFbwQdqEkDnhG+SyR9E3zSHUbOKXBQ==",
- "license": "Apache-2.0",
+ "version": "5.0.7",
+ "resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-5.0.7.tgz",
+ "integrity": "sha512-XJgGhoR/CLpqshm4d3L7rzH6t8NgDFUIIpztYlLHIApeJjMZKYJMz2zxPsYxnejq5h3ELYSw/RBsi3t5h7gNTA==",
"dependencies": {
"debug": "^4.1.1",
"readable-stream": "^3.5.0",
@@ -444,38 +312,31 @@
}
},
"node_modules/dockerode": {
- "version": "4.0.6",
- "resolved": "https://registry.npmjs.org/dockerode/-/dockerode-4.0.6.tgz",
- "integrity": "sha512-FbVf3Z8fY/kALB9s+P9epCpWhfi/r0N2DgYYcYpsAUlaTxPjdsitsFobnltb+lyCgAIvf9C+4PSWlTnHlJMf1w==",
- "license": "Apache-2.0",
+ "version": "4.0.10",
+ "resolved": "https://registry.npmjs.org/dockerode/-/dockerode-4.0.10.tgz",
+ "integrity": "sha512-8L/P9JynLBiG7/coiA4FlQXegHltRqS0a+KqI44P1zgQh8QLHTg7FKOwhkBgSJwZTeHsq30WRoVFLuwkfK0YFg==",
"dependencies": {
"@balena/dockerignore": "^1.0.2",
"@grpc/grpc-js": "^1.11.1",
"@grpc/proto-loader": "^0.7.13",
- "docker-modem": "^5.0.6",
+ "docker-modem": "^5.0.7",
"protobufjs": "^7.3.2",
- "tar-fs": "~2.1.2",
+ "tar-fs": "^2.1.4",
"uuid": "^10.0.0"
},
"engines": {
"node": ">= 8.0"
}
},
- "node_modules/eastasianwidth": {
- "version": "0.2.0",
- "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
- "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
- },
"node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
},
"node_modules/end-of-stream": {
- "version": "1.4.4",
- "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
- "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
- "license": "MIT",
+ "version": "1.4.5",
+ "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz",
+ "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==",
"dependencies": {
"once": "^1.4.0"
}
@@ -488,32 +349,15 @@
"node": ">=6"
}
},
- "node_modules/foreground-child": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
- "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==",
- "dependencies": {
- "cross-spawn": "^7.0.0",
- "signal-exit": "^4.0.1"
- },
- "engines": {
- "node": ">=14"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/fs-constants": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz",
- "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==",
- "license": "MIT"
+ "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow=="
},
"node_modules/fs-extra": {
- "version": "11.3.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.0.tgz",
- "integrity": "sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew==",
- "license": "MIT",
+ "version": "11.3.4",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.4.tgz",
+ "integrity": "sha512-CTXd6rk/M3/ULNQj8FBqBWHYBVYybQ3VPBw0xGKFe3tuH7ytT6ACnvzpIQ3UZtB8yvUKC2cXn1a+x+5EVQLovA==",
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
@@ -531,27 +375,6 @@
"node": "6.* || 8.* || >= 10.*"
}
},
- "node_modules/glob": {
- "version": "10.3.12",
- "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.12.tgz",
- "integrity": "sha512-TCNv8vJ+xz4QiqTpfOJA7HvYv+tNIRHKfUWw/q+v2jdgN4ebz+KY9tGx5J4rHP0o84mNP+ApH66HRX8us3Khqg==",
- "dependencies": {
- "foreground-child": "^3.1.0",
- "jackspeak": "^2.3.6",
- "minimatch": "^9.0.1",
- "minipass": "^7.0.4",
- "path-scurry": "^1.10.2"
- },
- "bin": {
- "glob": "dist/esm/bin.mjs"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/graceful-fs": {
"version": "4.2.10",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz",
@@ -574,8 +397,7 @@
"type": "consulting",
"url": "https://feross.org/support"
}
- ],
- "license": "BSD-3-Clause"
+ ]
},
"node_modules/inherits": {
"version": "2.0.4",
@@ -590,28 +412,6 @@
"node": ">=8"
}
},
- "node_modules/isexe": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
- "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
- },
- "node_modules/jackspeak": {
- "version": "2.3.6",
- "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz",
- "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==",
- "dependencies": {
- "@isaacs/cliui": "^8.0.2"
- },
- "engines": {
- "node": ">=14"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- },
- "optionalDependencies": {
- "@pkgjs/parseargs": "^0.11.0"
- }
- },
"node_modules/jsonfile": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
@@ -633,28 +433,6 @@
"resolved": "https://registry.npmjs.org/long/-/long-5.2.4.tgz",
"integrity": "sha512-qtzLbJE8hq7VabR3mISmVGtoXP8KGc2Z/AT8OuqlYD7JTR3oqrgwdjnk07wpj1twXxYmgDXgoKVWUG/fReSzHg=="
},
- "node_modules/lru-cache": {
- "version": "10.2.2",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.2.tgz",
- "integrity": "sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==",
- "engines": {
- "node": "14 || >=16.14"
- }
- },
- "node_modules/minimatch": {
- "version": "9.0.4",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.4.tgz",
- "integrity": "sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==",
- "dependencies": {
- "brace-expansion": "^2.0.1"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/minipass": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
@@ -664,61 +442,42 @@
}
},
"node_modules/minizlib": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.1.tgz",
- "integrity": "sha512-umcy022ILvb5/3Djuu8LWeqUa8D68JaBzlttKeMWen48SjabqS3iY5w/vzeMzMUNhLDifyhbOwKDSznB1vvrwg==",
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.1.0.tgz",
+ "integrity": "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==",
"dependencies": {
- "minipass": "^7.0.4",
- "rimraf": "^5.0.5"
+ "minipass": "^7.1.2"
},
"engines": {
"node": ">= 18"
}
},
- "node_modules/mkdirp": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz",
- "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==",
- "bin": {
- "mkdirp": "dist/cjs/src/bin.js"
- },
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/mkdirp-classic": {
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz",
- "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==",
- "license": "MIT"
+ "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A=="
},
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
- "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
- "license": "MIT"
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
},
"node_modules/nan": {
- "version": "2.22.0",
- "resolved": "https://registry.npmjs.org/nan/-/nan-2.22.0.tgz",
- "integrity": "sha512-nbajikzWTMwsW+eSsNm3QwlOs7het9gGJU5dDZzRTQGk03vyBOauxgI4VakDzE0PtsGTmXPsXTbbjVhRwR5mpw==",
- "license": "MIT",
+ "version": "2.26.2",
+ "resolved": "https://registry.npmjs.org/nan/-/nan-2.26.2.tgz",
+ "integrity": "sha512-0tTvBTYkt3tdGw22nrAy50x7gpbGCCFH3AFcyS5WiUu7Eu4vWlri1woE6qHBSfy11vksDqkiwjOnlR7WV8G1Hw==",
"optional": true
},
"node_modules/nanoid": {
- "version": "5.1.5",
- "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.5.tgz",
- "integrity": "sha512-Ir/+ZpE9fDsNH0hQ3C68uyThDXzYcim2EqcZ8zn8Chtt1iylPT9xXJB0kPCnqzgcEGikO9RxSrh63MsmVCU7Fw==",
+ "version": "5.1.7",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.7.tgz",
+ "integrity": "sha512-ua3NDgISf6jdwezAheMOk4mbE1LXjm1DfMUDMuJf4AqxLFK3ccGpgWizwa5YV7Yz9EpXwEaWoRXSb/BnV0t5dQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
- "license": "MIT",
"bin": {
"nanoid": "bin/nanoid.js"
},
@@ -736,34 +495,10 @@
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
- "license": "ISC",
"dependencies": {
"wrappy": "1"
}
},
- "node_modules/path-key": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
- "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/path-scurry": {
- "version": "1.10.2",
- "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.2.tgz",
- "integrity": "sha512-7xTavNy5RQXnsjANvVvMkEjvloOinkAjv/Z6Ildz9v2RinZ4SBKTWFOVRbaF8p0vpHnyjV/UwNDdKuUv6M5qcA==",
- "dependencies": {
- "lru-cache": "^10.2.0",
- "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/protobufjs": {
"version": "7.4.0",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.4.0.tgz",
@@ -788,10 +523,9 @@
}
},
"node_modules/pump": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.2.tgz",
- "integrity": "sha512-tUPXtzlGM8FE3P0ZL6DVs/3P58k9nk8/jZeQCurTJylQA8qFYzHFfhBJkuqyE0FifOsQ0uKWekiZ5g8wtr28cw==",
- "license": "MIT",
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.4.tgz",
+ "integrity": "sha512-VS7sjc6KR7e1ukRFhQSY5LM2uBWAUPiOPa/A3mkKmiMwSmRFUITt0xuj+/lesgnCv+dPIEYlkzrcyXgquIHMcA==",
"dependencies": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
@@ -818,23 +552,6 @@
"node": ">=0.10.0"
}
},
- "node_modules/rimraf": {
- "version": "5.0.5",
- "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-5.0.5.tgz",
- "integrity": "sha512-CqDakW+hMe/Bz202FPEymy68P+G50RfMQK+Qo5YUqc9SPipvbGjCGKd0RSKEelbsfQuw3g5NZDSrlZZAJurH1A==",
- "dependencies": {
- "glob": "^10.3.7"
- },
- "bin": {
- "rimraf": "dist/esm/bin.mjs"
- },
- "engines": {
- "node": ">=14"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
@@ -857,43 +574,12 @@
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
- "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
- "license": "MIT"
- },
- "node_modules/shebang-command": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
- "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
- "dependencies": {
- "shebang-regex": "^3.0.0"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/shebang-regex": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
- "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/signal-exit": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
- "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
- "engines": {
- "node": ">=14"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
},
"node_modules/slugify": {
- "version": "1.6.6",
- "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.6.6.tgz",
- "integrity": "sha512-h+z7HKHYXj6wJU+AnS/+IH8Uh9fdcX1Lrhg1/VMdf9PwoBQXFcXiAdsy2tSK0P6gKwJLXp02r90ahUCqHk9rrw==",
+ "version": "1.6.9",
+ "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.6.9.tgz",
+ "integrity": "sha512-vZ7rfeehZui7wQs438JXBckYLkIIdfHOXsaVEUMyS5fHo1483l1bMdo0EDSWYclY0yZKFOipDy4KHuKs6ssvdg==",
"engines": {
"node": ">=8.0.0"
}
@@ -901,13 +587,12 @@
"node_modules/split-ca": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/split-ca/-/split-ca-1.0.1.tgz",
- "integrity": "sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ==",
- "license": "ISC"
+ "integrity": "sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ=="
},
"node_modules/ssh2": {
- "version": "1.16.0",
- "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.16.0.tgz",
- "integrity": "sha512-r1X4KsBGedJqo7h8F5c4Ybpcr5RjyP+aWIG007uBPRjmdQWfEiVLzSK71Zji1B9sKxwaCvD8y8cwSkYrlLiRRg==",
+ "version": "1.17.0",
+ "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.17.0.tgz",
+ "integrity": "sha512-wPldCk3asibAjQ/kziWQQt1Wh3PgDFpC0XpwclzKcdT1vql6KeYxf5LIt4nlFkUeR8WuphYMKqUA56X4rjbfgQ==",
"hasInstallScript": true,
"dependencies": {
"asn1": "^0.2.6",
@@ -918,7 +603,7 @@
},
"optionalDependencies": {
"cpu-features": "~0.0.10",
- "nan": "^2.20.0"
+ "nan": "^2.23.0"
}
},
"node_modules/string_decoder": {
@@ -942,20 +627,6 @@
"node": ">=8"
}
},
- "node_modules/string-width-cjs": {
- "name": "string-width",
- "version": "4.2.3",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
- "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
- "dependencies": {
- "emoji-regex": "^8.0.0",
- "is-fullwidth-code-point": "^3.0.0",
- "strip-ansi": "^6.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
@@ -967,28 +638,15 @@
"node": ">=8"
}
},
- "node_modules/strip-ansi-cjs": {
- "name": "strip-ansi",
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/tar": {
- "version": "7.4.3",
- "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz",
- "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==",
+ "version": "7.5.13",
+ "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.13.tgz",
+ "integrity": "sha512-tOG/7GyXpFevhXVh8jOPJrmtRpOTsYqUIkVdVooZYJS/z8WhfQUX8RJILmeuJNinGAMSu1veBr4asSHFt5/hng==",
"dependencies": {
"@isaacs/fs-minipass": "^4.0.0",
"chownr": "^3.0.0",
"minipass": "^7.1.2",
- "minizlib": "^3.0.1",
- "mkdirp": "^3.0.1",
+ "minizlib": "^3.1.0",
"yallist": "^5.0.0"
},
"engines": {
@@ -996,10 +654,9 @@
}
},
"node_modules/tar-fs": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.2.tgz",
- "integrity": "sha512-EsaAXwxmx8UB7FRKqeozqEPop69DXcmYwTQwXvyAPF352HJsPdkVhvTaDPYqfNgruveJIJy3TA2l+2zj8LJIJA==",
- "license": "MIT",
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz",
+ "integrity": "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==",
"dependencies": {
"chownr": "^1.1.1",
"mkdirp-classic": "^0.5.2",
@@ -1011,7 +668,6 @@
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz",
"integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==",
- "license": "MIT",
"dependencies": {
"bl": "^4.0.3",
"end-of-stream": "^1.4.1",
@@ -1034,8 +690,7 @@
"node_modules/tweetnacl": {
"version": "0.14.5",
"resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
- "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==",
- "license": "Unlicense"
+ "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA=="
},
"node_modules/undici-types": {
"version": "6.20.0",
@@ -1067,20 +722,6 @@
"uuid": "dist/bin/uuid"
}
},
- "node_modules/which": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
- "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
- "dependencies": {
- "isexe": "^2.0.0"
- },
- "bin": {
- "node-which": "bin/node-which"
- },
- "engines": {
- "node": ">= 8"
- }
- },
"node_modules/wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
@@ -1097,28 +738,10 @@
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
- "node_modules/wrap-ansi-cjs": {
- "name": "wrap-ansi",
- "version": "7.0.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
- "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
- "dependencies": {
- "ansi-styles": "^4.0.0",
- "string-width": "^4.1.0",
- "strip-ansi": "^6.0.0"
- },
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
- }
- },
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
- "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
- "license": "ISC"
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
},
"node_modules/y18n": {
"version": "5.0.8",
@@ -1188,64 +811,6 @@
"yargs": "^17.7.2"
}
},
- "@isaacs/cliui": {
- "version": "8.0.2",
- "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
- "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
- "requires": {
- "string-width": "^5.1.2",
- "string-width-cjs": "npm:string-width@^4.2.0",
- "strip-ansi": "^7.0.1",
- "strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
- "wrap-ansi": "^8.1.0",
- "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
- },
- "dependencies": {
- "ansi-regex": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
- "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA=="
- },
- "ansi-styles": {
- "version": "6.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
- "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug=="
- },
- "emoji-regex": {
- "version": "9.2.2",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
- "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
- },
- "string-width": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
- "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
- "requires": {
- "eastasianwidth": "^0.2.0",
- "emoji-regex": "^9.2.2",
- "strip-ansi": "^7.0.1"
- }
- },
- "strip-ansi": {
- "version": "7.1.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
- "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
- "requires": {
- "ansi-regex": "^6.0.1"
- }
- },
- "wrap-ansi": {
- "version": "8.1.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
- "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
- "requires": {
- "ansi-styles": "^6.1.0",
- "string-width": "^5.0.1",
- "strip-ansi": "^7.0.1"
- }
- }
- }
- },
"@isaacs/fs-minipass": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz",
@@ -1259,12 +824,6 @@
"resolved": "https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz",
"integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw=="
},
- "@pkgjs/parseargs": {
- "version": "0.11.0",
- "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
- "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
- "optional": true
- },
"@protobufjs/aspromise": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
@@ -1348,11 +907,6 @@
"safer-buffer": "~2.1.0"
}
},
- "balanced-match": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
- "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
- },
"base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
@@ -1376,14 +930,6 @@
"readable-stream": "^3.4.0"
}
},
- "brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
- "requires": {
- "balanced-match": "^1.0.0"
- }
- },
"buffer": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz",
@@ -1394,9 +940,9 @@
}
},
"buildcheck": {
- "version": "0.0.6",
- "resolved": "https://registry.npmjs.org/buildcheck/-/buildcheck-0.0.6.tgz",
- "integrity": "sha512-8f9ZJCUXyT1M35Jx7MkBgmBMo3oHTTBIPLiY9xyL0pl3T5RwcPEY8cUHr5LBNfu/fk6c2T4DJZuVM/8ZZT2D2A==",
+ "version": "0.0.7",
+ "resolved": "https://registry.npmjs.org/buildcheck/-/buildcheck-0.0.7.tgz",
+ "integrity": "sha512-lHblz4ahamxpTmnsk+MNTRWsjYKv965MwOrSJyeD588rR3Jcu7swE+0wN5F+PbL5cjgu/9ObkhfzEPuofEMwLA==",
"optional": true
},
"chownr": {
@@ -1437,28 +983,18 @@
"nan": "^2.19.0"
}
},
- "cross-spawn": {
- "version": "7.0.3",
- "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
- "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
- "requires": {
- "path-key": "^3.1.0",
- "shebang-command": "^2.0.0",
- "which": "^2.0.1"
- }
- },
"debug": {
- "version": "4.4.0",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz",
- "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==",
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
"requires": {
"ms": "^2.1.3"
}
},
"docker-modem": {
- "version": "5.0.6",
- "resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-5.0.6.tgz",
- "integrity": "sha512-ens7BiayssQz/uAxGzH8zGXCtiV24rRWXdjNha5V4zSOcxmAZsfGVm/PPFbwQdqEkDnhG+SyR9E3zSHUbOKXBQ==",
+ "version": "5.0.7",
+ "resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-5.0.7.tgz",
+ "integrity": "sha512-XJgGhoR/CLpqshm4d3L7rzH6t8NgDFUIIpztYlLHIApeJjMZKYJMz2zxPsYxnejq5h3ELYSw/RBsi3t5h7gNTA==",
"requires": {
"debug": "^4.1.1",
"readable-stream": "^3.5.0",
@@ -1467,33 +1003,28 @@
}
},
"dockerode": {
- "version": "4.0.6",
- "resolved": "https://registry.npmjs.org/dockerode/-/dockerode-4.0.6.tgz",
- "integrity": "sha512-FbVf3Z8fY/kALB9s+P9epCpWhfi/r0N2DgYYcYpsAUlaTxPjdsitsFobnltb+lyCgAIvf9C+4PSWlTnHlJMf1w==",
+ "version": "4.0.10",
+ "resolved": "https://registry.npmjs.org/dockerode/-/dockerode-4.0.10.tgz",
+ "integrity": "sha512-8L/P9JynLBiG7/coiA4FlQXegHltRqS0a+KqI44P1zgQh8QLHTg7FKOwhkBgSJwZTeHsq30WRoVFLuwkfK0YFg==",
"requires": {
"@balena/dockerignore": "^1.0.2",
"@grpc/grpc-js": "^1.11.1",
"@grpc/proto-loader": "^0.7.13",
- "docker-modem": "^5.0.6",
+ "docker-modem": "^5.0.7",
"protobufjs": "^7.3.2",
- "tar-fs": "~2.1.2",
+ "tar-fs": "^2.1.4",
"uuid": "^10.0.0"
}
},
- "eastasianwidth": {
- "version": "0.2.0",
- "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
- "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
- },
"emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
},
"end-of-stream": {
- "version": "1.4.4",
- "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
- "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
+ "version": "1.4.5",
+ "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz",
+ "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==",
"requires": {
"once": "^1.4.0"
}
@@ -1503,24 +1034,15 @@
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
"integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw=="
},
- "foreground-child": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
- "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==",
- "requires": {
- "cross-spawn": "^7.0.0",
- "signal-exit": "^4.0.1"
- }
- },
"fs-constants": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz",
"integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow=="
},
"fs-extra": {
- "version": "11.3.0",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.0.tgz",
- "integrity": "sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew==",
+ "version": "11.3.4",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.4.tgz",
+ "integrity": "sha512-CTXd6rk/M3/ULNQj8FBqBWHYBVYybQ3VPBw0xGKFe3tuH7ytT6ACnvzpIQ3UZtB8yvUKC2cXn1a+x+5EVQLovA==",
"requires": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
@@ -1532,18 +1054,6 @@
"resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="
},
- "glob": {
- "version": "10.3.12",
- "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.12.tgz",
- "integrity": "sha512-TCNv8vJ+xz4QiqTpfOJA7HvYv+tNIRHKfUWw/q+v2jdgN4ebz+KY9tGx5J4rHP0o84mNP+ApH66HRX8us3Khqg==",
- "requires": {
- "foreground-child": "^3.1.0",
- "jackspeak": "^2.3.6",
- "minimatch": "^9.0.1",
- "minipass": "^7.0.4",
- "path-scurry": "^1.10.2"
- }
- },
"graceful-fs": {
"version": "4.2.10",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz",
@@ -1564,20 +1074,6 @@
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="
},
- "isexe": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
- "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
- },
- "jackspeak": {
- "version": "2.3.6",
- "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz",
- "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==",
- "requires": {
- "@isaacs/cliui": "^8.0.2",
- "@pkgjs/parseargs": "^0.11.0"
- }
- },
"jsonfile": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
@@ -1597,38 +1093,19 @@
"resolved": "https://registry.npmjs.org/long/-/long-5.2.4.tgz",
"integrity": "sha512-qtzLbJE8hq7VabR3mISmVGtoXP8KGc2Z/AT8OuqlYD7JTR3oqrgwdjnk07wpj1twXxYmgDXgoKVWUG/fReSzHg=="
},
- "lru-cache": {
- "version": "10.2.2",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.2.tgz",
- "integrity": "sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ=="
- },
- "minimatch": {
- "version": "9.0.4",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.4.tgz",
- "integrity": "sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==",
- "requires": {
- "brace-expansion": "^2.0.1"
- }
- },
"minipass": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
"integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw=="
},
"minizlib": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.1.tgz",
- "integrity": "sha512-umcy022ILvb5/3Djuu8LWeqUa8D68JaBzlttKeMWen48SjabqS3iY5w/vzeMzMUNhLDifyhbOwKDSznB1vvrwg==",
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.1.0.tgz",
+ "integrity": "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==",
"requires": {
- "minipass": "^7.0.4",
- "rimraf": "^5.0.5"
+ "minipass": "^7.1.2"
}
},
- "mkdirp": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz",
- "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg=="
- },
"mkdirp-classic": {
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz",
@@ -1640,15 +1117,15 @@
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
},
"nan": {
- "version": "2.22.0",
- "resolved": "https://registry.npmjs.org/nan/-/nan-2.22.0.tgz",
- "integrity": "sha512-nbajikzWTMwsW+eSsNm3QwlOs7het9gGJU5dDZzRTQGk03vyBOauxgI4VakDzE0PtsGTmXPsXTbbjVhRwR5mpw==",
+ "version": "2.26.2",
+ "resolved": "https://registry.npmjs.org/nan/-/nan-2.26.2.tgz",
+ "integrity": "sha512-0tTvBTYkt3tdGw22nrAy50x7gpbGCCFH3AFcyS5WiUu7Eu4vWlri1woE6qHBSfy11vksDqkiwjOnlR7WV8G1Hw==",
"optional": true
},
"nanoid": {
- "version": "5.1.5",
- "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.5.tgz",
- "integrity": "sha512-Ir/+ZpE9fDsNH0hQ3C68uyThDXzYcim2EqcZ8zn8Chtt1iylPT9xXJB0kPCnqzgcEGikO9RxSrh63MsmVCU7Fw=="
+ "version": "5.1.7",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.7.tgz",
+ "integrity": "sha512-ua3NDgISf6jdwezAheMOk4mbE1LXjm1DfMUDMuJf4AqxLFK3ccGpgWizwa5YV7Yz9EpXwEaWoRXSb/BnV0t5dQ=="
},
"nanoid-dictionary": {
"version": "5.0.0",
@@ -1663,20 +1140,6 @@
"wrappy": "1"
}
},
- "path-key": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
- "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="
- },
- "path-scurry": {
- "version": "1.10.2",
- "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.2.tgz",
- "integrity": "sha512-7xTavNy5RQXnsjANvVvMkEjvloOinkAjv/Z6Ildz9v2RinZ4SBKTWFOVRbaF8p0vpHnyjV/UwNDdKuUv6M5qcA==",
- "requires": {
- "lru-cache": "^10.2.0",
- "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
- }
- },
"protobufjs": {
"version": "7.4.0",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.4.0.tgz",
@@ -1697,9 +1160,9 @@
}
},
"pump": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.2.tgz",
- "integrity": "sha512-tUPXtzlGM8FE3P0ZL6DVs/3P58k9nk8/jZeQCurTJylQA8qFYzHFfhBJkuqyE0FifOsQ0uKWekiZ5g8wtr28cw==",
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.4.tgz",
+ "integrity": "sha512-VS7sjc6KR7e1ukRFhQSY5LM2uBWAUPiOPa/A3mkKmiMwSmRFUITt0xuj+/lesgnCv+dPIEYlkzrcyXgquIHMcA==",
"requires": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
@@ -1720,14 +1183,6 @@
"resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
"integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="
},
- "rimraf": {
- "version": "5.0.5",
- "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-5.0.5.tgz",
- "integrity": "sha512-CqDakW+hMe/Bz202FPEymy68P+G50RfMQK+Qo5YUqc9SPipvbGjCGKd0RSKEelbsfQuw3g5NZDSrlZZAJurH1A==",
- "requires": {
- "glob": "^10.3.7"
- }
- },
"safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
@@ -1738,28 +1193,10 @@
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
},
- "shebang-command": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
- "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
- "requires": {
- "shebang-regex": "^3.0.0"
- }
- },
- "shebang-regex": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
- "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="
- },
- "signal-exit": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
- "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="
- },
"slugify": {
- "version": "1.6.6",
- "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.6.6.tgz",
- "integrity": "sha512-h+z7HKHYXj6wJU+AnS/+IH8Uh9fdcX1Lrhg1/VMdf9PwoBQXFcXiAdsy2tSK0P6gKwJLXp02r90ahUCqHk9rrw=="
+ "version": "1.6.9",
+ "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.6.9.tgz",
+ "integrity": "sha512-vZ7rfeehZui7wQs438JXBckYLkIIdfHOXsaVEUMyS5fHo1483l1bMdo0EDSWYclY0yZKFOipDy4KHuKs6ssvdg=="
},
"split-ca": {
"version": "1.0.1",
@@ -1767,14 +1204,14 @@
"integrity": "sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ=="
},
"ssh2": {
- "version": "1.16.0",
- "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.16.0.tgz",
- "integrity": "sha512-r1X4KsBGedJqo7h8F5c4Ybpcr5RjyP+aWIG007uBPRjmdQWfEiVLzSK71Zji1B9sKxwaCvD8y8cwSkYrlLiRRg==",
+ "version": "1.17.0",
+ "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.17.0.tgz",
+ "integrity": "sha512-wPldCk3asibAjQ/kziWQQt1Wh3PgDFpC0XpwclzKcdT1vql6KeYxf5LIt4nlFkUeR8WuphYMKqUA56X4rjbfgQ==",
"requires": {
"asn1": "^0.2.6",
"bcrypt-pbkdf": "^1.0.2",
"cpu-features": "~0.0.10",
- "nan": "^2.20.0"
+ "nan": "^2.23.0"
}
},
"string_decoder": {
@@ -1795,16 +1232,6 @@
"strip-ansi": "^6.0.1"
}
},
- "string-width-cjs": {
- "version": "npm:string-width@4.2.3",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
- "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
- "requires": {
- "emoji-regex": "^8.0.0",
- "is-fullwidth-code-point": "^3.0.0",
- "strip-ansi": "^6.0.1"
- }
- },
"strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
@@ -1813,24 +1240,15 @@
"ansi-regex": "^5.0.1"
}
},
- "strip-ansi-cjs": {
- "version": "npm:strip-ansi@6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "requires": {
- "ansi-regex": "^5.0.1"
- }
- },
"tar": {
- "version": "7.4.3",
- "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz",
- "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==",
+ "version": "7.5.13",
+ "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.13.tgz",
+ "integrity": "sha512-tOG/7GyXpFevhXVh8jOPJrmtRpOTsYqUIkVdVooZYJS/z8WhfQUX8RJILmeuJNinGAMSu1veBr4asSHFt5/hng==",
"requires": {
"@isaacs/fs-minipass": "^4.0.0",
"chownr": "^3.0.0",
"minipass": "^7.1.2",
- "minizlib": "^3.0.1",
- "mkdirp": "^3.0.1",
+ "minizlib": "^3.1.0",
"yallist": "^5.0.0"
},
"dependencies": {
@@ -1842,9 +1260,9 @@
}
},
"tar-fs": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.2.tgz",
- "integrity": "sha512-EsaAXwxmx8UB7FRKqeozqEPop69DXcmYwTQwXvyAPF352HJsPdkVhvTaDPYqfNgruveJIJy3TA2l+2zj8LJIJA==",
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz",
+ "integrity": "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==",
"requires": {
"chownr": "^1.1.1",
"mkdirp-classic": "^0.5.2",
@@ -1889,14 +1307,6 @@
"resolved": "https://registry.npmjs.org/uuid/-/uuid-10.0.0.tgz",
"integrity": "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ=="
},
- "which": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
- "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
- "requires": {
- "isexe": "^2.0.0"
- }
- },
"wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
@@ -1907,16 +1317,6 @@
"strip-ansi": "^6.0.0"
}
},
- "wrap-ansi-cjs": {
- "version": "npm:wrap-ansi@7.0.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
- "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
- "requires": {
- "ansi-styles": "^4.0.0",
- "string-width": "^4.1.0",
- "strip-ansi": "^6.0.0"
- }
- },
"wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
diff --git a/dev/deploy-to-container/package.json b/dev/deploy-to-container/package.json
index 09716c3094..ccc78fc63b 100644
--- a/dev/deploy-to-container/package.json
+++ b/dev/deploy-to-container/package.json
@@ -2,12 +2,12 @@
"name": "deploy-to-container",
"type": "module",
"dependencies": {
- "dockerode": "^4.0.6",
- "fs-extra": "^11.3.0",
- "nanoid": "5.1.5",
+ "dockerode": "^4.0.10",
+ "fs-extra": "^11.3.4",
+ "nanoid": "5.1.7",
"nanoid-dictionary": "5.0.0",
- "slugify": "1.6.6",
- "tar": "^7.4.3",
+ "slugify": "1.6.9",
+ "tar": "^7.5.13",
"yargs": "^17.7.2"
},
"engines": {
diff --git a/dev/deploy-to-container/settings_local.py b/dev/deploy-to-container/settings_local.py
index aacf000093..055b48d0f5 100644
--- a/dev/deploy-to-container/settings_local.py
+++ b/dev/deploy-to-container/settings_local.py
@@ -71,11 +71,11 @@
DE_GFM_BINARY = '/usr/local/bin/de-gfm'
-# No real secrets here, these are public testing values _only_
APP_API_TOKENS = {
- "ietf.api.views.ingest_email_test": ["ingestion-test-token"]
+ "ietf.api.red_api" : ["devtoken", "redtoken"], # Not a real secret
+ "ietf.api.views.ingest_email_test": ["ingestion-test-token"], # Not a real secret
+ "ietf.api.views_rpc" : ["devtoken"], # Not a real secret
}
-
# OIDC configuration
SITE_URL = 'https://__HOSTNAME__'
diff --git a/docker-compose.yml b/docker-compose.yml
index 2440faf121..073d04b896 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -13,9 +13,10 @@ services:
# network_mode: service:db
depends_on:
+ - blobdb
+ - blobstore
- db
- mq
- - blobstore
ipc: host
@@ -79,7 +80,10 @@ services:
command:
- '--loglevel=INFO'
depends_on:
+ - blobdb
+ - blobstore
- db
+ - mq
restart: unless-stopped
stop_grace_period: 1m
volumes:
@@ -102,7 +106,10 @@ services:
- '--concurrency=1'
depends_on:
+ - blobdb
+ - blobstore
- db
+ - mq
restart: unless-stopped
stop_grace_period: 1m
volumes:
@@ -125,11 +132,23 @@ services:
volumes:
- blobdb-data:/var/lib/postgresql/data
+# typesense:
+# image: typesense/typesense:30.1
+# restart: on-failure
+# ports:
+# - "8108:8108"
+# volumes:
+# - ./typesense-data:/data
+# command:
+# - '--data-dir=/data'
+# - '--api-key=typesense-api-key'
+# - '--enable-cors'
+
# Celery Beat is a periodic task runner. It is not normally needed for development,
# but can be enabled by uncommenting the following.
#
# beat:
-# image: ghcr.io/ietf-tools/datatracker-celery:latest
+# image: "${COMPOSE_PROJECT_NAME}-celery"
# init: true
# environment:
# CELERY_APP: ietf
diff --git a/docker/app.Dockerfile b/docker/app.Dockerfile
index fee3833733..dd4cf72ffd 100644
--- a/docker/app.Dockerfile
+++ b/docker/app.Dockerfile
@@ -10,12 +10,7 @@ ARG USER_GID=$USER_UID
COPY docker/scripts/app-setup-debian.sh /tmp/library-scripts/docker-setup-debian.sh
RUN sed -i 's/\r$//' /tmp/library-scripts/docker-setup-debian.sh && chmod +x /tmp/library-scripts/docker-setup-debian.sh
-# Add Postgresql Apt Repository to get 14
-RUN echo "deb http://apt.postgresql.org/pub/repos/apt $(. /etc/os-release && echo "$VERSION_CODENAME")-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list
-RUN wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
-
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
- && apt-get install -y --no-install-recommends postgresql-client-14 pgloader \
# Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
&& apt-get purge -y imagemagick imagemagick-6-common \
# Install common packages, non-root user
diff --git a/docker/base.Dockerfile b/docker/base.Dockerfile
index c1fe5b093e..2501636049 100644
--- a/docker/base.Dockerfile
+++ b/docker/base.Dockerfile
@@ -11,21 +11,22 @@ RUN apt-get update \
# Add Node.js Source
RUN apt-get install -y --no-install-recommends ca-certificates curl gnupg \
- && mkdir -p /etc/apt/keyrings\
- && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg
-RUN echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list
-RUN echo "Package: nodejs" >> /etc/apt/preferences.d/preferences && \
- echo "Pin: origin deb.nodesource.com" >> /etc/apt/preferences.d/preferences && \
- echo "Pin-Priority: 1001" >> /etc/apt/preferences.d/preferences
+ && mkdir -p /etc/apt/keyrings \
+ && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \
+ && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list
+RUN echo "Package: nodejs" >> /etc/apt/preferences.d/preferences \
+ && echo "Pin: origin deb.nodesource.com" >> /etc/apt/preferences.d/preferences \
+ && echo "Pin-Priority: 1001" >> /etc/apt/preferences.d/preferences
# Add Docker Source
-RUN curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
-RUN echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian \
- $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
-
-# Add PostgreSQL Source
-RUN echo "deb http://apt.postgresql.org/pub/repos/apt $(. /etc/os-release && echo "$VERSION_CODENAME")-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list
-RUN wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
+RUN mkdir -p /etc/apt/keyrings \
+ && curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker-archive-keyring.gpg \
+ && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | tee /etc/apt/sources.list.d/docker.list
+
+# Add PostgreSQL Source
+RUN mkdir -p /etc/apt/keyrings \
+ && curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /etc/apt/keyrings/apt.postgresql.org.gpg \
+ && echo "deb [signed-by=/etc/apt/keyrings/apt.postgresql.org.gpg] https://apt.postgresql.org/pub/repos/apt $(. /etc/os-release && echo "$VERSION_CODENAME")-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list
# Install the packages we need
RUN apt-get update --fix-missing && apt-get install -qy --no-install-recommends \
diff --git a/docker/celery.Dockerfile b/docker/celery.Dockerfile
index e7c7b9cc3f..e93ca3cf77 100644
--- a/docker/celery.Dockerfile
+++ b/docker/celery.Dockerfile
@@ -10,12 +10,7 @@ ARG USER_GID=$USER_UID
COPY docker/scripts/app-setup-debian.sh /tmp/library-scripts/docker-setup-debian.sh
RUN sed -i 's/\r$//' /tmp/library-scripts/docker-setup-debian.sh && chmod +x /tmp/library-scripts/docker-setup-debian.sh
-# Add Postgresql Apt Repository to get 14
-RUN echo "deb http://apt.postgresql.org/pub/repos/apt $(. /etc/os-release && echo "$VERSION_CODENAME")-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list
-RUN wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
-
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
- && apt-get install -y --no-install-recommends postgresql-client-14 pgloader \
# Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
&& apt-get purge -y imagemagick imagemagick-6-common \
# Install common packages, non-root user
diff --git a/docker/configs/nginx-proxy.conf b/docker/configs/nginx-proxy.conf
index 3068cc71d7..5a9ae31ad0 100644
--- a/docker/configs/nginx-proxy.conf
+++ b/docker/configs/nginx-proxy.conf
@@ -4,6 +4,7 @@ server {
proxy_read_timeout 1d;
proxy_send_timeout 1d;
+    client_max_body_size 0; # 0 disables request body size checking (nginx default is 1m)
root /var/www/html;
index index.html index.htm index.nginx-debian.html;
diff --git a/docker/configs/settings_local.py b/docker/configs/settings_local.py
index 3ee7a4295d..94adc516a4 100644
--- a/docker/configs/settings_local.py
+++ b/docker/configs/settings_local.py
@@ -100,3 +100,23 @@
bucket_name=f"{storagename}",
),
}
+
+# For development work on rfc-index generation, create a red_bucket/ directory in the project root
+# and uncomment these settings. Generated files will appear in this directory. To
+# generate an accurate index, put up-to-date copies of unusable-rfc-numbers.json,
+# april-first-rfc-numbers.json, and publication-std-levels.json in this directory
+# before generating the index.
+#
+# STORAGES["red_bucket"] = {
+# "BACKEND": "django.core.files.storage.FileSystemStorage",
+# "OPTIONS": {"location": "red_bucket"},
+# }
+
+APP_API_TOKENS = {
+ "ietf.api.red_api" : ["devtoken", "redtoken"], # Not a real secret
+ "ietf.api.views_rpc" : ["devtoken"], # Not a real secret
+}
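+# Clients authenticate to these endpoints by presenting one of the listed tokens
+# in the X-Api-Key request header (see the rpc API tests for examples).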
+
+# Errata system api configuration
+ERRATA_METADATA_NOTIFICATION_URL = "http://host.docker.internal:8808/api/rfc_metadata_update/"
+ERRATA_METADATA_NOTIFICATION_API_KEY = "not a real secret"
diff --git a/docker/docker-compose.extend.yml b/docker/docker-compose.extend.yml
index a69a453110..12ebe447d5 100644
--- a/docker/docker-compose.extend.yml
+++ b/docker/docker-compose.extend.yml
@@ -18,8 +18,8 @@ services:
- '5433'
blobstore:
ports:
- - '9000'
- - '9001'
+ - '9000:9000'
+ - '9001:9001'
celery:
volumes:
- .:/workspace
diff --git a/docker/scripts/app-configure-blobstore.py b/docker/scripts/app-configure-blobstore.py
index 3140e39306..9ae64e0041 100755
--- a/docker/scripts/app-configure-blobstore.py
+++ b/docker/scripts/app-configure-blobstore.py
@@ -24,10 +24,13 @@ def init_blobstore():
),
)
for bucketname in ARTIFACT_STORAGE_NAMES:
+ adjusted_bucket_name = (
+ os.environ.get("BLOB_STORE_BUCKET_PREFIX", "")
+ + bucketname
+ + os.environ.get("BLOB_STORE_BUCKET_SUFFIX", "")
+ ).strip()
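+        # e.g. (illustrative values): BLOB_STORE_BUCKET_PREFIX="dev-" and
+        # BLOB_STORE_BUCKET_SUFFIX="-test" turn a bucket named "staging"
+        # into "dev-staging-test"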
try:
- blobstore.create_bucket(
- Bucket=f"{os.environ.get('BLOB_STORE_BUCKET_PREFIX', '')}{bucketname}".strip()
- )
+ blobstore.create_bucket(Bucket=adjusted_bucket_name)
except botocore.exceptions.ClientError as err:
if err.response["Error"]["Code"] == "BucketAlreadyExists":
print(f"Bucket {bucketname} already exists")
@@ -36,5 +39,6 @@ def init_blobstore():
else:
print(f"Bucket {bucketname} created")
+
if __name__ == "__main__":
sys.exit(init_blobstore())
diff --git a/ietf/api/routers.py b/ietf/api/routers.py
index 745ddaa811..99afdb242a 100644
--- a/ietf/api/routers.py
+++ b/ietf/api/routers.py
@@ -3,14 +3,29 @@
from django.core.exceptions import ImproperlyConfigured
from rest_framework import routers
-class PrefixedSimpleRouter(routers.SimpleRouter):
- """SimpleRouter that adds a dot-separated prefix to its basename"""
+
+class PrefixedBasenameMixin:
+ """Mixin to add a prefix to the basename of a rest_framework BaseRouter"""
def __init__(self, name_prefix="", *args, **kwargs):
self.name_prefix = name_prefix
if len(self.name_prefix) == 0 or self.name_prefix[-1] == ".":
raise ImproperlyConfigured("Cannot use a name_prefix that is empty or ends with '.'")
super().__init__(*args, **kwargs)
- def get_default_basename(self, viewset):
- basename = super().get_default_basename(viewset)
- return f"{self.name_prefix}.{basename}"
+    def register(self, prefix, viewset, basename=None):
+        # Get the superclass "register" method from the class this is mixed in with.
+        # This avoids typing issues with calling super().register() directly in a
+        # mixin class.
+        super_register = getattr(super(), "register", None)
+        if super_register is None or not callable(super_register):
+            raise TypeError("Must be mixed in with a superclass that has a register() method")
+        if basename is None:
+            basename = self.get_default_basename(viewset)
+        super_register(prefix, viewset, basename=f"{self.name_prefix}.{basename}")
+
+
+class PrefixedSimpleRouter(PrefixedBasenameMixin, routers.SimpleRouter):
+ """SimpleRouter that adds a dot-separated prefix to its basename"""
+
+
+class PrefixedDefaultRouter(PrefixedBasenameMixin, routers.DefaultRouter):
+ """DefaultRouter that adds a dot-separated prefix to its basename"""
+
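+# Example usage (illustrative names; actual registrations live in the API URL config):
+#
+#   router = PrefixedSimpleRouter(name_prefix="purple")
+#   router.register("drafts", DraftViewSet, basename="draft")
+#
+# The registered routes then reverse() under names like "purple.draft-list" and
+# "purple.draft-detail".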
diff --git a/ietf/api/serializers_rpc.py b/ietf/api/serializers_rpc.py
new file mode 100644
index 0000000000..d888de4586
--- /dev/null
+++ b/ietf/api/serializers_rpc.py
@@ -0,0 +1,804 @@
+# Copyright The IETF Trust 2025-2026, All Rights Reserved
+import datetime
+from pathlib import Path
+from typing import Literal, Optional
+
+from django.db import transaction
+from django.urls import reverse as urlreverse
+from django.utils import timezone
+from drf_spectacular.types import OpenApiTypes
+from drf_spectacular.utils import extend_schema_field
+from rest_framework import serializers
+
+from ietf.doc.expire import move_draft_files_to_archive
+from ietf.doc.models import (
+ DocumentAuthor,
+ Document,
+ RelatedDocument,
+ State,
+ DocEvent,
+ RfcAuthor,
+)
+from ietf.doc.serializers import RfcAuthorSerializer
+from ietf.doc.tasks import trigger_red_precomputer_task, update_rfc_searchindex_task
+from ietf.doc.utils import (
+ default_consensus,
+ prettify_std_name,
+ update_action_holders,
+ update_rfcauthors,
+)
+from ietf.group.models import Group, Role
+from ietf.group.serializers import AreaSerializer
+from ietf.name.models import StreamName, StdLevelName
+from ietf.person.models import Person
+from ietf.utils import log
+
+
+class PersonSerializer(serializers.ModelSerializer):
+ email = serializers.EmailField(read_only=True)
+ picture = serializers.URLField(source="cdn_photo_url", read_only=True)
+ url = serializers.SerializerMethodField(
+ help_text="relative URL for datatracker person page"
+ )
+
+ class Meta:
+ model = Person
+ fields = ["id", "plain_name", "email", "picture", "url"]
+ read_only_fields = ["id", "plain_name", "email", "picture", "url"]
+
+ @extend_schema_field(OpenApiTypes.URI)
+ def get_url(self, object: Person):
+ return urlreverse(
+ "ietf.person.views.profile",
+ kwargs={"email_or_name": object.email_address() or object.name},
+ )
+
+
+class EmailPersonSerializer(serializers.Serializer):
+ email = serializers.EmailField(source="address")
+ person_pk = serializers.IntegerField(source="person.pk")
+ name = serializers.CharField(source="person.name")
+ last_name = serializers.CharField(source="person.last_name")
+ initials = serializers.CharField(source="person.initials")
+
+
+class LowerCaseEmailField(serializers.EmailField):
+ def to_representation(self, value):
+ return super().to_representation(value).lower()
+
+
+class AuthorPersonSerializer(serializers.ModelSerializer):
+ person_pk = serializers.IntegerField(source="pk", read_only=True)
+ last_name = serializers.CharField()
+ initials = serializers.CharField()
+ email_addresses = serializers.ListField(
+ source="email_set.all", child=LowerCaseEmailField()
+ )
+
+ class Meta:
+ model = Person
+ fields = ["person_pk", "name", "last_name", "initials", "email_addresses"]
+
+
+class RfcWithAuthorsSerializer(serializers.ModelSerializer):
+ authors = AuthorPersonSerializer(many=True, source="author_persons")
+
+ class Meta:
+ model = Document
+ fields = ["rfc_number", "authors"]
+
+
+class DraftWithAuthorsSerializer(serializers.ModelSerializer):
+ draft_name = serializers.CharField(source="name")
+ authors = AuthorPersonSerializer(many=True, source="author_persons")
+
+ class Meta:
+ model = Document
+ fields = ["draft_name", "authors"]
+
+
+class WgChairSerializer(serializers.Serializer):
+ """Serialize a WG chair's name and email from a Role"""
+
+ name = serializers.SerializerMethodField()
+ email = serializers.SerializerMethodField()
+
+ @extend_schema_field(serializers.CharField)
+ def get_name(self, role: Role) -> str:
+ return role.person.plain_name()
+
+ @extend_schema_field(serializers.EmailField)
+ def get_email(self, role: Role) -> str:
+ return role.email.email_address()
+
+
+class DocumentAuthorSerializer(serializers.ModelSerializer):
+ """Serializer for a Person in a response"""
+
+ plain_name = serializers.SerializerMethodField()
+
+ class Meta:
+ model = DocumentAuthor
+ fields = ["person", "plain_name", "affiliation"]
+
+ def get_plain_name(self, document_author: DocumentAuthor) -> str:
+ return document_author.person.plain_name()
+
+
+class FullDraftSerializer(serializers.ModelSerializer):
+ # Redefine these fields so they don't pick up the regex validator patterns.
+ # There seem to be some non-compliant drafts in the system! If this serializer
+ # is used for a writeable view, the validation will need to be added back.
+ name = serializers.CharField(max_length=255)
+ title = serializers.CharField(max_length=255)
+ group = serializers.SlugRelatedField(slug_field="acronym", read_only=True)
+ area = AreaSerializer(read_only=True)
+
+ # Other fields we need to add / adjust
+ source_format = serializers.SerializerMethodField()
+ authors = DocumentAuthorSerializer(many=True, source="documentauthor_set")
+ shepherd = serializers.PrimaryKeyRelatedField(
+ source="shepherd.person", read_only=True
+ )
+ consensus = serializers.SerializerMethodField()
+ wg_chairs = serializers.SerializerMethodField()
+
+ class Meta:
+ model = Document
+ fields = [
+ "id",
+ "name",
+ "rev",
+ "stream",
+ "title",
+ "group",
+ "area",
+ "abstract",
+ "pages",
+ "source_format",
+ "authors",
+ "intended_std_level",
+ "consensus",
+ "shepherd",
+ "ad",
+ "wg_chairs",
+ ]
+
+ def get_consensus(self, doc: Document) -> Optional[bool]:
+ return default_consensus(doc)
+
+ @extend_schema_field(WgChairSerializer(many=True))
+ def get_wg_chairs(self, doc: Document):
+ if doc.group is None:
+ return []
+ chairs = doc.group.role_set.filter(name_id="chair").select_related(
+ "person", "email"
+ )
+ return WgChairSerializer(chairs, many=True).data
+
+ def get_source_format(
+ self, doc: Document
+ ) -> Literal["unknown", "xml-v2", "xml-v3", "txt"]:
+ submission = doc.submission()
+ if submission is None:
+ return "unknown"
+ if ".xml" in submission.file_types:
+ if submission.xml_version == "3":
+ return "xml-v3"
+ else:
+ return "xml-v2"
+ elif ".txt" in submission.file_types:
+ return "txt"
+ return "unknown"
+
+
+class DraftSerializer(FullDraftSerializer):
+ class Meta:
+ model = Document
+ fields = [
+ "id",
+ "name",
+ "rev",
+ "stream",
+ "title",
+ "group",
+ "pages",
+ "source_format",
+ "authors",
+ "consensus",
+ ]
+
+
+class SubmittedToQueueSerializer(FullDraftSerializer):
+ submitted = serializers.SerializerMethodField()
+ consensus = serializers.SerializerMethodField()
+
+ class Meta:
+ model = Document
+ fields = [
+ "id",
+ "name",
+ "stream",
+ "submitted",
+ "consensus",
+ ]
+
+ def get_submitted(self, doc) -> Optional[datetime.datetime]:
+ event = doc.sent_to_rfc_editor_event()
+ return None if event is None else event.time
+
+ def get_consensus(self, doc) -> Optional[bool]:
+ return default_consensus(doc)
+
+
+class OriginalStreamSerializer(serializers.ModelSerializer):
+ stream = serializers.CharField(read_only=True, source="orig_stream_id")
+
+ class Meta:
+ model = Document
+ fields = ["rfc_number", "stream"]
+
+
+class ReferenceSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = Document
+ fields = ["id", "name"]
+ read_only_fields = ["id", "name"]
+
+
+def _update_authors(rfc, authors_data):
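+    """Replace an RFC's RfcAuthor set from validated serializer data
+
+    Returns the DocEvents recording the change (already saved).
+    """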
+ # Construct unsaved instances from validated author data
+ new_authors = [RfcAuthor(**authdata) for authdata in authors_data]
+ # Update the RFC with the new author set
+ with transaction.atomic():
+ change_events = update_rfcauthors(rfc, new_authors)
+ for event in change_events:
+ event.save()
+ return change_events
+
+
+class SubseriesNameField(serializers.RegexField):
+
+ def __init__(self, **kwargs):
+ # pattern: no leading 0, finite length (arbitrarily set to 5 digits)
+ regex = r"^(bcp|std|fyi)[1-9][0-9]{0,4}$"
+ super().__init__(regex, **kwargs)
+
+
+
+class RfcPubSerializer(serializers.ModelSerializer):
+ """Write-only serializer for RFC publication"""
+ # publication-related fields
+ published = serializers.DateTimeField(default_timezone=datetime.timezone.utc)
+ draft_name = serializers.RegexField(
+ required=False, regex=r"^draft-[a-zA-Z0-9-]+$"
+ )
+ draft_rev = serializers.RegexField(
+ required=False, regex=r"^[0-9][0-9]$"
+ )
+
+ # fields on the RFC Document that need tweaking from ModelSerializer defaults
+ rfc_number = serializers.IntegerField(min_value=1, required=True)
+ group = serializers.SlugRelatedField(
+ slug_field="acronym", queryset=Group.objects.all(), required=False
+ )
+ stream = serializers.PrimaryKeyRelatedField(
+ queryset=StreamName.objects.filter(used=True)
+ )
+ std_level = serializers.PrimaryKeyRelatedField(
+ queryset=StdLevelName.objects.filter(used=True),
+ )
+ ad = serializers.PrimaryKeyRelatedField(
+ queryset=Person.objects.all(),
+ allow_null=True,
+ required=False,
+ )
+ obsoletes = serializers.SlugRelatedField(
+ many=True,
+ required=False,
+ slug_field="rfc_number",
+ queryset=Document.objects.filter(type_id="rfc"),
+ )
+ updates = serializers.SlugRelatedField(
+ many=True,
+ required=False,
+ slug_field="rfc_number",
+ queryset=Document.objects.filter(type_id="rfc"),
+ )
+ subseries = serializers.ListField(child=SubseriesNameField(required=False))
+ # N.b., authors is _not_ a field on Document!
+ authors = RfcAuthorSerializer(many=True)
+
+ class Meta:
+ model = Document
+ fields = [
+ "published",
+ "draft_name",
+ "draft_rev",
+ "rfc_number",
+ "title",
+ "authors",
+ "group",
+ "stream",
+ "abstract",
+ "pages",
+ "std_level",
+ "ad",
+ "obsoletes",
+ "updates",
+ "subseries",
+ "keywords",
+ ]
+
+ def validate(self, data):
+ if "draft_name" in data or "draft_rev" in data:
+ if "draft_name" not in data:
+ raise serializers.ValidationError(
+ {"draft_name": "Missing draft_name"},
+ code="invalid-draft-spec",
+ )
+ if "draft_rev" not in data:
+ raise serializers.ValidationError(
+ {"draft_rev": "Missing draft_rev"},
+ code="invalid-draft-spec",
+ )
+ return data
+
+ def update(self, instance, validated_data):
+ raise RuntimeError("Cannot update with this serializer")
+
+ def create(self, validated_data):
+ """Publish an RFC"""
+ published = validated_data.pop("published")
+ draft_name = validated_data.pop("draft_name", None)
+ draft_rev = validated_data.pop("draft_rev", None)
+ obsoletes = validated_data.pop("obsoletes", [])
+ updates = validated_data.pop("updates", [])
+ subseries = validated_data.pop("subseries", [])
+
+ system_person = Person.objects.get(name="(System)")
+
+ # If specified, retrieve draft and extract RFC default values from it
+ if draft_name is None:
+ draft = None
+ else:
+ # validation enforces that draft_name and draft_rev are both present
+ draft = Document.objects.filter(
+ type_id="draft",
+ name=draft_name,
+ rev=draft_rev,
+ ).first()
+ if draft is None:
+ raise serializers.ValidationError(
+ {
+ "draft_name": "No such draft",
+ "draft_rev": "No such draft",
+ },
+ code="invalid-draft"
+ )
+ elif draft.get_state_slug() == "rfc":
+ raise serializers.ValidationError(
+ {
+ "draft_name": "Draft already published as RFC",
+ },
+ code="already-published-draft",
+ )
+
+ # Transaction to clean up if something fails
+ with transaction.atomic():
+ # create rfc, letting validated request data override draft defaults
+ rfc = self._create_rfc(validated_data)
+ DocEvent.objects.create(
+ doc=rfc,
+ rev=rfc.rev,
+ type="published_rfc",
+ time=published,
+ by=system_person,
+ desc="RFC published",
+ )
+ rfc.set_state(State.objects.get(used=True, type_id="rfc", slug="published"))
+
+ # create updates / obsoletes relations
+            for obsoleted_rfc in obsoletes:
+                RelatedDocument.objects.get_or_create(
+                    source=rfc, target=obsoleted_rfc, relationship_id="obs"
+                )
+            for updated_rfc in updates:
+                RelatedDocument.objects.get_or_create(
+                    source=rfc, target=updated_rfc, relationship_id="updates"
+                )
+
+ # create subseries relations
+ for subseries_doc_name in subseries:
+ ss_slug = subseries_doc_name[:3]
+ subseries_doc, ss_doc_created = Document.objects.get_or_create(
+ type_id=ss_slug, name=subseries_doc_name
+ )
+ if ss_doc_created:
+ subseries_doc.docevent_set.create(
+ type=f"{ss_slug}_doc_created",
+ by=system_person,
+ desc=f"Created {subseries_doc_name} via publication of {rfc.name}",
+ )
+ _, ss_rel_created = subseries_doc.relateddocument_set.get_or_create(
+ relationship_id="contains", target=rfc
+ )
+ if ss_rel_created:
+ subseries_doc.docevent_set.create(
+ type="sync_from_rfc_editor",
+ by=system_person,
+ desc=f"Added {rfc.name} to {subseries_doc.name}",
+ )
+ rfc.docevent_set.create(
+ type="sync_from_rfc_editor",
+ by=system_person,
+ desc=f"Added {rfc.name} to {subseries_doc.name}",
+ )
+
+ # create relation with draft and update draft state
+ if draft is not None:
+ draft_changes = []
+ draft_events = []
+ if draft.get_state_slug() != "rfc":
+ draft.set_state(
+ State.objects.get(used=True, type="draft", slug="rfc")
+ )
+ move_draft_files_to_archive(draft, draft.rev)
+ draft_changes.append(f"changed state to {draft.get_state()}")
+
+ r, created_relateddoc = RelatedDocument.objects.get_or_create(
+ source=draft, target=rfc, relationship_id="became_rfc",
+ )
+ if created_relateddoc:
+ change = "created {rel_name} relationship between {pretty_draft_name} and {pretty_rfc_name}".format(
+ rel_name=r.relationship.name.lower(),
+ pretty_draft_name=prettify_std_name(draft_name),
+ pretty_rfc_name=prettify_std_name(rfc.name),
+ )
+ draft_changes.append(change)
+
+ # Always set the "draft-iesg" state. This state should be set for all drafts, so
+ # log a warning if it is not set. What should happen here is that ietf stream
+ # RFCs come in as "rfcqueue" and are set to "pub" when they appear in the RFC index.
+ # Other stream documents should normally be "idexists" and be left that way. The
+ # code here *actually* leaves "draft-iesg" state alone if it is "idexists" or "pub",
+ # and changes any other state to "pub". If unset, it changes it to "idexists".
+ # This reflects historical behavior and should probably be updated, but a migration
+ # of existing drafts (and validation of the change) is needed before we change the
+ # handling.
+ prev_iesg_state = draft.get_state("draft-iesg")
+ if prev_iesg_state is None:
+ log.log(f'Warning while processing {rfc.name}: {draft.name} has no "draft-iesg" state')
+ new_iesg_state = State.objects.get(type_id="draft-iesg", slug="idexists")
+ elif prev_iesg_state.slug not in ("pub", "idexists"):
+ if prev_iesg_state.slug != "rfcqueue":
+ log.log(
+ 'Warning while processing {}: {} is in "draft-iesg" state {} (expected "rfcqueue")'.format(
+ rfc.name, draft.name, prev_iesg_state.slug
+ )
+ )
+ new_iesg_state = State.objects.get(type_id="draft-iesg", slug="pub")
+ else:
+ new_iesg_state = prev_iesg_state
+
+ if new_iesg_state != prev_iesg_state:
+ draft.set_state(new_iesg_state)
+ draft_changes.append(f"changed {new_iesg_state.type.label} to {new_iesg_state}")
+ e = update_action_holders(draft, prev_iesg_state, new_iesg_state)
+ if e:
+ draft_events.append(e)
+
+ # If the draft and RFC streams agree, move draft to "pub" stream state. If not, complain.
+ if draft.stream != rfc.stream:
+ log.log("Warning while processing {}: draft {} stream is {} but RFC stream is {}".format(
+ rfc.name, draft.name, draft.stream, rfc.stream
+ ))
+ elif draft.stream.slug in ["iab", "irtf", "ise", "editorial"]:
+ stream_slug = f"draft-stream-{draft.stream.slug}"
+ prev_state = draft.get_state(stream_slug)
+ if prev_state is not None and prev_state.slug != "pub":
+ new_state = State.objects.select_related("type").get(used=True, type__slug=stream_slug, slug="pub")
+ draft.set_state(new_state)
+ draft_changes.append(
+ f"changed {new_state.type.label} to {new_state}"
+ )
+ e = update_action_holders(draft, prev_state, new_state)
+ if e:
+ draft_events.append(e)
+ if draft_changes:
+ draft_events.append(
+ DocEvent.objects.create(
+ doc=draft,
+ rev=draft.rev,
+ by=system_person,
+ type="sync_from_rfc_editor",
+ desc=f"Updated while publishing {rfc.name} ({', '.join(draft_changes)})",
+ )
+ )
+ draft.save_with_history(draft_events)
+
+ return rfc
+
+ def _create_rfc(self, validated_data):
+ authors_data = validated_data.pop("authors")
+ rfc = Document.objects.create(
+ type_id="rfc",
+ name=f"rfc{validated_data['rfc_number']}",
+ **validated_data,
+ )
+ for order, author_data in enumerate(authors_data):
+ rfc.rfcauthor_set.create(
+ order=order,
+ **author_data,
+ )
+ return rfc
+
+
+class EditableRfcSerializer(serializers.ModelSerializer):
+ # Would be nice to reconcile this with ietf.doc.serializers.RfcSerializer.
+ # The purposes of that serializer (representing data for Red) and this one
+ # (accepting updates from Purple) are different enough that separate formats
+ # may be needed, but if not it'd be nice to have a single RfcSerializer that
+ # can serve both.
+ #
+ # Should also consider whether this and RfcPubSerializer should merge.
+ #
+ # Treats published and subseries fields as write-only. This isn't quite correct,
+ # but makes it easier and we don't currently use the serialized value except for
+ # debugging.
+ published = serializers.DateTimeField(
+ default_timezone=datetime.timezone.utc,
+ write_only=True,
+ )
+ authors = RfcAuthorSerializer(many=True, min_length=1, source="rfcauthor_set")
+ subseries = serializers.ListField(
+ child=SubseriesNameField(required=False),
+ write_only=True,
+ )
+
+ class Meta:
+ model = Document
+ fields = [
+ "published",
+ "title",
+ "authors",
+ "stream",
+ "abstract",
+ "pages",
+ "std_level",
+ "subseries",
+ "keywords",
+ ]
+
+ def create(self, validated_data):
+ raise RuntimeError("Cannot create with this serializer")
+
+ def update(self, instance, validated_data):
+ assert isinstance(instance, Document)
+ assert instance.type_id == "rfc"
+        rfc = instance  # use a more descriptive local name
+
+ system_person = Person.objects.get(name="(System)")
+
+ # Remove data that needs special handling. Use a singleton object to detect
+ # missing values in case we ever support a value that needs None as an option.
+ omitted = object()
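+        # (a fresh object() never compares equal to any deserialized value, unlike None)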
+ published = validated_data.pop("published", omitted)
+ subseries = validated_data.pop("subseries", omitted)
+ authors_data = validated_data.pop("rfcauthor_set", omitted)
+
+ # Transaction to clean up if something fails
+ with transaction.atomic():
+ # update the rfc Document itself
+ rfc_changes = []
+ rfc_events = []
+
+ for attr, new_value in validated_data.items():
+ old_value = getattr(rfc, attr)
+ if new_value != old_value:
+ rfc_changes.append(
+ f"changed {attr} to '{new_value}' from '{old_value}'"
+ )
+ setattr(rfc, attr, new_value)
+ if len(rfc_changes) > 0:
+                rfc_change_summary = ", ".join(rfc_changes)
+ rfc_events.append(
+ DocEvent.objects.create(
+ doc=rfc,
+ rev=rfc.rev,
+ by=system_person,
+ type="sync_from_rfc_editor",
+ desc=f"Changed metadata: {rfc_change_summary}",
+ )
+ )
+ if authors_data is not omitted:
+ rfc_events.extend(_update_authors(instance, authors_data))
+
+ if published is not omitted:
+ published_event = rfc.latest_event(type="published_rfc")
+ if published_event is None:
+ # unexpected, but possible in theory
+ rfc_events.append(
+ DocEvent.objects.create(
+ doc=rfc,
+ rev=rfc.rev,
+ type="published_rfc",
+ time=published,
+ by=system_person,
+ desc="RFC published",
+ )
+ )
+ rfc_events.append(
+ DocEvent.objects.create(
+ doc=rfc,
+ rev=rfc.rev,
+ type="sync_from_rfc_editor",
+ by=system_person,
+ desc=(
+ f"Set publication timestamp to {published.isoformat()}"
+ ),
+ )
+ )
+ else:
+ original_pub_time = published_event.time
+ if published != original_pub_time:
+ published_event.time = published
+ published_event.save()
+ rfc_events.append(
+ DocEvent.objects.create(
+ doc=rfc,
+ rev=rfc.rev,
+ type="sync_from_rfc_editor",
+ by=system_person,
+ desc=(
+ f"Changed publication time to "
+ f"{published.isoformat()} from "
+ f"{original_pub_time.isoformat()}"
+ )
+ )
+ )
+
+ # update subseries relations
+ if subseries is not omitted:
+ for subseries_doc_name in subseries:
+ ss_slug = subseries_doc_name[:3]
+ subseries_doc, ss_doc_created = Document.objects.get_or_create(
+ type_id=ss_slug, name=subseries_doc_name
+ )
+ if ss_doc_created:
+ subseries_doc.docevent_set.create(
+ type=f"{ss_slug}_doc_created",
+ by=system_person,
+ desc=f"Created {subseries_doc_name} via update of {rfc.name}",
+ )
+ _, ss_rel_created = subseries_doc.relateddocument_set.get_or_create(
+ relationship_id="contains", target=rfc
+ )
+ if ss_rel_created:
+ subseries_doc.docevent_set.create(
+ type="sync_from_rfc_editor",
+ by=system_person,
+ desc=f"Added {rfc.name} to {subseries_doc.name}",
+ )
+ rfc_events.append(
+ rfc.docevent_set.create(
+ type="sync_from_rfc_editor",
+ by=system_person,
+ desc=f"Added {rfc.name} to {subseries_doc.name}",
+ )
+ )
+ # Delete subseries relations that are no longer current
+ stale_subseries_relations = rfc.relations_that("contains").exclude(
+ source__name__in=subseries
+ )
+ for stale_relation in stale_subseries_relations:
+ stale_subseries_doc = stale_relation.source
+ rfc_events.append(
+ rfc.docevent_set.create(
+ type="sync_from_rfc_editor",
+ by=system_person,
+ desc=f"Removed {rfc.name} from {stale_subseries_doc.name}",
+ )
+ )
+ stale_subseries_doc.docevent_set.create(
+ type="sync_from_rfc_editor",
+ by=system_person,
+ desc=f"Removed {rfc.name} from {stale_subseries_doc.name}",
+ )
+ stale_subseries_relations.delete()
+ if len(rfc_events) > 0:
+ rfc.save_with_history(rfc_events)
+ # Gather obs and updates in both directions as a title/author change to
+ # this doc affects the info rendering of all of the other RFCs
+ needs_updating = sorted(
+ [
+ d.rfc_number
+ for d in [rfc]
+ + rfc.related_that_doc(("obs", "updates"))
+ + rfc.related_that(("obs", "updates"))
+ ]
+ )
+ trigger_red_precomputer_task.delay(rfc_number_list=needs_updating)
+ # Update the search index also
+ update_rfc_searchindex_task.delay(rfc.rfc_number)
+ return rfc
+
+
+class RfcFileSerializer(serializers.Serializer):
+ # The structure of this serializer is constrained by what openapi-generator-cli's
+ # python generator can correctly serialize as multipart/form-data. It does not
+ # handle nested serializers well (or perhaps at all). ListFields with child
+ # ChoiceField or RegexField do not serialize correctly. DictFields don't seem
+ # to work.
+ #
+ # It does seem to correctly send filenames along with FileFields, even as a child
+ # in a ListField, so we use that to convey the file format of each item. There
+ # are other options we could consider (e.g., a structured CharField) but this
+ # works.
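+    #
+    # Illustrative payload shape (made-up RFC number; field names are real):
+    #   rfc: 9999
+    #   contents: rfc9999.txt, rfc9999.xml, rfc9999.pdf
+    # Each file's extension conveys its format; the rest of the name is ignored.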
+ allowed_extensions = (
+ ".html",
+ ".json",
+ ".notprepped.xml",
+ ".pdf",
+ ".txt",
+ ".xml",
+ )
+
+ rfc = serializers.SlugRelatedField(
+ slug_field="rfc_number",
+ queryset=Document.objects.filter(type_id="rfc"),
+ help_text="RFC number to which the contents belong",
+ )
+ contents = serializers.ListField(
+ child=serializers.FileField(
+ allow_empty_file=False,
+ use_url=False,
+ ),
+ help_text=(
+ "List of content files. Filename extensions are used to identify "
+ "file types, but filenames are otherwise ignored."
+ ),
+ )
+ mtime = serializers.DateTimeField(
+ required=False,
+ default=timezone.now,
+ default_timezone=datetime.UTC,
+ help_text="Modification timestamp to apply to uploaded files",
+ )
+ replace = serializers.BooleanField(
+ required=False,
+ default=False,
+ help_text=(
+ "Replace existing files for this RFC. Defaults to false. When false, "
+ "if _any_ files already exist for the specified RFC the upload will be "
+ "rejected regardless of which files are being uploaded. When true,"
+ "existing files will be removed and new ones will be put in place. BE"
+ "VERY CAREFUL WITH THIS OPTION IN PRODUCTION."
+ ),
+ )
+
+ def validate_contents(self, data):
+ found_extensions = []
+ for uploaded_file in data:
+ if not hasattr(uploaded_file, "name"):
+ raise serializers.ValidationError(
+ "filename not specified for uploaded file",
+ code="missing-filename",
+ )
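+            # Join all suffixes so a name like "rfc.notprepped.xml" is matched
+            # against ".notprepped.xml" rather than just ".xml"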
+ ext = "".join(Path(uploaded_file.name).suffixes)
+ if ext not in self.allowed_extensions:
+ raise serializers.ValidationError(
+ f"File uploaded with invalid extension '{ext}'",
+ code="invalid-filename-ext",
+ )
+ if ext in found_extensions:
+ raise serializers.ValidationError(
+ f"More than one file uploaded with extension '{ext}'",
+ code="duplicate-filename-ext",
+                )
+            found_extensions.append(ext)
+        return data
+
+
+class NotificationAckSerializer(serializers.Serializer):
+ message = serializers.CharField(default="ack")
diff --git a/ietf/api/tests_serializers_rpc.py b/ietf/api/tests_serializers_rpc.py
new file mode 100644
index 0000000000..167ffcd3ee
--- /dev/null
+++ b/ietf/api/tests_serializers_rpc.py
@@ -0,0 +1,217 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+from unittest import mock
+
+from django.utils import timezone
+
+from ietf.utils.test_utils import TestCase
+from ietf.doc.models import Document
+from ietf.doc.factories import WgRfcFactory
+from .serializers_rpc import EditableRfcSerializer
+
+
+class EditableRfcSerializerTests(TestCase):
+ def test_create(self):
+ serializer = EditableRfcSerializer(
+ data={
+ "published": timezone.now(),
+ "title": "Yadda yadda yadda",
+ "authors": [
+ {
+ "titlepage_name": "B. Fett",
+ "is_editor": False,
+ "affiliation": "DBA Galactic Empire",
+ "country": "",
+ },
+ ],
+ "stream": "ietf",
+ "abstract": "A long time ago in a galaxy far, far away...",
+ "pages": 3,
+ "std_level": "inf",
+ "subseries": ["fyi999"],
+ }
+ )
+ self.assertTrue(serializer.is_valid())
+ with self.assertRaises(RuntimeError, msg="serializer does not allow create()"):
+ serializer.save()
+
+ @mock.patch("ietf.api.serializers_rpc.update_rfc_searchindex_task")
+ @mock.patch("ietf.api.serializers_rpc.trigger_red_precomputer_task")
+ def test_update(self, mock_trigger_red_task, mock_update_searchindex_task):
+ updates = WgRfcFactory.create_batch(2)
+ obsoletes = WgRfcFactory.create_batch(2)
+ rfc = WgRfcFactory(pages=10)
+ updated_by = WgRfcFactory.create_batch(2)
+ obsoleted_by = WgRfcFactory.create_batch(2)
+        for d in updates:
+            rfc.relateddocument_set.create(relationship_id="updates", target=d)
+        for d in obsoletes:
+            rfc.relateddocument_set.create(relationship_id="obs", target=d)
+        for d in updated_by:
+            d.relateddocument_set.create(relationship_id="updates", target=rfc)
+        for d in obsoleted_by:
+            d.relateddocument_set.create(relationship_id="obs", target=rfc)
+ serializer = EditableRfcSerializer(
+ instance=rfc,
+ data={
+ "published": timezone.now(),
+ "title": "Yadda yadda yadda",
+ "authors": [
+ {
+ "titlepage_name": "B. Fett",
+ "is_editor": False,
+ "affiliation": "DBA Galactic Empire",
+ "country": "",
+ },
+ ],
+ "stream": "ise",
+ "abstract": "A long time ago in a galaxy far, far away...",
+ "pages": 3,
+ "std_level": "inf",
+ "subseries": ["fyi999"],
+ },
+ )
+ self.assertTrue(serializer.is_valid())
+ result = serializer.save()
+ result.refresh_from_db()
+ self.assertEqual(result.title, "Yadda yadda yadda")
+ self.assertEqual(
+ list(
+ result.rfcauthor_set.values(
+ "titlepage_name", "is_editor", "affiliation", "country"
+ )
+ ),
+ [
+ {
+ "titlepage_name": "B. Fett",
+ "is_editor": False,
+ "affiliation": "DBA Galactic Empire",
+ "country": "",
+ },
+ ],
+ )
+ self.assertEqual(result.stream_id, "ise")
+ self.assertEqual(
+ result.abstract, "A long time ago in a galaxy far, far away..."
+ )
+ self.assertEqual(result.pages, 3)
+ self.assertEqual(result.std_level_id, "inf")
+ self.assertEqual(
+ result.part_of(),
+ [Document.objects.get(name="fyi999")],
+ )
+ # Confirm that red precomputer was triggered correctly
+ self.assertTrue(mock_trigger_red_task.delay.called)
+ _, mock_kwargs = mock_trigger_red_task.delay.call_args
+ self.assertIn("rfc_number_list", mock_kwargs)
+ expected_numbers = sorted(
+ [
+ d.rfc_number
+ for d in [rfc] + updates + obsoletes + updated_by + obsoleted_by
+ ]
+ )
+ self.assertEqual(mock_kwargs["rfc_number_list"], expected_numbers)
+ # Confirm that the search index update task was triggered correctly
+ self.assertTrue(mock_update_searchindex_task.delay.called)
+ self.assertEqual(
+ mock_update_searchindex_task.delay.call_args,
+ mock.call(rfc.rfc_number),
+ )
+
+ @mock.patch("ietf.api.serializers_rpc.update_rfc_searchindex_task")
+ @mock.patch("ietf.api.serializers_rpc.trigger_red_precomputer_task")
+ def test_partial_update(self, mock_trigger_red_task, mock_update_searchindex_task):
+ # We could test other permutations of fields, but authors is a partial update
+ # we know we are going to use, so verifying that one in particular.
+ updates = WgRfcFactory.create_batch(2)
+ obsoletes = WgRfcFactory.create_batch(2)
+ rfc = WgRfcFactory(pages=10, abstract="do or do not", title="padawan")
+ updated_by = WgRfcFactory.create_batch(2)
+ obsoleted_by = WgRfcFactory.create_batch(2)
+        for d in updates:
+            rfc.relateddocument_set.create(relationship_id="updates", target=d)
+        for d in obsoletes:
+            rfc.relateddocument_set.create(relationship_id="obs", target=d)
+        for d in updated_by:
+            d.relateddocument_set.create(relationship_id="updates", target=rfc)
+        for d in obsoleted_by:
+            d.relateddocument_set.create(relationship_id="obs", target=rfc)
+ serializer = EditableRfcSerializer(
+ partial=True,
+ instance=rfc,
+ data={
+ "authors": [
+ {
+ "titlepage_name": "B. Fett",
+ "is_editor": False,
+ "affiliation": "DBA Galactic Empire",
+ "country": "",
+ },
+ ],
+ },
+ )
+ self.assertTrue(serializer.is_valid())
+ result = serializer.save()
+ result.refresh_from_db()
+ self.assertEqual(rfc.title, "padawan")
+ self.assertEqual(
+ list(
+ result.rfcauthor_set.values(
+ "titlepage_name", "is_editor", "affiliation", "country"
+ )
+ ),
+ [
+ {
+ "titlepage_name": "B. Fett",
+ "is_editor": False,
+ "affiliation": "DBA Galactic Empire",
+ "country": "",
+ },
+ ],
+ )
+ self.assertEqual(result.stream_id, "ietf")
+ self.assertEqual(result.abstract, "do or do not")
+ self.assertEqual(result.pages, 10)
+ self.assertEqual(result.std_level_id, "ps")
+ self.assertEqual(result.part_of(), [])
+ # Confirm that the red precomputer was triggered correctly
+ self.assertTrue(mock_trigger_red_task.delay.called)
+ _, mock_kwargs = mock_trigger_red_task.delay.call_args
+ self.assertIn("rfc_number_list", mock_kwargs)
+ expected_numbers = sorted(
+ [
+ d.rfc_number
+ for d in [rfc] + updates + obsoletes + updated_by + obsoleted_by
+ ]
+ )
+ self.assertEqual(mock_kwargs["rfc_number_list"], expected_numbers)
+ # Confirm that the search index update task was called correctly
+ self.assertTrue(mock_update_searchindex_task.delay.called)
+ self.assertEqual(
+ mock_update_searchindex_task.delay.call_args,
+ mock.call(rfc.rfc_number),
+ )
+
+ # Test only a field on the Document itself to be sure that it works
+ mock_trigger_red_task.delay.reset_mock()
+ mock_update_searchindex_task.delay.reset_mock()
+ serializer = EditableRfcSerializer(
+ partial=True,
+ instance=rfc,
+ data={"title": "jedi master"},
+ )
+ self.assertTrue(serializer.is_valid())
+ result = serializer.save()
+ result.refresh_from_db()
+ self.assertEqual(rfc.title, "jedi master")
+ # Confirm that the red precomputer was triggered correctly
+ self.assertTrue(mock_trigger_red_task.delay.called)
+ _, mock_kwargs = mock_trigger_red_task.delay.call_args
+ self.assertIn("rfc_number_list", mock_kwargs)
+ self.assertEqual(mock_kwargs["rfc_number_list"], expected_numbers)
+ # Confirm that the search index update task was called correctly
+ self.assertTrue(mock_update_searchindex_task.delay.called)
+ self.assertEqual(
+ mock_update_searchindex_task.delay.call_args,
+ mock.call(rfc.rfc_number),
+ )
diff --git a/ietf/api/tests_views_rpc.py b/ietf/api/tests_views_rpc.py
new file mode 100644
index 0000000000..180221cffc
--- /dev/null
+++ b/ietf/api/tests_views_rpc.py
@@ -0,0 +1,432 @@
+# Copyright The IETF Trust 2025, All Rights Reserved
+import datetime
+from io import StringIO
+from pathlib import Path
+from tempfile import TemporaryDirectory
+from unittest import mock
+
+from django.conf import settings
+from django.core.files.base import ContentFile
+from django.db.models import Max
+from django.db.models.functions import Coalesce
+from django.test.utils import override_settings
+from django.urls import reverse as urlreverse
+from django.utils import timezone
+
+from ietf.blobdb.models import Blob
+from ietf.doc.factories import IndividualDraftFactory, RfcFactory, WgDraftFactory, WgRfcFactory
+from ietf.doc.models import RelatedDocument, Document
+from ietf.group.factories import RoleFactory, GroupFactory
+from ietf.person.factories import PersonFactory
+from ietf.sync.rfcindex import rfcindex_is_dirty
+from ietf.utils.models import DirtyBits
+from ietf.utils.test_utils import APITestCase, reload_db_objects
+
+
+class RpcApiTests(APITestCase):
+ @override_settings(APP_API_TOKENS={"ietf.api.views_rpc": ["valid-token"]})
+ def test_draftviewset_references(self):
+ viewname = "ietf.api.purple_api.draft-references"
+
+ # non-existent draft
+ bad_id = Document.objects.aggregate(unused_id=Coalesce(Max("id"), 0) + 100)[
+ "unused_id"
+ ]
+ url = urlreverse(viewname, kwargs={"doc_id": bad_id})
+ # Without credentials
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 403)
+ # Add credentials
+ r = self.client.get(url, headers={"X-Api-Key": "valid-token"})
+ self.assertEqual(r.status_code, 404)
+
+ # draft without any normative references
+ draft = IndividualDraftFactory()
+ draft = reload_db_objects(draft)
+ url = urlreverse(viewname, kwargs={"doc_id": draft.id})
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 403)
+ r = self.client.get(url, headers={"X-Api-Key": "valid-token"})
+ self.assertEqual(r.status_code, 200)
+ refs = r.json()
+ self.assertEqual(refs, [])
+
+ # draft without any normative references but with an informative reference
+ draft_foo = IndividualDraftFactory()
+ draft_foo = reload_db_objects(draft_foo)
+ RelatedDocument.objects.create(
+ source=draft, target=draft_foo, relationship_id="refinfo"
+ )
+ url = urlreverse(viewname, kwargs={"doc_id": draft.id})
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 403)
+ r = self.client.get(url, headers={"X-Api-Key": "valid-token"})
+ self.assertEqual(r.status_code, 200)
+ refs = r.json()
+ self.assertEqual(refs, [])
+
+ # draft with a normative reference
+ draft_bar = IndividualDraftFactory()
+ draft_bar = reload_db_objects(draft_bar)
+ RelatedDocument.objects.create(
+ source=draft, target=draft_bar, relationship_id="refnorm"
+ )
+ url = urlreverse(viewname, kwargs={"doc_id": draft.id})
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 403)
+ r = self.client.get(url, headers={"X-Api-Key": "valid-token"})
+ self.assertEqual(r.status_code, 200)
+ refs = r.json()
+ self.assertEqual(len(refs), 1)
+ self.assertEqual(refs[0]["id"], draft_bar.id)
+ self.assertEqual(refs[0]["name"], draft_bar.name)
+
+ @override_settings(APP_API_TOKENS={"ietf.api.views_rpc": ["valid-token"]})
+ @mock.patch("ietf.doc.tasks.signal_update_rfc_metadata_task.delay")
+ def test_notify_rfc_published(self, mock_task_delay):
+ url = urlreverse("ietf.api.purple_api.notify_rfc_published")
+ area = GroupFactory(type_id="area")
+ rfc_group = GroupFactory(type_id="wg")
+ draft_ad = RoleFactory(group=area, name_id="ad").person
+ rfc_ad = PersonFactory()
+ draft_authors = PersonFactory.create_batch(2)
+ rfc_authors = PersonFactory.create_batch(3)
+ draft = WgDraftFactory(
+ group__parent=area, authors=draft_authors, ad=draft_ad, stream_id="ietf"
+ )
+ rfc_stream_id = "ise"
+ assert isinstance(draft, Document), "WgDraftFactory should generate a Document"
+ updates = RfcFactory.create_batch(2)
+ obsoletes = RfcFactory.create_batch(2)
+ unused_rfc_number = (
+ Document.objects.filter(rfc_number__isnull=False).aggregate(
+ unused_rfc_number=Max("rfc_number") + 1
+ )["unused_rfc_number"]
+ or 10000
+ )
+
+ post_data = {
+ "published": "2025-12-17T20:29:00Z",
+ "draft_name": draft.name,
+ "draft_rev": draft.rev,
+ "rfc_number": unused_rfc_number,
+ "title": "RFC " + draft.title,
+ "authors": [
+ {
+ "titlepage_name": f"titlepage {author.name}",
+ "is_editor": False,
+ "person": author.pk,
+ "email": author.email_address(),
+ "affiliation": "Some Affiliation",
+ "country": "CA",
+ }
+ for author in rfc_authors
+ ],
+ "group": rfc_group.acronym,
+ "stream": rfc_stream_id,
+ "abstract": "RFC version of " + draft.abstract,
+ "pages": draft.pages + 10,
+ "std_level": "ps",
+ "ad": rfc_ad.pk,
+ "obsoletes": [o.rfc_number for o in obsoletes],
+ "updates": [o.rfc_number for o in updates],
+ "subseries": [],
+ }
+ r = self.client.post(url, data=post_data, format="json")
+ self.assertEqual(r.status_code, 403)
+
+ r = self.client.post(
+ url, data=post_data, format="json", headers={"X-Api-Key": "valid-token"}
+ )
+ self.assertEqual(r.status_code, 200)
+ rfc = Document.objects.filter(rfc_number=unused_rfc_number).first()
+ self.assertIsNotNone(rfc)
+ self.assertEqual(rfc.came_from_draft(), draft)
+ self.assertEqual(
+ rfc.docevent_set.filter(
+ type="published_rfc", time="2025-12-17T20:29:00Z"
+ ).count(),
+ 1,
+ )
+ self.assertEqual(rfc.title, "RFC " + draft.title)
+ self.assertEqual(rfc.documentauthor_set.count(), 0)
+ self.assertEqual(
+ [
+ {
+ "titlepage_name": ra.titlepage_name,
+ "is_editor": ra.is_editor,
+ "person": ra.person,
+ "email": ra.email,
+ "affiliation": ra.affiliation,
+ "country": ra.country,
+ }
+ for ra in rfc.rfcauthor_set.all()
+ ],
+ [
+ {
+ "titlepage_name": f"titlepage {author.name}",
+ "is_editor": False,
+ "person": author,
+ "email": author.email(),
+ "affiliation": "Some Affiliation",
+ "country": "CA",
+ }
+ for author in rfc_authors
+ ],
+ )
+ self.assertEqual(rfc.group, rfc_group)
+ self.assertEqual(rfc.stream_id, rfc_stream_id)
+ self.assertEqual(rfc.abstract, "RFC version of " + draft.abstract)
+ self.assertEqual(rfc.pages, draft.pages + 10)
+ self.assertEqual(rfc.std_level_id, "ps")
+ self.assertEqual(rfc.ad, rfc_ad)
+ self.assertEqual(set(rfc.related_that_doc("obs")), set(obsoletes))
+ self.assertEqual(set(rfc.related_that_doc("updates")), set(updates))
+ self.assertEqual(rfc.part_of(), [])
+ self.assertEqual(draft.get_state().slug, "rfc")
+ # todo test non-empty relationships
+ # todo test references (when updating that is part of the handling)
+
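+ # The metadata task should be signaled for the new RFC plus everything
+ # it updates or obsoletes, deduplicated and sorted.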
+ self.assertTrue(mock_task_delay.called)
+ mock_args, mock_kwargs = mock_task_delay.call_args
+ self.assertIn("rfc_number_list", mock_kwargs)
+ expected_rfc_number_list = [rfc.rfc_number]
+ expected_rfc_number_list.extend(
+ [d.rfc_number for d in updates + obsoletes]
+ )
+ expected_rfc_number_list = sorted(set(expected_rfc_number_list))
+ self.assertEqual(mock_kwargs["rfc_number_list"], expected_rfc_number_list)
+
+ @override_settings(APP_API_TOKENS={"ietf.api.views_rpc": ["valid-token"]})
+ @mock.patch("ietf.api.views_rpc.rebuild_reference_relations_task")
+ @mock.patch("ietf.api.views_rpc.update_rfc_searchindex_task")
+ @mock.patch("ietf.api.views_rpc.trigger_red_precomputer_task")
+ def test_upload_rfc_files(
+ self,
+ mock_trigger_red_task,
+ mock_update_searchindex_task,
+ mock_rebuild_relations,
+ ):
+ def _valid_post_data():
+ """Generate a valid post data dict
+
+ Each API call needs a fresh set of files, so don't reuse the return
+ value from this for multiple calls!
+ """
+ return {
+ "rfc": rfc.rfc_number,
+ "contents": [
+ ContentFile(b"This is .xml", "myfile.xml"),
+ ContentFile(b"This is .txt", "myfile.txt"),
+ ContentFile(b"This is .html", "myfile.html"),
+ ContentFile(b"This is .pdf", "myfile.pdf"),
+ ContentFile(b"This is .json", "myfile.json"),
+ ContentFile(b"This is .notprepped.xml", "myfile.notprepped.xml"),
+ ],
+ "replace": False,
+ }
+
+ url = urlreverse("ietf.api.purple_api.upload_rfc_files")
+ updates = RfcFactory.create_batch(2)
+ obsoletes = RfcFactory.create_batch(2)
+
+ rfc = WgRfcFactory()
+ for r in obsoletes:
+ rfc.relateddocument_set.create(relationship_id="obs", target=r)
+ for r in updates:
+ rfc.relateddocument_set.create(relationship_id="updates", target=r)
+ assert isinstance(rfc, Document), "WgRfcFactory should generate a Document"
+ with TemporaryDirectory() as rfc_dir:
+ settings.RFC_PATH = rfc_dir # affects overridden settings
+ rfc_path = Path(rfc_dir)
+ (rfc_path / "prerelease").mkdir()
+ content = StringIO("XML content\n")
+ content.name = "myrfc.xml"
+
+ # no api key
+ r = self.client.post(url, _valid_post_data(), format="multipart")
+ self.assertEqual(r.status_code, 403)
+ self.assertFalse(mock_update_searchindex_task.delay.called)
+
+ # invalid RFC
+ r = self.client.post(
+ url,
+ _valid_post_data() | {"rfc": rfc.rfc_number + 10},
+ format="multipart",
+ headers={"X-Api-Key": "valid-token"},
+ )
+ self.assertEqual(r.status_code, 400)
+ self.assertFalse(mock_update_searchindex_task.delay.called)
+
+ # empty files
+ r = self.client.post(
+ url,
+ _valid_post_data() | {
+ "contents": [
+ ContentFile(b"", "myfile.xml"),
+ ContentFile(b"", "myfile.txt"),
+ ContentFile(b"", "myfile.html"),
+ ContentFile(b"", "myfile.pdf"),
+ ContentFile(b"", "myfile.json"),
+ ContentFile(b"", "myfile.notprepped.xml"),
+ ]
+ },
+ format="multipart",
+ headers={"X-Api-Key": "valid-token"},
+ )
+ self.assertEqual(r.status_code, 400)
+ self.assertFalse(mock_update_searchindex_task.delay.called)
+
+ # bad file type
+ r = self.client.post(
+ url,
+ _valid_post_data() | {
+ "contents": [
+ ContentFile(b"Some content", "myfile.jpg"),
+ ]
+ },
+ format="multipart",
+ headers={"X-Api-Key": "valid-token"},
+ )
+ self.assertEqual(r.status_code, 400)
+ self.assertFalse(mock_update_searchindex_task.delay.called)
+
+ # Put a file in the way. Post should fail because replace = False
+ file_in_the_way = rfc_path / f"{rfc.name}.txt"
+ file_in_the_way.touch()
+ r = self.client.post(
+ url,
+ _valid_post_data(),
+ format="multipart",
+ headers={"X-Api-Key": "valid-token"},
+ )
+ self.assertEqual(r.status_code, 409) # conflict
+ self.assertFalse(mock_update_searchindex_task.delay.called)
+ file_in_the_way.unlink()
+
+ # Put a blob in the way. Post should fail because replace = False
+ blob_in_the_way = Blob.objects.create(
+ bucket="rfc", name=f"txt/{rfc.name}.txt", content=b""
+ )
+ r = self.client.post(
+ url,
+ _valid_post_data(),
+ format="multipart",
+ headers={"X-Api-Key": "valid-token"},
+ )
+ self.assertEqual(r.status_code, 409) # conflict
+ self.assertFalse(mock_update_searchindex_task.delay.called)
+ blob_in_the_way.delete()
+
+ # valid post
+ mock_trigger_red_task.delay.reset_mock()
+ r = self.client.post(
+ url,
+ _valid_post_data(),
+ format="multipart",
+ headers={"X-Api-Key": "valid-token"},
+ )
+ self.assertEqual(r.status_code, 200)
+ self.assertEqual(
+ mock_update_searchindex_task.delay.call_args,
+ mock.call(rfc.rfc_number),
+ )
+ for extension in ["xml", "txt", "html", "pdf", "json"]:
+ filename = f"{rfc.name}.{extension}"
+ self.assertEqual(
+ (rfc_path / filename).read_text(),
+ f"This is .{extension}",
+ f"{extension} file should contain the expected content",
+ )
+ self.assertEqual(
+ bytes(
+ Blob.objects.get(
+ bucket="rfc", name=f"{extension}/{filename}"
+ ).content
+ ),
+ f"This is .{extension}".encode("utf-8"),
+ f"{extension} blob should contain the expected content",
+ )
+ # special case for notprepped
+ notprepped_fn = f"{rfc.name}.notprepped.xml"
+ self.assertEqual(
+ (rfc_path / "prerelease" / notprepped_fn).read_text(),
+ "This is .notprepped.xml",
+ ".notprepped.xml file should contain the expected content",
+ )
+ self.assertEqual(
+ bytes(
+ Blob.objects.get(
+ bucket="rfc", name=f"notprepped/{notprepped_fn}"
+ ).content
+ ),
+ b"This is .notprepped.xml",
+ ".notprepped.xml blob should contain the expected content",
+ )
+ # Confirm that the red precomputer was triggered correctly
+ self.assertTrue(mock_trigger_red_task.delay.called)
+ _, mock_kwargs = mock_trigger_red_task.delay.call_args
+ self.assertIn("rfc_number_list", mock_kwargs)
+ expected_rfc_number_list = [rfc.rfc_number]
+ expected_rfc_number_list.extend(
+ [d.rfc_number for d in updates + obsoletes]
+ )
+ expected_rfc_number_list = sorted(set(expected_rfc_number_list))
+ self.assertEqual(mock_kwargs["rfc_number_list"], expected_rfc_number_list)
+ # Confirm that the search index update task was called correctly
+ self.assertTrue(mock_update_searchindex_task.delay.called)
+ # Confirm reference relations rebuild task was called correctly
+ self.assertTrue(mock_rebuild_relations.delay.called)
+ _, mock_kwargs = mock_rebuild_relations.delay.call_args
+ self.assertIn("doc_names", mock_kwargs)
+ self.assertEqual(mock_kwargs["doc_names"], [rfc.name])
+
+ # re-post with replace = False should now fail
+ mock_update_searchindex_task.reset_mock()
+ r = self.client.post(
+ url,
+ _valid_post_data(),
+ format="multipart",
+ headers={"X-Api-Key": "valid-token"},
+ )
+ self.assertEqual(r.status_code, 409) # conflict
+ self.assertFalse(mock_update_searchindex_task.delay.called)
+
+ # re-post with replace = True should succeed
+ r = self.client.post(
+ url,
+ _valid_post_data() | {"replace": True},
+ format="multipart",
+ headers={"X-Api-Key": "valid-token"},
+ )
+ self.assertEqual(r.status_code, 200)
+ self.assertTrue(mock_update_searchindex_task.delay.called)
+ self.assertEqual(
+ mock_update_searchindex_task.delay.call_args,
+ mock.call(rfc.rfc_number),
+ )
+
+ @override_settings(APP_API_TOKENS={"ietf.api.views_rpc": ["valid-token"]})
+ def test_refresh_rfc_index(self):
+ DirtyBits.objects.create(
+ slug=DirtyBits.Slugs.RFCINDEX,
+ dirty_time=timezone.now() - datetime.timedelta(days=1),
+ processed_time=timezone.now() - datetime.timedelta(hours=12),
+ )
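+ # processed_time is more recent than dirty_time, so the index starts clean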
+ self.assertFalse(rfcindex_is_dirty())
+ url = urlreverse("ietf.api.purple_api.refresh_rfc_index")
+ response = self.client.get(url)
+ self.assertEqual(response.status_code, 403)
+ response = self.client.get(url, headers={"X-Api-Key": "invalid-token"})
+ self.assertEqual(response.status_code, 403)
+ response = self.client.get(url, headers={"X-Api-Key": "valid-token"})
+ self.assertEqual(response.status_code, 405)
+ self.assertFalse(rfcindex_is_dirty())
+ response = self.client.post(url, headers={"X-Api-Key": "valid-token"})
+ self.assertEqual(response.status_code, 202)
+ self.assertTrue(rfcindex_is_dirty())
diff --git a/ietf/api/urls.py b/ietf/api/urls.py
index 04575b34cb..7a082567b8 100644
--- a/ietf/api/urls.py
+++ b/ietf/api/urls.py
@@ -1,26 +1,31 @@
# Copyright The IETF Trust 2017-2024, All Rights Reserved
+from drf_spectacular.views import SpectacularAPIView
+
from django.conf import settings
-from django.urls import include
+from django.urls import include, path
from django.views.generic import TemplateView
from ietf import api
-from ietf.doc import views_ballot
+from ietf.doc import views_ballot, api as doc_api
from ietf.meeting import views as meeting_views
from ietf.submit import views as submit_views
from ietf.utils.urls import url
from . import views as api_views
+from .routers import PrefixedSimpleRouter
# DRF API routing - disabled until we plan to use it
-# from drf_spectacular.views import SpectacularAPIView
-# from django.urls import path
# from ietf.person import api as person_api
-# from .routers import PrefixedSimpleRouter
# core_router = PrefixedSimpleRouter(name_prefix="ietf.api.core_api") # core api router
# core_router.register("email", person_api.EmailViewSet)
# core_router.register("person", person_api.PersonViewSet)
+# todo more general name for this API?
+red_router = PrefixedSimpleRouter(name_prefix="ietf.api.red_api") # red api router
+red_router.register("doc", doc_api.RfcViewSet)
+red_router.register("subseries", doc_api.SubseriesViewSet, basename="subseries")
+
api.autodiscover()
urlpatterns = [
@@ -32,7 +37,9 @@
url(r'^v2/person/person', api_views.ApiV2PersonExportView.as_view()),
# --- DRF API ---
# path("core/", include(core_router.urls)),
- # path("schema/", SpectacularAPIView.as_view()),
+ path("purple/", include("ietf.api.urls_rpc")),
+ path("red/", include(red_router.urls)),
+ path("schema/", SpectacularAPIView.as_view()),
#
# --- Custom API endpoints, sorted alphabetically ---
# Email alias information for drafts
diff --git a/ietf/api/urls_rpc.py b/ietf/api/urls_rpc.py
new file mode 100644
index 0000000000..8555610dc3
--- /dev/null
+++ b/ietf/api/urls_rpc.py
@@ -0,0 +1,47 @@
+# Copyright The IETF Trust 2023-2026, All Rights Reserved
+from django.urls import include, path
+
+from ietf.api import views_rpc
+from ietf.api.routers import PrefixedDefaultRouter
+from ietf.utils.urls import url
+
+router = PrefixedDefaultRouter(use_regex_path=False, name_prefix="ietf.api.purple_api")
+router.include_format_suffixes = False
+router.register(r"draft", views_rpc.DraftViewSet, basename="draft")
+router.register(r"person", views_rpc.PersonViewSet)
+router.register(r"rfc", views_rpc.RfcViewSet, basename="rfc")
+
+router.register(
+ r"rfc//authors",
+ views_rpc.RfcAuthorViewSet,
+ basename="rfc-authors",
+)
+
+urlpatterns = [
+ url(r"^doc/drafts_by_names/", views_rpc.DraftsByNamesView.as_view()),
+ url(r"^persons/search/", views_rpc.RpcPersonSearch.as_view()),
+ path(
+ r"rfc/publish/",
+ views_rpc.RfcPubNotificationView.as_view(),
+ name="ietf.api.purple_api.notify_rfc_published",
+ ),
+ path(
+ r"rfc/publish/files/",
+ views_rpc.RfcPubFilesView.as_view(),
+ name="ietf.api.purple_api.upload_rfc_files",
+ ),
+ path(
+ r"rfc_index/refresh/",
+ views_rpc.RfcIndexView.as_view(),
+ name="ietf.api.purple_api.refresh_rfc_index",
+ ),
+ path(r"subject//person/", views_rpc.SubjectPersonView.as_view()),
+]
+
+# add routers at the end so individual routes can steal parts of their address
+# space (e.g., ^rfc/publish/ superseding the ^rfc/ routes of RfcViewSet)
+urlpatterns.extend(
+ [
+ path("", include(router.urls)),
+ ]
+)
diff --git a/ietf/api/views.py b/ietf/api/views.py
index 22523b2f17..420bc39693 100644
--- a/ietf/api/views.py
+++ b/ietf/api/views.py
@@ -97,7 +97,7 @@ class PersonalInformationExportView(DetailView, JsonExportMixin):
def get(self, request):
person = get_object_or_404(self.model, user=request.user)
- expand = ['searchrule', 'documentauthor', 'ad_document_set', 'ad_dochistory_set', 'docevent',
+ expand = ['searchrule', 'documentauthor', 'rfcauthor', 'ad_document_set', 'ad_dochistory_set', 'docevent',
'ballotpositiondocevent', 'deletedevent', 'email_set', 'groupevent', 'role', 'rolehistory', 'iprdisclosurebase',
'iprevent', 'liaisonstatementevent', 'allowlisted', 'schedule', 'constraint', 'schedulingevent', 'message',
'sendqueue', 'nominee', 'topicfeedbacklastseen', 'alias', 'email', 'apikeys', 'personevent',
diff --git a/ietf/api/views_rpc.py b/ietf/api/views_rpc.py
new file mode 100644
index 0000000000..6bc45fe3da
--- /dev/null
+++ b/ietf/api/views_rpc.py
@@ -0,0 +1,552 @@
+# Copyright The IETF Trust 2023-2026, All Rights Reserved
+import os
+import shutil
+from pathlib import Path
+from tempfile import TemporaryDirectory
+
+from django.conf import settings
+from django.db import IntegrityError
+from drf_spectacular.utils import OpenApiParameter
+from rest_framework import mixins, parsers, serializers, viewsets, status
+from rest_framework.decorators import action
+from rest_framework.exceptions import APIException
+from rest_framework.views import APIView
+from rest_framework.response import Response
+
+from django.db.models import CharField as ModelCharField, OuterRef, Subquery, Q
+from django.db.models.functions import Coalesce
+from django.http import Http404
+from drf_spectacular.utils import extend_schema_view, extend_schema
+from rest_framework import generics
+from rest_framework.fields import CharField as DrfCharField
+from rest_framework.filters import SearchFilter
+from rest_framework.pagination import LimitOffsetPagination
+
+from ietf.api.serializers_rpc import (
+ PersonSerializer,
+ FullDraftSerializer,
+ DraftSerializer,
+ SubmittedToQueueSerializer,
+ OriginalStreamSerializer,
+ ReferenceSerializer,
+ EmailPersonSerializer,
+ RfcWithAuthorsSerializer,
+ DraftWithAuthorsSerializer,
+ NotificationAckSerializer,
+ RfcPubSerializer,
+ RfcFileSerializer,
+ EditableRfcSerializer,
+)
+from ietf.doc.models import Document, DocHistory, RfcAuthor, DocEvent
+from ietf.doc.serializers import RfcAuthorSerializer
+from ietf.doc.storage_utils import remove_from_storage, store_file, exists_in_storage
+from ietf.doc.tasks import (
+ signal_update_rfc_metadata_task,
+ rebuild_reference_relations_task,
+ trigger_red_precomputer_task,
+ update_rfc_searchindex_task,
+)
+from ietf.person.models import Email, Person
+from ietf.sync.rfcindex import mark_rfcindex_as_dirty
+
+
+class Conflict(APIException):
+ status_code = status.HTTP_409_CONFLICT
+ default_detail = "Conflict."
+ default_code = "conflict"
+
+
+@extend_schema_view(
+ retrieve=extend_schema(
+ operation_id="get_person_by_id",
+ summary="Find person by ID",
+ description="Returns a single person",
+ parameters=[
+ OpenApiParameter(
+ name="person_id",
+ type=int,
+ location="path",
+ description="Person ID identifying this person.",
+ ),
+ ],
+ ),
+)
+class PersonViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
+ queryset = Person.objects.all()
+ serializer_class = PersonSerializer
+ api_key_endpoint = "ietf.api.views_rpc"
+ lookup_url_kwarg = "person_id"
+
+ @extend_schema(
+ operation_id="get_persons",
+ summary="Get a batch of persons",
+ description="Returns a list of persons matching requested ids. Omits any that are missing.",
+ request=list[int],
+ responses=PersonSerializer(many=True),
+ )
+ @action(detail=False, methods=["post"])
+ def batch(self, request):
+ """Get a batch of rpc person names"""
+ pks = request.data
+ return Response(
+ self.get_serializer(Person.objects.filter(pk__in=pks), many=True).data
+ )
+
+ @extend_schema(
+ operation_id="persons_by_email",
+ summary="Get a batch of persons by email addresses",
+ description=(
+ "Returns a list of persons matching requested ids. "
+ "Omits any that are missing."
+ ),
+ request=list[str],
+ responses=EmailPersonSerializer(many=True),
+ )
+ @action(detail=False, methods=["post"], serializer_class=EmailPersonSerializer)
+ def batch_by_email(self, request):
+ emails = Email.objects.filter(address__in=request.data, person__isnull=False)
+ serializer = self.get_serializer(emails, many=True)
+ return Response(serializer.data)
+
+
+class SubjectPersonView(APIView):
+ api_key_endpoint = "ietf.api.views_rpc"
+
+ @extend_schema(
+ operation_id="get_subject_person_by_id",
+ summary="Find person for OIDC subject by ID",
+ description="Returns a single person",
+ responses=PersonSerializer,
+ parameters=[
+ OpenApiParameter(
+ name="subject_id",
+ type=str,
+ description="subject ID of person to return",
+ location="path",
+ ),
+ ],
+ )
+ def get(self, request, subject_id: str):
+ try:
+ user_id = int(subject_id)
+ except ValueError:
+ raise serializers.ValidationError(
+ {"subject_id": "This field must be an integer value."}
+ )
+ person = Person.objects.filter(user__pk=user_id).first()
+ if person:
+ return Response(PersonSerializer(person).data)
+ raise Http404
+
+
+class RpcLimitOffsetPagination(LimitOffsetPagination):
+ default_limit = 10
+ max_limit = 100
+
+
+class SingleTermSearchFilter(SearchFilter):
+ """SearchFilter backend that does not split terms
+
+ The default SearchFilter treats comma or whitespace-separated terms as individual
+ search terms. This backend instead searches for the exact term.
+ """
+
+ def get_search_terms(self, request):
+ value = request.query_params.get(self.search_param, "")
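+ # validate the raw value without trimming; the whole string is one search term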
+ field = DrfCharField(trim_whitespace=False, allow_blank=True)
+ cleaned_value = field.run_validation(value)
+ return [cleaned_value]
+
+
+@extend_schema_view(
+ get=extend_schema(
+ operation_id="search_person",
+ description="Get a list of persons, matching by partial name or email",
+ ),
+)
+class RpcPersonSearch(generics.ListAPIView):
+ # n.b. the OpenAPI schema for this can be generated by running
+ # ietf/manage.py spectacular --file spectacular.yaml
+ # and extracting / touching up the rpc_person_search_list operation
+ api_key_endpoint = "ietf.api.views_rpc"
+ queryset = Person.objects.all()
+ serializer_class = PersonSerializer
+ pagination_class = RpcLimitOffsetPagination
+
+ # Searchable on all name-like fields or email addresses
+ filter_backends = [SingleTermSearchFilter]
+ search_fields = ["name", "plain", "email__address"]
+
+
+@extend_schema_view(
+ retrieve=extend_schema(
+ operation_id="get_draft_by_id",
+ summary="Get a draft",
+ description="Returns the draft for the requested ID",
+ parameters=[
+ OpenApiParameter(
+ name="doc_id",
+ type=int,
+ location="path",
+ description="Doc ID identifying this draft.",
+ ),
+ ],
+ ),
+ submitted_to_rpc=extend_schema(
+ operation_id="submitted_to_rpc",
+ summary="List documents ready to enter the RFC Editor Queue",
+ description="List documents ready to enter the RFC Editor Queue",
+ responses=SubmittedToQueueSerializer(many=True),
+ ),
+)
+class DraftViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
+ queryset = Document.objects.filter(type_id="draft")
+ serializer_class = FullDraftSerializer
+ api_key_endpoint = "ietf.api.views_rpc"
+ lookup_url_kwarg = "doc_id"
+
+ @action(detail=False, serializer_class=SubmittedToQueueSerializer)
+ def submitted_to_rpc(self, request):
+ """Return documents in datatracker that have been submitted to the RPC but are not yet in the queue
+
+ These queries over-return: they may include documents, particularly ones not from the IETF stream, that are already in the queue.
+ """
+ ietf_docs = Q(states__type_id="draft-iesg", states__slug__in=["ann"])
+ irtf_iab_ise_editorial_docs = Q(
+ states__type_id__in=[
+ "draft-stream-iab",
+ "draft-stream-irtf",
+ "draft-stream-ise",
+ "draft-stream-editorial",
+ ],
+ states__slug__in=["rfc-edit"],
+ )
+ docs = (
+ self.get_queryset()
+ .filter(type_id="draft")
+ .filter(ietf_docs | irtf_iab_ise_editorial_docs)
+ )
+ serializer = self.get_serializer(docs, many=True)
+ return Response(serializer.data)
+
+ @extend_schema(
+ operation_id="get_draft_references",
+ summary="Get normative references to I-Ds",
+ description=(
+ "Returns the id and name of each normatively "
+ "referenced Internet-Draft for the given docId"
+ ),
+ parameters=[
+ OpenApiParameter(
+ name="doc_id",
+ type=int,
+ location="path",
+ description="Doc ID identifying this draft.",
+ ),
+ ],
+ responses=ReferenceSerializer(many=True),
+ )
+ @action(detail=True, serializer_class=ReferenceSerializer)
+ def references(self, request, doc_id=None):
+ doc = self.get_object()
+ serializer = self.get_serializer(
+ [
+ reference
+ for reference in doc.related_that_doc("refnorm")
+ if reference.type_id == "draft"
+ ],
+ many=True,
+ )
+ return Response(serializer.data)
+
+ @extend_schema(
+ operation_id="get_draft_authors",
+ summary="Gather authors of the drafts with the given names",
+ description="returns a list mapping draft names to objects describing authors",
+ request=list[str],
+ responses=DraftWithAuthorsSerializer(many=True),
+ )
+ @action(detail=False, methods=["post"], serializer_class=DraftWithAuthorsSerializer)
+ def bulk_authors(self, request):
+ drafts = self.get_queryset().filter(name__in=request.data)
+ serializer = self.get_serializer(drafts, many=True)
+ return Response(serializer.data)
+
+
+@extend_schema_view(
+ rfc_original_stream=extend_schema(
+ operation_id="get_rfc_original_streams",
+ summary="Get the streams RFCs were originally published into",
+ description="returns a list of dicts associating an RFC with its originally published stream",
+ responses=OriginalStreamSerializer(many=True),
+ )
+)
+class RfcViewSet(mixins.UpdateModelMixin, viewsets.GenericViewSet):
+ queryset = Document.objects.filter(type_id="rfc")
+ api_key_endpoint = "ietf.api.views_rpc"
+ lookup_field = "rfc_number"
+ serializer_class = EditableRfcSerializer
+
+ def perform_update(self, serializer):
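+ # Record a DocEvent so metadata edits from the RFC Editor leave an audit trail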
+ DocEvent.objects.create(
+ doc=serializer.instance,
+ rev=serializer.instance.rev,
+ by=Person.objects.get(name="(System)"),
+ type="sync_from_rfc_editor",
+ desc="Metadata update from RFC Editor",
+ )
+ super().perform_update(serializer)
+
+ @action(detail=False, serializer_class=OriginalStreamSerializer)
+ def rfc_original_stream(self, request):
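+ # Original stream: the earliest DocHistory entry with a stream set,
+ # falling back to the document's current stream_id.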
+ rfcs = self.get_queryset().annotate(
+ orig_stream_id=Coalesce(
+ Subquery(
+ DocHistory.objects.filter(doc=OuterRef("pk"))
+ .exclude(stream__isnull=True)
+ .order_by("time")
+ .values_list("stream_id", flat=True)[:1]
+ ),
+ "stream_id",
+ output_field=ModelCharField(),
+ ),
+ )
+ serializer = self.get_serializer(rfcs, many=True)
+ return Response(serializer.data)
+
+ @extend_schema(
+ operation_id="get_rfc_authors",
+ summary="Gather authors of the RFCs with the given numbers",
+ description="returns a list mapping rfc numbers to objects describing authors",
+ request=list[int],
+ responses=RfcWithAuthorsSerializer(many=True),
+ )
+ @action(detail=False, methods=["post"], serializer_class=RfcWithAuthorsSerializer)
+ def bulk_authors(self, request):
+ rfcs = self.get_queryset().filter(rfc_number__in=request.data)
+ serializer = self.get_serializer(rfcs, many=True)
+ return Response(serializer.data)
+
+
+class DraftsByNamesView(APIView):
+ api_key_endpoint = "ietf.api.views_rpc"
+
+ @extend_schema(
+ operation_id="get_drafts_by_names",
+ summary="Get a batch of drafts by draft names",
+ description="returns a list of drafts with matching names",
+ request=list[str],
+ responses=DraftSerializer(many=True),
+ )
+ def post(self, request):
+ names = request.data
+ docs = Document.objects.filter(type_id="draft", name__in=names)
+ return Response(DraftSerializer(docs, many=True).data)
+
+
+class RfcAuthorViewSet(viewsets.ReadOnlyModelViewSet):
+ """ViewSet for RfcAuthor model
+
+ Router needs to provide rfc_number as a kwarg
+ """
+
+ api_key_endpoint = "ietf.api.views_rpc"
+
+ queryset = RfcAuthor.objects.all()
+ serializer_class = RfcAuthorSerializer
+ lookup_url_kwarg = "author_id"
+ rfc_number_param = "rfc_number"
+
+ def get_queryset(self):
+ return (
+ super()
+ .get_queryset()
+ .filter(
+ document__type_id="rfc",
+ document__rfc_number=self.kwargs[self.rfc_number_param],
+ )
+ )
+
+
+class RfcPubNotificationView(APIView):
+ api_key_endpoint = "ietf.api.views_rpc"
+
+ @extend_schema(
+ operation_id="notify_rfc_published",
+ summary="Notify datatracker of RFC publication",
+ request=RfcPubSerializer,
+ responses=NotificationAckSerializer,
+ )
+ def post(self, request):
+ serializer = RfcPubSerializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ # Create RFC
+ try:
+ rfc = serializer.save()
+ except IntegrityError as err:
+ if Document.objects.filter(
+ rfc_number=serializer.validated_data["rfc_number"]
+ ):
+ raise serializers.ValidationError(
+ "RFC with that number already exists",
+ code="rfc-number-in-use",
+ )
+ raise serializers.ValidationError(
+ f"Unable to publish: {err}",
+ code="unknown-integrity-error",
+ )
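+ # Signal metadata updates for the new RFC and everything it updates or obsoletes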
+ rfc_number_list = [rfc.rfc_number]
+ rfc_number_list.extend(
+ [d.rfc_number for d in rfc.related_that_doc(("updates", "obs"))]
+ )
+ rfc_number_list = sorted(set(rfc_number_list))
+ signal_update_rfc_metadata_task.delay(rfc_number_list=rfc_number_list)
+ return Response(NotificationAckSerializer().data)
+
+
+class RfcPubFilesView(APIView):
+ api_key_endpoint = "ietf.api.views_rpc"
+ parser_classes = [parsers.MultiPartParser]
+
+ def _fs_destination(self, filename: str | Path) -> Path:
+ """Destination for an uploaded RFC file in the filesystem
+
+ Strips any path components in filename and returns an absolute Path.
+ """
+ rfc_path = Path(settings.RFC_PATH)
+ filename = Path(filename) # could potentially have directory components
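+ # join all suffixes so multi-part extensions like ".notprepped.xml" are kept intact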
+ extension = "".join(filename.suffixes)
+ if extension == ".notprepped.xml":
+ return rfc_path / "prerelease" / filename.name
+ return rfc_path / filename.name
+
+ def _blob_destination(self, filename: str | Path) -> str:
+ """Destination name for an uploaded RFC file in the blob store
+
+ Strips any path components in filename and returns the blob name as a str.
+ """
+ filename = Path(filename) # could potentially have directory components
+ extension = "".join(filename.suffixes)
+ if extension == ".notprepped.xml":
+ file_type = "notprepped"
+ elif extension[0] == ".":
+ file_type = extension[1:]
+ else:
+ raise serializers.ValidationError(
+ f"Extension does not begin with '.'!? ({filename})",
+ )
+ return f"{file_type}/{filename.name}"
+
+ @extend_schema(
+ operation_id="upload_rfc_files",
+ summary="Upload files for a published RFC",
+ request=RfcFileSerializer,
+ responses=NotificationAckSerializer,
+ )
+ def post(self, request):
+ serializer = RfcFileSerializer(
+ # many=True,
+ data=request.data,
+ )
+ serializer.is_valid(raise_exception=True)
+ rfc = serializer.validated_data["rfc"]
+ uploaded_files = serializer.validated_data["contents"] # list[UploadedFile]
+ replace = serializer.validated_data["replace"]
+ dest_stem = f"rfc{rfc.rfc_number}"
+ mtime = serializer.validated_data["mtime"]
+ mtimestamp = mtime.timestamp()
+ blob_kind = "rfc"
+
+ # List of files that might exist for an RFC
+ possible_rfc_files = [
+ self._fs_destination(dest_stem + ext)
+ for ext in serializer.allowed_extensions
+ ]
+ possible_rfc_blobs = [
+ self._blob_destination(dest_stem + ext)
+ for ext in serializer.allowed_extensions
+ ]
+ if not replace:
+ # this is the default: refuse to overwrite anything if not replacing
+ for possible_existing_file in possible_rfc_files:
+ if possible_existing_file.exists():
+ raise Conflict(
+ "File(s) already exist for this RFC",
+ code="files-exist",
+ )
+ for possible_existing_blob in possible_rfc_blobs:
+ if exists_in_storage(kind=blob_kind, name=possible_existing_blob):
+ raise Conflict(
+ "Blob(s) already exist for this RFC",
+ code="blobs-exist",
+ )
+
+ with TemporaryDirectory() as tempdir:
+ # Save files in a temporary directory. Use the uploaded filename
+ # extensions to identify files, but ignore the stems and generate our own.
+ files_to_move = [] # list[Path]
+ tmpfile_stem = Path(tempdir) / dest_stem
+ for upfile in uploaded_files:
+ uploaded_filename = Path(upfile.name) # name supplied by request
+ uploaded_ext = "".join(uploaded_filename.suffixes)
+ tempfile_path = tmpfile_stem.with_suffix(uploaded_ext)
+ with tempfile_path.open("wb") as dest:
+ for chunk in upfile.chunks():
+ dest.write(chunk)
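+ # set atime/mtime to the requested publication time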
+ os.utime(tempfile_path, (mtimestamp, mtimestamp))
+ files_to_move.append(tempfile_path)
+ # copy files to final location, removing any existing ones first if the
+ # replace flag was set
+ if replace:
+ for possible_existing_file in possible_rfc_files:
+ possible_existing_file.unlink(missing_ok=True)
+ for possible_existing_blob in possible_rfc_blobs:
+ remove_from_storage(
+ blob_kind, possible_existing_blob, warn_if_missing=False
+ )
+ for ftm in files_to_move:
+ with ftm.open("rb") as f:
+ store_file(
+ kind=blob_kind,
+ name=self._blob_destination(ftm),
+ file=f,
+ doc_name=rfc.name,
+ doc_rev=rfc.rev, # expect blank, but match whatever it is
+ mtime=mtime,
+ )
+ destination = self._fs_destination(ftm)
+ if (
+ settings.SERVER_MODE != "production"
+ and not destination.parent.exists()
+ ):
+ destination.parent.mkdir()
+ shutil.move(ftm, destination)
+
+ # Trigger red precomputer
+ needs_updating = [rfc.rfc_number]
+ for rel in rfc.relateddocument_set.filter(
+ relationship_id__in=["obs", "updates"]
+ ):
+ needs_updating.append(rel.target.rfc_number)
+ trigger_red_precomputer_task.delay(rfc_number_list=sorted(needs_updating))
+ # Trigger search index update
+ update_rfc_searchindex_task.delay(rfc.rfc_number)
+ # Trigger reference relations rebuild
+ rebuild_reference_relations_task.delay(doc_names=[rfc.name])
+
+ return Response(NotificationAckSerializer().data)
+
+
+class RfcIndexView(APIView):
+ api_key_endpoint = "ietf.api.views_rpc"
+
+ @extend_schema(
+ operation_id="refresh_rfc_index",
+ summary="Refresh rfc-index files",
+ description="Requests creation of various index files.",
+ responses={202: None},
+ request=None,
+ )
+ def post(self, request):
+ mark_rfcindex_as_dirty()
+ return Response(status=202)
diff --git a/ietf/blobdb/models.py b/ietf/blobdb/models.py
index fa7831f203..27325ada5d 100644
--- a/ietf/blobdb/models.py
+++ b/ietf/blobdb/models.py
@@ -64,6 +64,9 @@ class Meta:
),
]
+ def __str__(self):
+ return f"{self.bucket}:{self.name}"
+
def save(self, **kwargs):
db = get_blobdb()
with transaction.atomic(using=db):
diff --git a/ietf/blobdb/replication.py b/ietf/blobdb/replication.py
index b9d55c9498..d251d3b95c 100644
--- a/ietf/blobdb/replication.py
+++ b/ietf/blobdb/replication.py
@@ -146,11 +146,11 @@ def replicate_blob(bucket, name):
blob = fetch_blob_via_sql(bucket, name)
if blob is None:
if verbose_logging_enabled():
- log.log("Deleting {bucket}:{name} from replica")
+ log.log(f"Deleting {bucket}:{name} from replica")
try:
destination_storage.delete(name)
except Exception as e:
- log.log("Failed to delete {bucket}:{name} from replica: {e}")
+ log.log(f"Failed to delete {bucket}:{name} from replica: {e}")
raise ReplicationError from e
else:
# Add metadata expected by the MetadataS3Storage
@@ -170,7 +170,7 @@ def replicate_blob(bucket, name):
try:
destination_storage.save(name, file_with_metadata)
except Exception as e:
- log.log("Failed to save {bucket}:{name} to replica: {e}")
+ log.log(f"Failed to save {bucket}:{name} to replica: {e}")
raise ReplicationError from e
diff --git a/ietf/community/utils.py b/ietf/community/utils.py
index f23e8d26ab..b6137095ef 100644
--- a/ietf/community/utils.py
+++ b/ietf/community/utils.py
@@ -72,8 +72,10 @@ def docs_matching_community_list_rule(rule):
return docs.filter(group=rule.group_id)
elif rule.rule_type.startswith("state_"):
return docs
- elif rule.rule_type in ["author", "author_rfc"]:
+ elif rule.rule_type == "author":
return docs.filter(documentauthor__person=rule.person)
+ elif rule.rule_type == "author_rfc":
+ return docs.filter(Q(rfcauthor__person=rule.person) | Q(rfcauthor__isnull=True, documentauthor__person=rule.person))
elif rule.rule_type == "ad":
return docs.filter(ad=rule.person)
elif rule.rule_type == "shepherd":
@@ -122,9 +124,16 @@ def community_list_rules_matching_doc(doc):
# author rules
if doc.type_id == "rfc":
+ has_rfcauthors = doc.rfcauthor_set.exists()
rules |= SearchRule.objects.filter(
rule_type="author_rfc",
- person__in=list(Person.objects.filter(documentauthor__document=doc)),
+ person__in=list(
+ Person.objects.filter(
+ Q(rfcauthor__document=doc)
+ if has_rfcauthors
+ else Q(documentauthor__document=doc)
+ )
+ ),
)
else:
rules |= SearchRule.objects.filter(
diff --git a/ietf/doc/admin.py b/ietf/doc/admin.py
index 745536f9a1..0d04e8db3a 100644
--- a/ietf/doc/admin.py
+++ b/ietf/doc/admin.py
@@ -13,8 +13,10 @@
TelechatDocEvent, BallotPositionDocEvent, ReviewRequestDocEvent, InitialReviewDocEvent,
AddedMessageEvent, SubmissionDocEvent, DeletedEvent, EditedAuthorsDocEvent, DocumentURL,
ReviewAssignmentDocEvent, IanaExpertDocEvent, IRSGBallotDocEvent, DocExtResource, DocumentActionHolder,
- BofreqEditorDocEvent, BofreqResponsibleDocEvent, StoredObject )
+ BofreqEditorDocEvent, BofreqResponsibleDocEvent, StoredObject, RfcAuthor,
+ EditedRfcAuthorsDocEvent)
+from ietf.utils.admin import SaferTabularInline
from ietf.utils.validators import validate_external_resource_value
class StateTypeAdmin(admin.ModelAdmin):
@@ -28,17 +30,17 @@ class StateAdmin(admin.ModelAdmin):
filter_horizontal = ["next_states"]
admin.site.register(State, StateAdmin)
-class DocAuthorInline(admin.TabularInline):
+class DocAuthorInline(SaferTabularInline):
model = DocumentAuthor
raw_id_fields = ['person', 'email']
extra = 1
-class DocActionHolderInline(admin.TabularInline):
+class DocActionHolderInline(SaferTabularInline):
model = DocumentActionHolder
raw_id_fields = ['person']
extra = 1
-class RelatedDocumentInline(admin.TabularInline):
+class RelatedDocumentInline(SaferTabularInline):
model = RelatedDocument
fk_name= 'source'
def this(self, instance):
@@ -48,7 +50,7 @@ def this(self, instance):
raw_id_fields = ['target']
extra = 1
-class AdditionalUrlInLine(admin.TabularInline):
+class AdditionalUrlInLine(SaferTabularInline):
model = DocumentURL
fields = ['tag','desc','url',]
extra = 1
@@ -173,6 +175,7 @@ def short_desc(self, obj):
admin.site.register(TelechatDocEvent, DocEventAdmin)
admin.site.register(InitialReviewDocEvent, DocEventAdmin)
admin.site.register(EditedAuthorsDocEvent, DocEventAdmin)
+admin.site.register(EditedRfcAuthorsDocEvent, DocEventAdmin)
admin.site.register(IanaExpertDocEvent, DocEventAdmin)
class BallotPositionDocEventAdmin(DocEventAdmin):
@@ -236,3 +239,11 @@ def is_deleted(self, instance):
admin.site.register(StoredObject, StoredObjectAdmin)
+
+class RfcAuthorAdmin(admin.ModelAdmin):
+ # the email field in the list_display/readonly_fields works through a @property
+ list_display = ['id', 'document', 'titlepage_name', 'person', 'email', 'affiliation', 'country', 'order']
+ search_fields = ['document__name', 'titlepage_name', 'person__name', 'person__email__address', 'affiliation', 'country']
+ raw_id_fields = ["document", "person"]
+ readonly_fields = ["email"]
+admin.site.register(RfcAuthor, RfcAuthorAdmin)
diff --git a/ietf/doc/api.py b/ietf/doc/api.py
new file mode 100644
index 0000000000..73fff6b27f
--- /dev/null
+++ b/ietf/doc/api.py
@@ -0,0 +1,213 @@
+# Copyright The IETF Trust 2024-2026, All Rights Reserved
+"""Doc API implementations"""
+
+from django.db.models import (
+ BooleanField,
+ Count,
+ OuterRef,
+ Prefetch,
+ Q,
+ QuerySet,
+ Subquery,
+)
+from django.db.models.functions import TruncDate
+from django_filters import rest_framework as filters
+from rest_framework import filters as drf_filters
+from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
+from rest_framework.pagination import LimitOffsetPagination
+from rest_framework.viewsets import GenericViewSet
+
+from ietf.group.models import Group
+from ietf.name.models import StreamName, DocTypeName
+from ietf.utils.timezone import RPC_TZINFO
+from .models import (
+ Document,
+ DocEvent,
+ RelatedDocument,
+ DocumentAuthor,
+ SUBSERIES_DOC_TYPE_IDS,
+)
+from .serializers import (
+ RfcMetadataSerializer,
+ RfcStatus,
+ RfcSerializer,
+ SubseriesDocSerializer,
+)
+
+
+class RfcLimitOffsetPagination(LimitOffsetPagination):
+ default_limit = 10
+ max_limit = 500
+
+
+class NumberInFilter(filters.BaseInFilter, filters.NumberFilter):
+ """Filter against a comma-separated list of numbers"""
+ pass
+
+
+class RfcFilter(filters.FilterSet):
+ published = filters.DateFromToRangeFilter()
+ stream = filters.ModelMultipleChoiceFilter(
+ queryset=StreamName.objects.filter(used=True)
+ )
+ number = NumberInFilter(
+ field_name="rfc_number"
+ )
+ group = filters.ModelMultipleChoiceFilter(
+ queryset=Group.objects.all(),
+ field_name="group__acronym",
+ to_field_name="acronym",
+ )
+ area = filters.ModelMultipleChoiceFilter(
+ queryset=Group.objects.areas(),
+ field_name="group__parent__acronym",
+ to_field_name="acronym",
+ )
+ status = filters.MultipleChoiceFilter(
+ choices=[(slug, slug) for slug in RfcStatus.status_slugs],
+ method=RfcStatus.filter,
+ )
+ sort = filters.OrderingFilter(
+ fields=(
+ ("rfc_number", "number"), # ?sort=number / ?sort=-number
+ ("published", "published"), # ?sort=published / ?sort=-published
+ ),
+ )
+
+
+class PrefetchRelatedDocument(Prefetch):
+ """Prefetch via a RelatedDocument
+
+ Prefetches documents related via RelatedDocument relationships. By default, includes
+ those for which the current RFC is the `source`. If `reverse` is True, includes those
+ for which it is the `target` instead. Defaults to only "rfc" documents.
+ """
+
+ @staticmethod
+ def _get_queryset(relationship_id, reverse, doc_type_ids):
+ """Get queryset to use for the prefetch"""
+ if isinstance(doc_type_ids, str):
+ doc_type_ids = (doc_type_ids,)
+
+ return RelatedDocument.objects.filter(
+ **{
+ "relationship_id": relationship_id,
+ f"{'source' if reverse else 'target'}__type_id__in": doc_type_ids,
+ }
+ ).select_related("source" if reverse else "target")
+
+ def __init__(self, to_attr, relationship_id, reverse=False, doc_type_ids="rfc"):
+ super().__init__(
+ lookup="targets_related" if reverse else "relateddocument_set",
+ queryset=self._get_queryset(relationship_id, reverse, doc_type_ids),
+ to_attr=to_attr,
+ )
+
+
+def augment_rfc_queryset(queryset: QuerySet[Document]):
+ return (
+ queryset.select_related("std_level", "stream")
+ .prefetch_related(
+ Prefetch(
+ "group",
+ Group.objects.select_related("parent"),
+ ),
+ Prefetch(
+ "documentauthor_set",
+ DocumentAuthor.objects.select_related("email", "person"),
+ ),
+ PrefetchRelatedDocument(
+ to_attr="drafts",
+ relationship_id="became_rfc",
+ doc_type_ids="draft",
+ reverse=True,
+ ),
+ PrefetchRelatedDocument(to_attr="obsoletes", relationship_id="obs"),
+ PrefetchRelatedDocument(
+ to_attr="obsoleted_by", relationship_id="obs", reverse=True
+ ),
+ PrefetchRelatedDocument(to_attr="updates", relationship_id="updates"),
+ PrefetchRelatedDocument(
+ to_attr="updated_by", relationship_id="updates", reverse=True
+ ),
+ PrefetchRelatedDocument(
+ to_attr="subseries",
+ relationship_id="contains",
+ reverse=True,
+ doc_type_ids=SUBSERIES_DOC_TYPE_IDS,
+ ),
+ )
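+ # publication date comes from the most recent "published_rfc" DocEvent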
+ .annotate(
+ published_datetime=Subquery(
+ DocEvent.objects.filter(
+ doc_id=OuterRef("pk"),
+ type="published_rfc",
+ )
+ .order_by("-time")
+ .values("time")[:1]
+ ),
+ )
+ .annotate(published=TruncDate("published_datetime", tzinfo=RPC_TZINFO))
+ .annotate(
+ # Count of "verified-errata" tags will be 1 or 0, convert to Boolean
+ has_errata=Count(
+ "tags",
+ filter=Q(
+ tags__slug="verified-errata",
+ ),
+ output_field=BooleanField(),
+ )
+ )
+ )
+
+
+class RfcViewSet(ListModelMixin, RetrieveModelMixin, GenericViewSet):
+ api_key_endpoint = "ietf.api.red_api" # matches prefix in ietf/api/urls.py
+ lookup_field = "rfc_number"
+ queryset = augment_rfc_queryset(
+ Document.objects.filter(type_id="rfc", rfc_number__isnull=False)
+ ).order_by("-rfc_number")
+
+ pagination_class = RfcLimitOffsetPagination
+ filter_backends = [filters.DjangoFilterBackend, drf_filters.SearchFilter]
+ filterset_class = RfcFilter
+ search_fields = ["title", "abstract"]
+
+ def get_serializer_class(self):
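+ # full document serializer for detail requests, lighter metadata serializer for lists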
+ if self.action == "retrieve":
+ return RfcSerializer
+ return RfcMetadataSerializer
+
+
+class PrefetchSubseriesContents(Prefetch):
+ def __init__(self, to_attr):
+ super().__init__(
+ lookup="relateddocument_set",
+ queryset=RelatedDocument.objects.filter(
+ relationship_id="contains",
+ target__type_id="rfc",
+ ).prefetch_related(
+ Prefetch(
+ "target",
+ queryset=augment_rfc_queryset(Document.objects.all()),
+ )
+ ),
+ to_attr=to_attr,
+ )
+
+
+class SubseriesFilter(filters.FilterSet):
+ type = filters.ModelMultipleChoiceFilter(
+ queryset=DocTypeName.objects.filter(pk__in=SUBSERIES_DOC_TYPE_IDS)
+ )
+
+
+class SubseriesViewSet(ListModelMixin, RetrieveModelMixin, GenericViewSet):
+ api_key_endpoint = "ietf.api.red_api" # matches prefix in ietf/api/urls.py
+ lookup_field = "name"
+ serializer_class = SubseriesDocSerializer
+ queryset = Document.objects.subseries_docs().prefetch_related(
+ PrefetchSubseriesContents(to_attr="contents")
+ )
+ filter_backends = [filters.DjangoFilterBackend]
+ filterset_class = SubseriesFilter
diff --git a/ietf/doc/expire.py b/ietf/doc/expire.py
index bf8523aa98..d42af628f8 100644
--- a/ietf/doc/expire.py
+++ b/ietf/doc/expire.py
@@ -38,22 +38,46 @@ def expirable_drafts(queryset=None):
# Populate this first time through (but after django has been set up)
if nonexpirable_states is None:
# all IESG states except I-D Exists and Dead block expiry
- nonexpirable_states = list(State.objects.filter(used=True, type="draft-iesg").exclude(slug__in=("idexists", "dead")))
+ nonexpirable_states = list(
+ State.objects.filter(used=True, type="draft-iesg").exclude(
+ slug__in=("idexists", "dead")
+ )
+ )
# sent to RFC Editor and RFC Published block expiry (the latter
# shouldn't be possible for an active draft, though)
- nonexpirable_states += list(State.objects.filter(used=True, type__in=("draft-stream-iab", "draft-stream-irtf", "draft-stream-ise"), slug__in=("rfc-edit", "pub")))
+ nonexpirable_states += list(
+ State.objects.filter(
+ used=True,
+ type__in=(
+ "draft-stream-iab",
+ "draft-stream-irtf",
+ "draft-stream-ise",
+ "draft-stream-editorial",
+ ),
+ slug__in=("rfc-edit", "pub"),
+ )
+ )
# other IRTF states that block expiration
- nonexpirable_states += list(State.objects.filter(used=True, type_id="draft-stream-irtf", slug__in=("irsgpoll", "iesg-rev",)))
-
- return queryset.filter(
- states__type="draft", states__slug="active"
- ).exclude(
- expires=None
- ).exclude(
- states__in=nonexpirable_states
- ).exclude(
- tags="rfc-rev" # under review by the RFC Editor blocks expiry
- ).distinct()
+ nonexpirable_states += list(
+ State.objects.filter(
+ used=True,
+ type_id="draft-stream-irtf",
+ slug__in=(
+ "irsgpoll",
+ "iesg-rev",
+ ),
+ )
+ )
+
+ return (
+ queryset.filter(states__type="draft", states__slug="active")
+ .exclude(expires=None)
+ .exclude(states__in=nonexpirable_states)
+ .exclude(
+ tags="rfc-rev" # under review by the RFC Editor blocks expiry
+ )
+ .distinct()
+ )
def get_soon_to_expire_drafts(days_of_warning):
diff --git a/ietf/doc/factories.py b/ietf/doc/factories.py
index 19aa9ecc9c..1a178c6f31 100644
--- a/ietf/doc/factories.py
+++ b/ietf/doc/factories.py
@@ -14,7 +14,7 @@
from ietf.doc.models import ( Document, DocEvent, NewRevisionDocEvent, State, DocumentAuthor,
StateDocEvent, BallotPositionDocEvent, BallotDocEvent, BallotType, IRSGBallotDocEvent, TelechatDocEvent,
- DocumentActionHolder, BofreqEditorDocEvent, BofreqResponsibleDocEvent, DocExtResource )
+ DocumentActionHolder, BofreqEditorDocEvent, BofreqResponsibleDocEvent, DocExtResource, RfcAuthor )
from ietf.group.models import Group
from ietf.person.factories import PersonFactory
from ietf.group.factories import RoleFactory
@@ -311,6 +311,12 @@ class Meta:
def desc(self):
return 'New version available %s-%s'%(self.doc.name,self.rev)
+class PublishedRfcDocEventFactory(DocEventFactory):
+ class Meta:
+ model = DocEvent
+ type = "published_rfc"
+ doc = factory.SubFactory(WgRfcFactory)
+
class StateDocEventFactory(DocEventFactory):
class Meta:
model = StateDocEvent
@@ -382,6 +388,18 @@ class Meta:
country = factory.Faker('country')
order = factory.LazyAttribute(lambda o: o.document.documentauthor_set.count() + 1)
+class RfcAuthorFactory(factory.django.DjangoModelFactory):
+ class Meta:
+ model = RfcAuthor
+
+ document = factory.SubFactory(DocumentFactory)
+ titlepage_name = factory.LazyAttribute(
+ lambda obj: " ".join([obj.person.initials(), obj.person.last_name()])
+ )
+ person = factory.SubFactory('ietf.person.factories.PersonFactory')
+ affiliation = factory.Faker('company')
+ order = factory.LazyAttribute(lambda o: o.document.rfcauthor_set.count() + 1)
+
class WgDocumentAuthorFactory(DocumentAuthorFactory):
document = factory.SubFactory(WgDraftFactory)
diff --git a/ietf/doc/feeds.py b/ietf/doc/feeds.py
index 500ed3cb18..0269906fcf 100644
--- a/ietf/doc/feeds.py
+++ b/ietf/doc/feeds.py
@@ -1,11 +1,11 @@
-# Copyright The IETF Trust 2007-2020, All Rights Reserved
-# -*- coding: utf-8 -*-
+# Copyright The IETF Trust 2007-2026, All Rights Reserved
import debug # pyflakes:ignore
import datetime
import unicodedata
+from django.conf import settings
from django.contrib.syndication.views import Feed, FeedDoesNotExist
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
from django.urls import reverse as urlreverse
@@ -224,7 +224,7 @@ def item_extra_kwargs(self, item):
extra.update({"dcterms_accessRights": "gratis"})
extra.update({"dcterms_format": "text/html"})
media_contents = []
- if item.rfc_number < 8650:
+ if item.rfc_number < settings.FIRST_V3_RFC:
if item.rfc_number not in [8, 9, 51, 418, 500, 530, 589]:
for fmt, media_type in [("txt", "text/plain"), ("html", "text/html")]:
media_contents.append(
@@ -263,9 +263,11 @@ def item_extra_kwargs(self, item):
)
extra.update({"media_contents": media_contents})
- extra.update({"doi": "10.17487/%s" % item.name.upper()})
extra.update(
- {"doiuri": "http://dx.doi.org/10.17487/%s" % item.name.upper()}
+ {
+ "doi": item.doi,
+ "doiuri": f"https://doi.org/{item.doi}",
+ }
)
# R104 Publisher (Mandatory - but we need a string from them first)
diff --git a/ietf/doc/mails.py b/ietf/doc/mails.py
index f20d398c3c..ddecbb6b54 100644
--- a/ietf/doc/mails.py
+++ b/ietf/doc/mails.py
@@ -103,61 +103,6 @@ def email_stream_changed(request, doc, old_stream, new_stream, text=""):
dict(text=text,
url=settings.IDTRACKER_BASE_URL + doc.get_absolute_url()),
cc=cc)
-
-def email_wg_call_for_adoption_issued(request, doc, cfa_duration_weeks=None):
- if cfa_duration_weeks is None:
- cfa_duration_weeks=2
- (to, cc) = gather_address_lists("doc_wg_call_for_adoption_issued", doc=doc)
- frm = request.user.person.formatted_email()
-
- end_date = date_today(DEADLINE_TZINFO) + datetime.timedelta(days=7 * cfa_duration_weeks)
-
- subject = f"Call for adoption: {doc.name}-{doc.rev} (Ends {end_date})"
-
- send_mail(
- request,
- to,
- frm,
- subject,
- "doc/mail/wg_call_for_adoption_issued.txt",
- dict(
- doc=doc,
- subject=subject,
- url=settings.IDTRACKER_BASE_URL + doc.get_absolute_url(),
- end_date=end_date,
- cfa_duration_weeks=cfa_duration_weeks,
- wg_list=doc.group.list_email,
- ),
- cc=cc,
- )
-
-
-def email_wg_last_call_issued(request, doc, wglc_duration_weeks=None):
- if wglc_duration_weeks is None:
- wglc_duration_weeks = 2
- (to, cc) = gather_address_lists("doc_wg_last_call_issued", doc=doc)
- frm = request.user.person.formatted_email()
-
-
- end_date = date_today(DEADLINE_TZINFO) + datetime.timedelta(days=7 * wglc_duration_weeks)
- subject = f"WG Last Call: {doc.name}-{doc.rev} (Ends {end_date})"
-
- send_mail(
- request,
- to,
- frm,
- subject,
- "doc/mail/wg_last_call_issued.txt",
- dict(
- doc=doc,
- subject=subject,
- url=settings.IDTRACKER_BASE_URL + doc.get_absolute_url(),
- end_date=end_date,
- wglc_duration_weeks=wglc_duration_weeks,
- wg_list=doc.group.list_email,
- ),
- cc=cc,
- )
def email_pulled_from_rfc_queue(request, doc, comment, prev_state, next_state):
extra=extra_automation_headers(doc)
diff --git a/ietf/doc/management/commands/reset_rfc_authors.py b/ietf/doc/management/commands/reset_rfc_authors.py
deleted file mode 100644
index e2ab5f1208..0000000000
--- a/ietf/doc/management/commands/reset_rfc_authors.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright The IETF Trust 2024, All Rights Reserved
-
-# Reset an RFC's authors to those of the draft it came from
-from django.core.management.base import BaseCommand, CommandError
-
-from ietf.doc.models import Document, DocEvent
-from ietf.person.models import Person
-
-
-class Command(BaseCommand):
- def add_arguments(self, parser):
- parser.add_argument("rfcnum", type=int, help="RFC number to modify")
- parser.add_argument(
- "--force",
- action="store_true",
- help="reset even if RFC already has authors",
- )
-
- def handle(self, *args, **options):
- try:
- rfc = Document.objects.get(type="rfc", rfc_number=options["rfcnum"])
- except Document.DoesNotExist:
- raise CommandError(
- f"rfc{options['rfcnum']} does not exist in the Datatracker."
- )
-
- draft = rfc.came_from_draft()
- if draft is None:
- raise CommandError(f"{rfc.name} did not come from a draft. Can't reset.")
-
- orig_authors = rfc.documentauthor_set.all()
- if orig_authors.exists():
- # Potentially dangerous, so refuse unless "--force" is specified
- if not options["force"]:
- raise CommandError(
- f"{rfc.name} already has authors. Not resetting. Use '--force' to reset anyway."
- )
- removed_auth_names = list(orig_authors.values_list("person__name", flat=True))
- rfc.documentauthor_set.all().delete()
- DocEvent.objects.create(
- doc=rfc,
- by=Person.objects.get(name="(System)"),
- type="edited_authors",
- desc=f"Removed all authors: {', '.join(removed_auth_names)}",
- )
- self.stdout.write(
- self.style.SUCCESS(
- f"Removed author(s): {', '.join(removed_auth_names)}"
- )
- )
-
- for author in draft.documentauthor_set.all():
- # Copy the author but point at the new doc.
- # See https://docs.djangoproject.com/en/4.2/topics/db/queries/#copying-model-instances
- author.pk = None
- author.id = None
- author._state.adding = True
- author.document = rfc
- author.save()
- self.stdout.write(
- self.style.SUCCESS(f"Added author {author.person.name} <{author.email}>")
- )
- auth_names = draft.documentauthor_set.values_list("person__name", flat=True)
- DocEvent.objects.create(
- doc=rfc,
- by=Person.objects.get(name="(System)"),
- type="edited_authors",
- desc=f"Set authors from rev {draft.rev} of {draft.name}: {', '.join(auth_names)}",
- )
diff --git a/ietf/doc/management/commands/tests.py b/ietf/doc/management/commands/tests.py
deleted file mode 100644
index 8244d87266..0000000000
--- a/ietf/doc/management/commands/tests.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright The IETF Trust 2024, All Rights Reserved
-# -*- coding: utf-8 -*-
-
-from io import StringIO
-
-from django.core.management import call_command, CommandError
-
-from ietf.doc.factories import DocumentAuthorFactory, WgDraftFactory, WgRfcFactory
-from ietf.doc.models import Document, DocumentAuthor
-from ietf.utils.test_utils import TestCase
-
-
-class CommandTests(TestCase):
- @staticmethod
- def _call_command(command_name, *args, **options):
- """Call command, capturing (and suppressing) output"""
- out = StringIO()
- err = StringIO()
- options["stdout"] = out
- options["stderr"] = err
- call_command(command_name, *args, **options)
- return out.getvalue(), err.getvalue()
-
- def test_reset_rfc_authors(self):
- command_name = "reset_rfc_authors"
-
- draft = WgDraftFactory()
- DocumentAuthorFactory.create_batch(3, document=draft)
- rfc = WgRfcFactory() # rfc does not yet have a draft
- DocumentAuthorFactory.create_batch(3, document=rfc)
- bad_rfc_num = (
- 1
- + Document.objects.filter(rfc_number__isnull=False)
- .order_by("-rfc_number")
- .first()
- .rfc_number
- )
- docauthor_fields = [
- field.name
- for field in DocumentAuthor._meta.get_fields()
- if field.name not in ["document", "id"]
- ]
-
- with self.assertRaises(CommandError, msg="Cannot reset a bad RFC number"):
- self._call_command(command_name, bad_rfc_num)
-
- with self.assertRaises(CommandError, msg="Cannot reset an RFC with no draft"):
- self._call_command(command_name, rfc.rfc_number)
-
- with self.assertRaises(CommandError, msg="Cannot force-reset an RFC with no draft"):
- self._call_command(command_name, rfc.rfc_number, "--force")
-
- # Link the draft to the rfc
- rfc.targets_related.create(relationship_id="became_rfc", source=draft)
-
- with self.assertRaises(CommandError, msg="Cannot reset an RFC with authors"):
- self._call_command(command_name, rfc.rfc_number)
-
- # Calling with force should work
- self._call_command(command_name, rfc.rfc_number, "--force")
- self.assertCountEqual(
- draft.documentauthor_set.values(*docauthor_fields),
- rfc.documentauthor_set.values(*docauthor_fields),
- )
-
- # Calling on an RFC with no authors should also work
- rfc.documentauthor_set.all().delete()
- self._call_command(command_name, rfc.rfc_number)
- self.assertCountEqual(
- draft.documentauthor_set.values(*docauthor_fields),
- rfc.documentauthor_set.values(*docauthor_fields),
- )
diff --git a/ietf/doc/migrations/0027_alter_dochistory_title_alter_document_title.py b/ietf/doc/migrations/0027_alter_dochistory_title_alter_document_title.py
new file mode 100644
index 0000000000..e0d8560e6f
--- /dev/null
+++ b/ietf/doc/migrations/0027_alter_dochistory_title_alter_document_title.py
@@ -0,0 +1,41 @@
+# Copyright The IETF Trust 2025, All Rights Reserved
+
+import django.core.validators
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("doc", "0026_change_wg_state_descriptions"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="dochistory",
+ name="title",
+ field=models.CharField(
+ max_length=255,
+ validators=[
+ django.core.validators.ProhibitNullCharactersValidator, # type:ignore
+ django.core.validators.RegexValidator(
+ message="Please enter a string without control characters.",
+ regex="^[^\x01-\x1f]*$",
+ ),
+ ],
+ ),
+ ),
+ migrations.AlterField(
+ model_name="document",
+ name="title",
+ field=models.CharField(
+ max_length=255,
+ validators=[
+ django.core.validators.ProhibitNullCharactersValidator, # type:ignore
+ django.core.validators.RegexValidator(
+ message="Please enter a string without control characters.",
+ regex="^[^\x01-\x1f]*$",
+ ),
+ ],
+ ),
+ ),
+ ]
diff --git a/ietf/doc/migrations/0028_rfcauthor.py b/ietf/doc/migrations/0028_rfcauthor.py
new file mode 100644
index 0000000000..776dc22eb1
--- /dev/null
+++ b/ietf/doc/migrations/0028_rfcauthor.py
@@ -0,0 +1,84 @@
+# Copyright The IETF Trust 2025, All Rights Reserved
+
+from django.db import migrations, models
+import django.db.models.deletion
+import ietf.utils.models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("person", "0005_alter_historicalperson_pronouns_selectable_and_more"),
+ ("doc", "0027_alter_dochistory_title_alter_document_title"),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name="RfcAuthor",
+ fields=[
+ (
+ "id",
+ models.AutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
+ ("titlepage_name", models.CharField(max_length=128)),
+ ("is_editor", models.BooleanField(default=False)),
+ (
+ "affiliation",
+ models.CharField(
+ blank=True,
+ help_text="Organization/company used by author for submission",
+ max_length=100,
+ ),
+ ),
+ (
+ "country",
+ models.CharField(
+ blank=True,
+ help_text="Country used by author for submission",
+ max_length=255,
+ ),
+ ),
+ ("order", models.IntegerField(default=1)),
+ (
+ "document",
+ ietf.utils.models.ForeignKey(
+ limit_choices_to={"type_id": "rfc"},
+ on_delete=django.db.models.deletion.CASCADE,
+ to="doc.document",
+ ),
+ ),
+ (
+ "email",
+ ietf.utils.models.ForeignKey(
+ blank=True,
+ help_text="Email address used by author for submission",
+ null=True,
+ on_delete=django.db.models.deletion.PROTECT,
+ to="person.email",
+ ),
+ ),
+ (
+ "person",
+ ietf.utils.models.ForeignKey(
+ blank=True,
+ null=True,
+ on_delete=django.db.models.deletion.PROTECT,
+ to="person.person",
+ ),
+ ),
+ ],
+ options={
+ "ordering": ["document", "order"],
+ "indexes": [
+ models.Index(
+ fields=["document", "order"],
+ name="doc_rfcauth_documen_6b5dc4_idx",
+ )
+ ],
+ },
+ ),
+ ]
diff --git a/ietf/doc/migrations/0029_editedrfcauthorsdocevent.py b/ietf/doc/migrations/0029_editedrfcauthorsdocevent.py
new file mode 100644
index 0000000000..60837c5cb2
--- /dev/null
+++ b/ietf/doc/migrations/0029_editedrfcauthorsdocevent.py
@@ -0,0 +1,30 @@
+# Copyright The IETF Trust 2025, All Rights Reserved
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("doc", "0028_rfcauthor"),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name="EditedRfcAuthorsDocEvent",
+ fields=[
+ (
+ "docevent_ptr",
+ models.OneToOneField(
+ auto_created=True,
+ on_delete=django.db.models.deletion.CASCADE,
+ parent_link=True,
+ primary_key=True,
+ serialize=False,
+ to="doc.docevent",
+ ),
+ ),
+ ],
+ bases=("doc.docevent",),
+ ),
+ ]
diff --git a/ietf/doc/migrations/0030_alter_dochistory_title_alter_document_title.py b/ietf/doc/migrations/0030_alter_dochistory_title_alter_document_title.py
new file mode 100644
index 0000000000..9ee858b2e8
--- /dev/null
+++ b/ietf/doc/migrations/0030_alter_dochistory_title_alter_document_title.py
@@ -0,0 +1,41 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+import django.core.validators
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("doc", "0029_editedrfcauthorsdocevent"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="dochistory",
+ name="title",
+ field=models.CharField(
+ max_length=255,
+ validators=[
+ django.core.validators.ProhibitNullCharactersValidator(),
+ django.core.validators.RegexValidator(
+ message="Please enter a string without control characters.",
+ regex="^[^\x01-\x1f]*$",
+ ),
+ ],
+ ),
+ ),
+ migrations.AlterField(
+ model_name="document",
+ name="title",
+ field=models.CharField(
+ max_length=255,
+ validators=[
+ django.core.validators.ProhibitNullCharactersValidator(),
+ django.core.validators.RegexValidator(
+ message="Please enter a string without control characters.",
+ regex="^[^\x01-\x1f]*$",
+ ),
+ ],
+ ),
+ ),
+ ]
diff --git a/ietf/doc/migrations/0031_change_draft_stream_ietf_state_descriptions.py b/ietf/doc/migrations/0031_change_draft_stream_ietf_state_descriptions.py
new file mode 100644
index 0000000000..c664126da3
--- /dev/null
+++ b/ietf/doc/migrations/0031_change_draft_stream_ietf_state_descriptions.py
@@ -0,0 +1,57 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+from django.db import migrations
+
+
+def forward(apps, schema_editor):
+ State = apps.get_model("doc", "State")
+ for name, desc in [
+ (
+ "Adopted by a WG",
+ "The individual submission document has been adopted by the Working Group (WG), but some administrative matter still needs to be completed (e.g., a WG document replacing this document with the typical naming convention of 'draft-ietf-wgname-topic-nn' has not yet been submitted).",
+ ),
+ (
+ "WG Document",
+ "The document has been identified as a Working Group (WG) document and is under development per Section 7.2 of RFC2418.",
+ ),
+ (
+ "Waiting for WG Chair Go-Ahead",
+            "The Working Group (WG) document has completed Working Group Last Call (WGLC), but the WG chairs are not yet ready to call consensus on the document. The reasons for this may include that comments from the WGLC need to be responded to, or that a revision to the document is needed.",
+ ),
+ (
+ "Submitted to IESG for Publication",
+ "The Working Group (WG) document has been submitted to the Internet Engineering Steering Group (IESG) for evaluation and publication per Section 7.4 of RFC2418. See the “IESG State” or “RFC Editor State” for further details on the state of the document.",
+ ),
+ ]:
+ State.objects.filter(name=name).update(desc=desc, type="draft-stream-ietf")
+
+
+def reverse(apps, schema_editor):
+ State = apps.get_model("doc", "State")
+ for name, desc in [
+ (
+ "Adopted by a WG",
+ "The individual submission document has been adopted by the Working Group (WG), but a WG document replacing this document with the typical naming convention of 'draft- ietf-wgname-topic-nn' has not yet been submitted.",
+ ),
+ (
+ "WG Document",
+ "The document has been adopted by the Working Group (WG) and is under development. A document can only be adopted by one WG at a time. However, a document may be transferred between WGs.",
+ ),
+ (
+ "Waiting for WG Chair Go-Ahead",
+ "The Working Group (WG) document has completed Working Group Last Call (WGLC), but the WG chair(s) are not yet ready to call consensus on the document. The reasons for this may include comments from the WGLC need to be responded to, or a revision to the document is needed",
+ ),
+ (
+ "Submitted to IESG for Publication",
+ "The Working Group (WG) document has left the WG and been submitted to the Internet Engineering Steering Group (IESG) for evaluation and publication. See the “IESG State” or “RFC Editor State” for further details on the state of the document.",
+ ),
+ ]:
+ State.objects.filter(name=name).update(desc=desc, type="draft-stream-ietf")
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("doc", "0030_alter_dochistory_title_alter_document_title"),
+ ]
+
+ operations = [migrations.RunPython(forward, reverse)]
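Because this data migration pairs a forward and a reverse function, it can be applied
and rolled back cleanly. A sketch of exercising that round trip from a Django shell,
assuming the usual manage.py environment:

    from django.core.management import call_command

    # Apply everything up to and including the description changes.
    call_command("migrate", "doc", "0031")

    # Roll them back; RunPython(forward, reverse) makes this a clean
    # inverse rather than an irreversible migration.
    call_command("migrate", "doc", "0030")
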
diff --git a/ietf/doc/migrations/0032_remove_rfcauthor_email.py b/ietf/doc/migrations/0032_remove_rfcauthor_email.py
new file mode 100644
index 0000000000..a0e147da59
--- /dev/null
+++ b/ietf/doc/migrations/0032_remove_rfcauthor_email.py
@@ -0,0 +1,16 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("doc", "0031_change_draft_stream_ietf_state_descriptions"),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name="rfcauthor",
+ name="email",
+ ),
+ ]
diff --git a/ietf/doc/migrations/0033_dochistory_keywords_document_keywords.py b/ietf/doc/migrations/0033_dochistory_keywords_document_keywords.py
new file mode 100644
index 0000000000..5e2513e15a
--- /dev/null
+++ b/ietf/doc/migrations/0033_dochistory_keywords_document_keywords.py
@@ -0,0 +1,31 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+from django.db import migrations, models
+import ietf.doc.models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("doc", "0032_remove_rfcauthor_email"),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name="dochistory",
+ name="keywords",
+ field=models.JSONField(
+ default=list,
+ max_length=1000,
+ validators=[ietf.doc.models.validate_doc_keywords],
+ ),
+ ),
+ migrations.AddField(
+ model_name="document",
+ name="keywords",
+ field=models.JSONField(
+ default=list,
+ max_length=1000,
+ validators=[ietf.doc.models.validate_doc_keywords],
+ ),
+ ),
+ ]
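The new keywords field is a JSONField constrained by validate_doc_keywords, which is
defined in the models.py changes later in this diff. A sketch of what the validator
accepts and rejects, assuming a configured Django environment:

    from django.core.exceptions import ValidationError
    from ietf.doc.models import validate_doc_keywords

    validate_doc_keywords(["routing", "bgp"])  # ok: a list of strings
    validate_doc_keywords([])                  # ok: the default=list case

    for bad in ["not-a-list", [1, 2, 3], {"keyword": "value"}]:
        try:
            validate_doc_keywords(bad)
        except ValidationError:
            pass  # each raises "Value must be an array of strings"
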
diff --git a/ietf/doc/models.py b/ietf/doc/models.py
index 8bb79b64ed..cc79b73831 100644
--- a/ietf/doc/models.py
+++ b/ietf/doc/models.py
@@ -1,7 +1,8 @@
-# Copyright The IETF Trust 2010-2025, All Rights Reserved
+# Copyright The IETF Trust 2010-2026, All Rights Reserved
# -*- coding: utf-8 -*-
+from collections import namedtuple
import datetime
import logging
import os
@@ -11,6 +12,9 @@
from io import BufferedReader
from pathlib import Path
+
+from django.core.exceptions import ValidationError
+from django.db.models import Q
from lxml import etree
from typing import Optional, Protocol, TYPE_CHECKING, Union
from weasyprint import HTML as wpHTML
@@ -20,7 +24,11 @@
from django.core import checks
from django.core.files.base import File
from django.core.cache import caches
-from django.core.validators import URLValidator, RegexValidator
+from django.core.validators import (
+ URLValidator,
+ RegexValidator,
+ ProhibitNullCharactersValidator,
+)
from django.urls import reverse as urlreverse
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
@@ -44,6 +52,7 @@
from ietf.person.utils import get_active_balloters
from ietf.utils import log
from ietf.utils.decorators import memoize
+from ietf.utils.text import decode_document_content
from ietf.utils.validators import validate_no_control_chars
from ietf.utils.mail import formataddr
from ietf.utils.models import ForeignKey
@@ -102,12 +111,27 @@ class Meta:
IESG_STATCHG_CONFLREV_ACTIVE_STATES = ("iesgeval", "defer")
IESG_SUBSTATE_TAGS = ('ad-f-up', 'need-rev', 'extpty')
+
+def validate_doc_keywords(value):
+ if (
+ not isinstance(value, list | tuple | set)
+ or not all(isinstance(elt, str) for elt in value)
+ ):
+ raise ValidationError("Value must be an array of strings")
+
+
class DocumentInfo(models.Model):
"""Any kind of document. Draft, RFC, Charter, IPR Statement, Liaison Statement"""
time = models.DateTimeField(default=timezone.now) # should probably have auto_now=True
type = ForeignKey(DocTypeName, blank=True, null=True) # Draft, Agenda, Minutes, Charter, Discuss, Guideline, Email, Review, Issue, Wiki, External ...
- title = models.CharField(max_length=255, validators=[validate_no_control_chars, ])
+ title = models.CharField(
+ max_length=255,
+ validators=[
+ ProhibitNullCharactersValidator(),
+ validate_no_control_chars,
+ ],
+ )
states = models.ManyToManyField(State, blank=True) # plain state (Active/Expired/...), IESG state, stream state
tags = models.ManyToManyField(DocTagName, blank=True) # Revised ID Needed, ExternalParty, AD Followup, ...
@@ -129,6 +153,17 @@ class DocumentInfo(models.Model):
uploaded_filename = models.TextField(blank=True)
note = models.TextField(blank=True)
rfc_number = models.PositiveIntegerField(blank=True, null=True) # only valid for type="rfc"
+ keywords = models.JSONField(
+ default=list,
+ max_length=1000,
+ validators=[validate_doc_keywords],
+ )
+
+ @property
+ def doi(self) -> str | None:
+ if self.type_id == "rfc" and self.rfc_number is not None:
+ return f"{settings.IETF_DOI_PREFIX}/RFC{self.rfc_number:04d}"
+ return None
def file_extension(self):
if not hasattr(self, '_cached_extension'):
@@ -228,14 +263,14 @@ def revisions_by_newrevisionevent(self):
return revisions
def get_href(self, meeting=None):
- return self._get_ref(meeting=meeting,meeting_doc_refs=settings.MEETING_DOC_HREFS)
+ return self._get_ref(meeting=meeting, versioned=True)
def get_versionless_href(self, meeting=None):
- return self._get_ref(meeting=meeting,meeting_doc_refs=settings.MEETING_DOC_GREFS)
+ return self._get_ref(meeting=meeting, versioned=False)
- def _get_ref(self, meeting=None, meeting_doc_refs=settings.MEETING_DOC_HREFS):
+ def _get_ref(self, meeting=None, versioned=True):
"""
Returns an url to the document text. This differs from .get_absolute_url(),
which returns an url to the datatracker page for the document.
@@ -244,12 +279,16 @@ def _get_ref(self, meeting=None, meeting_doc_refs=settings.MEETING_DOC_HREFS):
# the earlier resolution order, but there's at the moment one single
# instance which matches this (with correct results), so we won't
# break things all over the place.
- if not hasattr(self, '_cached_href'):
+ cache_attr = "_cached_href" if versioned else "_cached_versionless_href"
+ if not hasattr(self, cache_attr):
validator = URLValidator()
if self.external_url and self.external_url.split(':')[0] in validator.schemes:
validator(self.external_url)
return self.external_url
+ meeting_doc_refs = (
+ settings.MEETING_DOC_HREFS if versioned else settings.MEETING_DOC_GREFS
+ )
if self.type_id in settings.DOC_HREFS and self.type_id in meeting_doc_refs:
if self.meeting_related():
self.is_meeting_related = True
@@ -301,8 +340,8 @@ def _get_ref(self, meeting=None, meeting_doc_refs=settings.MEETING_DOC_HREFS):
if href.startswith('/'):
href = settings.IDTRACKER_BASE_URL + href
- self._cached_href = href
- return self._cached_href
+ setattr(self, cache_attr, href)
+ return getattr(self, cache_attr)
def set_state(self, state):
"""Switch state type implicit in state to state. This just
@@ -407,9 +446,56 @@ def friendly_state(self):
else:
return state.name
+ def author_names(self):
+ """Author names as a list of strings"""
+ names = []
+ if self.type_id == "rfc" and self.rfcauthor_set.exists():
+ for author in self.rfcauthor_set.select_related("person"):
+ if author.person:
+ names.append(author.person.name)
+ else:
+ # titlepage_name cannot be blank
+ names.append(author.titlepage_name)
+ else:
+ names = [
+ author.person.name
+ for author in self.documentauthor_set.select_related("person")
+ ]
+ return names
+
+ def author_persons_or_names(self):
+ """Authors as a list of named tuples with person and/or titlepage_name"""
+ Author = namedtuple("Author", "person titlepage_name")
+ persons_or_names = []
+        if self.type_id == "rfc" and self.rfcauthor_set.exists():
+ for author in self.rfcauthor_set.select_related("person"):
+ persons_or_names.append(Author(person=author.person, titlepage_name=author.titlepage_name))
+ else:
+ for author in self.documentauthor_set.select_related("person"):
+ persons_or_names.append(Author(person=author.person, titlepage_name=""))
+ return persons_or_names
+
+ def author_persons(self):
+ """Authors as a list of Persons
+
+ Omits any RfcAuthors with a null person field.
+ """
+ if self.type_id == "rfc" and self.rfcauthor_set.exists():
+ authors_qs = self.rfcauthor_set.filter(person__isnull=False)
+ else:
+ authors_qs = self.documentauthor_set.all()
+ return [a.person for a in authors_qs.select_related("person")]
+
def author_list(self):
+ """List of author emails"""
+ if self.type_id == "rfc" and self.rfcauthor_set.exists():
+ author_qs = self.rfcauthor_set.select_related("person").order_by("order")
+ else:
+ author_qs = self.documentauthor_set.select_related("email").order_by(
+ "order"
+ )
best_addresses = []
- for author in self.documentauthor_set.all():
+ for author in author_qs:
if author.email:
if author.email.active or not author.email.person:
best_addresses.append(author.email.address)
@@ -417,9 +503,6 @@ def author_list(self):
best_addresses.append(author.email.person.email_address())
return ", ".join(best_addresses)
- def authors(self):
- return [ a.person for a in self.documentauthor_set.all() ]
-
# This, and several other ballot related functions here, assume that there is only one active ballot for a document at any point in time.
# If that assumption is violated, they will only expose the most recently created ballot
def ballot_open(self, ballot_type_slug):
@@ -558,19 +641,7 @@ def text(self, size = -1):
except IOError as e:
log.log(f"Error reading text for {path}: {e}")
return None
- text = None
- try:
- text = raw.decode('utf-8')
- except UnicodeDecodeError:
- for back in range(1,4):
- try:
- text = raw[:-back].decode('utf-8')
- break
- except UnicodeDecodeError:
- pass
- if text is None:
- text = raw.decode('latin-1')
- return text
+ return decode_document_content(raw)
def text_or_error(self):
return self.text() or "Error; cannot read '%s'"%self.get_base_name()
@@ -721,7 +792,14 @@ def referenced_by_rfcs_as_rfc_or_draft(self):
if self.type_id == "rfc" and self.came_from_draft():
refs_to |= self.came_from_draft().referenced_by_rfcs()
return refs_to
-
+
+ def sent_to_rfc_editor_event(self):
+ if self.stream_id == "ietf":
+ return self.docevent_set.filter(type="iesg_approved").order_by("-time").first()
+ elif self.stream_id in ["editorial", "iab", "irtf", "ise"]:
+ return self.docevent_set.filter(type="requested_publication").order_by("-time").first()
+ else:
+ return None
class Meta:
abstract = True
@@ -845,6 +923,54 @@ def is_approved_downref(self):
return False
+class RfcAuthor(models.Model):
+ """Captures the authors of an RFC as represented on the RFC title page.
+
+ This deviates from DocumentAuthor in that it does not get moved into the DocHistory
+ hierarchy as documents are saved. It will attempt to preserve email, country, and affiliation
+ from the DocumentAuthor objects associated with the draft leading to this RFC (which
+ may be wrong if the author moves or changes affiliation while the document is in the
+ queue).
+
+ It does not, at this time, attempt to capture the authors from anything _but_ the title
+ page. The datatracker may know more about such authors based on information from the draft
+ leading to the RFC, and future work may take that into account.
+
+    Once doc.rfcauthor_set.exists() returns True for a doc of type `rfc`,
+    doc.documentauthor_set should be ignored.
+ """
+
+ document = ForeignKey(
+ "Document",
+ on_delete=models.CASCADE,
+ limit_choices_to={"type_id": "rfc"}, # only affects ModelForms (e.g., admin)
+ )
+ titlepage_name = models.CharField(max_length=128, blank=False)
+ is_editor = models.BooleanField(default=False)
+ person = ForeignKey(Person, null=True, blank=True, on_delete=models.PROTECT)
+ affiliation = models.CharField(max_length=100, blank=True, help_text="Organization/company used by author for submission")
+ country = models.CharField(max_length=255, blank=True, help_text="Country used by author for submission")
+ order = models.IntegerField(default=1)
+
+ def __str__(self):
+        return "%s %s (%s)" % (self.document.name, self.person, self.order)
+
+ class Meta:
+        ordering = ["document", "order"]
+        indexes = [
+ models.Index(fields=["document", "order"])
+ ]
+
+ @property
+ def email(self) -> Email | None:
+ return self.person.email() if self.person else None
+
+ def format_for_titlepage(self):
+ if self.is_editor:
+ return f"{self.titlepage_name}, Ed."
+ return self.titlepage_name
+
+
class DocumentAuthorInfo(models.Model):
person = ForeignKey(Person)
# email should only be null for some historic documents
@@ -894,7 +1020,7 @@ class Meta:
def role_for_doc(self):
"""Brief string description of this person's relationship to the doc"""
roles = []
- if self.person in self.document.authors():
+ if self.person in self.document.author_persons():
roles.append('Author')
if self.person == self.document.ad:
roles.append('Responsible AD')
@@ -920,7 +1046,18 @@ def role_for_doc(self):
'invalid'
)
+
+SUBSERIES_DOC_TYPE_IDS = ("bcp", "fyi", "std")
+
+
+class DocumentQuerySet(models.QuerySet):
+ def subseries_docs(self):
+ return self.filter(type_id__in=SUBSERIES_DOC_TYPE_IDS)
+
+
class Document(StorableMixin, DocumentInfo):
+ objects = DocumentQuerySet.as_manager()
+
name = models.CharField(max_length=255, validators=[validate_docname,], unique=True) # immutable
action_holders = models.ManyToManyField(Person, through=DocumentActionHolder, blank=True)
@@ -1026,6 +1163,22 @@ def request_closed_time(self, review_req):
e = self.latest_event(ReviewRequestDocEvent, type="closed_review_request", review_request=review_req)
return e.time if e and e.time else None
+ @property
+ def area(self) -> Group | None:
+ """Get area for document, if one exists
+
+ None for non-IETF-stream documents. N.b., this is stricter than Group.area() and
+ uses different logic from Document.area_acronym().
+ """
+ if self.stream_id != "ietf":
+ return None
+ if self.group is None:
+ return None
+ parent = self.group.parent
+        if parent is not None and parent.type_id == "area":
+ return parent
+ return None
+
def area_acronym(self):
g = self.group
if g:
@@ -1121,11 +1274,8 @@ def submission(self):
s = s.first()
return s
- def pub_date(self):
- """Get the publication date for this document
-
- This is the rfc publication date for RFCs, and the new-revision date for other documents.
- """
+ def pub_datetime(self):
+ """Get the publication datetime of this document"""
if self.type_id == "rfc":
# As of Sept 2022, in ietf.sync.rfceditor.update_docs_from_rfc_index() `published_rfc` events are
# created with a timestamp whose date *in the PST8PDT timezone* is the official publication date
@@ -1133,7 +1283,15 @@ def pub_date(self):
event = self.latest_event(type='published_rfc')
else:
event = self.latest_event(type='new_revision')
- return event.time.astimezone(RPC_TZINFO).date() if event else None
+ return event.time.astimezone(RPC_TZINFO) if event else None
+
+ def pub_date(self):
+ """Get the publication date for this document
+
+ This is the rfc publication date for RFCs, and the new-revision date for other documents.
+ """
+ pub_datetime = self.pub_datetime()
+ return None if pub_datetime is None else pub_datetime.date()
def is_dochistory(self):
return False
@@ -1169,6 +1327,32 @@ def action_holders_enabled(self):
iesg_state = self.get_state('draft-iesg')
return iesg_state and iesg_state.slug != 'idexists'
+ def formats(self):
+ """List of file formats available
+
+ Only implemented for RFCs. Relies on StoredObject.
+ """
+ if self.type_id != "rfc":
+ raise RuntimeError("Only allowed for type=rfc")
+
+ # StoredObject.doc_rev can be null or "" to represent no rev. Match either
+ # of these when self.rev is "" (always expected to be the case for RFCs)
+ rev_q = Q(doc_rev=self.rev)
+ if self.rev == "":
+ rev_q |= Q(doc_rev__isnull=True)
+ return [
+ {
+ "fmt": Path(object_name).parts[0],
+ "name": object_name,
+ }
+ for object_name in StoredObject.objects.filter(
+ rev_q,
+ store="rfc",
+ doc_name=self.name,
+ ).values_list("name", flat=True)
+ ]
+
+
class DocumentURL(models.Model):
doc = ForeignKey(Document)
tag = ForeignKey(DocUrlTagName)
@@ -1581,6 +1765,11 @@ class EditedAuthorsDocEvent(DocEvent):
"""
basis = models.CharField(help_text="What is the source or reasoning for the changes to the author list",max_length=255)
+
+class EditedRfcAuthorsDocEvent(DocEvent):
+ """Change to the RfcAuthor list for a document"""
+
+
class BofreqEditorDocEvent(DocEvent):
""" Capture the proponents of a BOF Request."""
editors = models.ManyToManyField('person.Person', blank=True)
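To illustrate the intended precedence between RfcAuthor and DocumentAuthor, a sketch
of populating title-page authors for an RFC and reading them back through the new
accessors. IndividualRfcFactory and PersonFactory are existing test factories;
RfcAuthorFactory appears in the test changes further down:

    from ietf.doc.factories import IndividualRfcFactory
    from ietf.doc.models import RfcAuthor
    from ietf.person.factories import PersonFactory

    rfc = IndividualRfcFactory()
    person = PersonFactory()
    RfcAuthor.objects.create(
        document=rfc,
        titlepage_name="J. Q. Author",
        person=person,
        is_editor=True,
        order=1,
    )

    # Once rfcauthor_set is non-empty, the accessors prefer it over
    # documentauthor_set.
    assert rfc.author_names() == [person.name]
    assert rfc.author_persons() == [person]
    assert rfc.rfcauthor_set.first().format_for_titlepage() == "J. Q. Author, Ed."
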
diff --git a/ietf/doc/resources.py b/ietf/doc/resources.py
index 157a3ad556..1d86df78d0 100644
--- a/ietf/doc/resources.py
+++ b/ietf/doc/resources.py
@@ -17,8 +17,9 @@
InitialReviewDocEvent, DocHistoryAuthor, BallotDocEvent, RelatedDocument,
RelatedDocHistory, BallotPositionDocEvent, AddedMessageEvent, SubmissionDocEvent,
ReviewRequestDocEvent, ReviewAssignmentDocEvent, EditedAuthorsDocEvent, DocumentURL,
- IanaExpertDocEvent, IRSGBallotDocEvent, DocExtResource, DocumentActionHolder,
- BofreqEditorDocEvent, BofreqResponsibleDocEvent, StoredObject)
+ IanaExpertDocEvent, IRSGBallotDocEvent, DocExtResource, DocumentActionHolder,
+ BofreqEditorDocEvent, BofreqResponsibleDocEvent, StoredObject, RfcAuthor,
+ EditedRfcAuthorsDocEvent)
from ietf.name.resources import BallotPositionNameResource, DocTypeNameResource
class BallotTypeResource(ModelResource):
@@ -650,6 +651,31 @@ class Meta:
api.doc.register(EditedAuthorsDocEventResource())
+
+from ietf.person.resources import PersonResource
+class EditedRfcAuthorsDocEventResource(ModelResource):
+ by = ToOneField(PersonResource, 'by')
+ doc = ToOneField(DocumentResource, 'doc')
+ docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr')
+ class Meta:
+ queryset = EditedRfcAuthorsDocEvent.objects.all()
+ serializer = api.Serializer()
+ cache = SimpleCache()
+ #resource_name = 'editedrfcauthorsdocevent'
+ ordering = ['id', ]
+ filtering = {
+ "id": ALL,
+ "time": ALL,
+ "type": ALL,
+ "rev": ALL,
+ "desc": ALL,
+ "by": ALL_WITH_RELATIONS,
+ "doc": ALL_WITH_RELATIONS,
+ "docevent_ptr": ALL_WITH_RELATIONS,
+ }
+api.doc.register(EditedRfcAuthorsDocEventResource())
+
+
from ietf.name.resources import DocUrlTagNameResource
class DocumentURLResource(ModelResource):
doc = ToOneField(DocumentResource, 'doc')
@@ -865,3 +891,28 @@ class Meta:
"deleted": ALL,
}
api.doc.register(StoredObjectResource())
+
+
+from ietf.person.resources import EmailResource, PersonResource
+class RfcAuthorResource(ModelResource):
+ document = ToOneField(DocumentResource, 'document')
+ person = ToOneField(PersonResource, 'person', null=True)
+ email = ToOneField(EmailResource, 'email', null=True, readonly=True)
+ class Meta:
+ queryset = RfcAuthor.objects.all()
+ serializer = api.Serializer()
+ cache = SimpleCache()
+ #resource_name = 'rfcauthor'
+ ordering = ['id', ]
+ filtering = {
+ "id": ALL,
+ "titlepage_name": ALL,
+ "is_editor": ALL,
+ "affiliation": ALL,
+ "country": ALL,
+ "order": ALL,
+ "document": ALL_WITH_RELATIONS,
+ "person": ALL_WITH_RELATIONS,
+ "email": ALL_WITH_RELATIONS,
+ }
+api.doc.register(RfcAuthorResource())
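With RfcAuthorResource registered on the doc API, rows should be reachable through
the usual tastypie URL pattern. A hedged sketch using the Django test client; the
/api/v1/doc/rfcauthor/ path is inferred from tastypie's default resource naming and
is not stated explicitly in this diff:

    from django.test import Client

    client = Client()
    # "document" uses ALL_WITH_RELATIONS, so related-field lookups work.
    response = client.get(
        "/api/v1/doc/rfcauthor/",
        {"document__name": "rfc9999", "format": "json"},
    )
    assert response.status_code == 200
    for author in response.json()["objects"]:
        print(author["order"], author["titlepage_name"], author["is_editor"])
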
diff --git a/ietf/doc/serializers.py b/ietf/doc/serializers.py
new file mode 100644
index 0000000000..3651670962
--- /dev/null
+++ b/ietf/doc/serializers.py
@@ -0,0 +1,360 @@
+# Copyright The IETF Trust 2024-2026, All Rights Reserved
+"""django-rest-framework serializers"""
+
+from dataclasses import dataclass
+from typing import Literal, ClassVar
+
+from django.db.models.manager import BaseManager
+from django.db.models.query import QuerySet
+from drf_spectacular.utils import extend_schema_field
+from rest_framework import serializers
+
+from ietf.group.serializers import (
+ AreaDirectorSerializer,
+ AreaSerializer,
+ GroupSerializer,
+)
+from ietf.name.serializers import StreamNameSerializer
+from ietf.utils import log
+from .models import Document, DocumentAuthor, RfcAuthor
+
+
+class RfcAuthorSerializer(serializers.ModelSerializer):
+ """Serializer for an RfcAuthor / DocumentAuthor in a response"""
+
+ email = serializers.EmailField(source="email.address", read_only=True)
+ datatracker_person_path = serializers.URLField(
+ source="person.get_absolute_url",
+ required=False,
+ help_text="URL for person link (relative to datatracker base URL)",
+ read_only=True,
+ )
+
+ class Meta:
+ model = RfcAuthor
+ fields = [
+ "titlepage_name",
+ "is_editor",
+ "person",
+ "email",
+ "affiliation",
+ "country",
+ "datatracker_person_path",
+ ]
+
+ def to_representation(self, instance):
+ """instance -> primitive data types
+
+        Translates a DocumentAuthor into an equivalent RfcAuthor so that we can use
+        the same serializer for either type.
+ """
+ if isinstance(instance, DocumentAuthor):
+ # create a non-persisted RfcAuthor as a shim - do not save it!
+ document_author = instance
+ instance = RfcAuthor(
+ titlepage_name=document_author.person.plain_name(),
+ is_editor=False,
+ person=document_author.person,
+ affiliation=document_author.affiliation,
+ country=document_author.country,
+ order=document_author.order,
+ )
+ return super().to_representation(instance)
+
+ def validate(self, data):
+ email = data.get("email")
+ if email is not None:
+ person = data.get("person")
+ if person is None:
+ raise serializers.ValidationError(
+ {
+ "email": "cannot have an email without a person",
+ },
+ code="email-without-person",
+ )
+ if email.person_id != person.pk:
+ raise serializers.ValidationError(
+ {
+ "email": "email must belong to person",
+ },
+ code="email-person-mismatch",
+ )
+ return data
+
+
+@dataclass
+class DocIdentifier:
+ type: Literal["doi", "issn"]
+ value: str
+
+
+class DocIdentifierSerializer(serializers.Serializer):
+ type = serializers.ChoiceField(choices=["doi", "issn"])
+ value = serializers.CharField()
+
+
+type RfcStatusSlugT = Literal[
+ "std",
+ "ps",
+ "ds",
+ "bcp",
+ "inf",
+ "exp",
+ "hist",
+ "unkn",
+ "not-issued",
+]
+
+
+@dataclass
+class RfcStatus:
+ """Helper to extract the 'Status' from an RFC document for serialization"""
+
+ slug: RfcStatusSlugT
+
+ # Names that aren't just the slug itself. ClassVar annotation prevents dataclass from treating this as a field.
+ fancy_names: ClassVar[dict[RfcStatusSlugT, str]] = {
+ "std": "internet standard",
+ "ps": "proposed standard",
+ "ds": "draft standard",
+ "bcp": "best current practice",
+ "inf": "informational",
+ "exp": "experimental",
+ "hist": "historic",
+ "unkn": "unknown",
+ }
+
+ # ClassVar annotation prevents dataclass from treating this as a field
+ stdlevelname_slug_map: ClassVar[dict[str, RfcStatusSlugT]] = {
+ "bcp": "bcp",
+ "ds": "ds",
+ "exp": "exp",
+ "hist": "hist",
+ "inf": "inf",
+ "std": "std",
+ "ps": "ps",
+ "unkn": "unkn",
+ }
+
+ # ClassVar annotation prevents dataclass from treating this as a field
+ status_slugs: ClassVar[list[RfcStatusSlugT]] = sorted(
+ # TODO implement "not-issued" RFCs
+ set(stdlevelname_slug_map.values()) | {"not-issued"}
+ )
+
+ @property
+ def name(self):
+ return RfcStatus.fancy_names.get(self.slug, self.slug)
+
+ @classmethod
+ def from_document(cls, doc: Document):
+ """Decide the status that applies to a document"""
+ return cls(
+            slug=cls.stdlevelname_slug_map.get(doc.std_level.slug, "unkn"),
+ )
+
+ @classmethod
+ def filter(cls, queryset, name, value: list[RfcStatusSlugT]):
+ """Filter a queryset by status
+
+        This is basically the inverse of the from_document() method. Given a list of status
+        slugs, filter the queryset to documents in those statuses. The queryset should be a Document queryset.
+ """
+ interesting_slugs = [
+ stdlevelname_slug
+ for stdlevelname_slug, status_slug in cls.stdlevelname_slug_map.items()
+ if status_slug in value
+ ]
+ if len(interesting_slugs) == 0:
+ return queryset.none()
+ return queryset.filter(std_level__slug__in=interesting_slugs)
+
+
+class RfcStatusSerializer(serializers.Serializer):
+ """Status serializer for a Document instance"""
+
+ slug = serializers.ChoiceField(choices=RfcStatus.status_slugs)
+ name = serializers.CharField()
+
+ def to_representation(self, instance: Document):
+ return super().to_representation(instance=RfcStatus.from_document(instance))
+
+
+class ShepherdSerializer(serializers.Serializer):
+ email = serializers.EmailField(source="email_address")
+
+
+class RelatedDraftSerializer(serializers.Serializer):
+ id = serializers.IntegerField(source="source.id")
+ name = serializers.CharField(source="source.name")
+ title = serializers.CharField(source="source.title")
+ shepherd = ShepherdSerializer(source="source.shepherd", allow_null=True)
+ ad = AreaDirectorSerializer(source="source.ad", allow_null=True)
+
+
+class RelatedRfcSerializer(serializers.Serializer):
+ id = serializers.IntegerField(source="target.id")
+ number = serializers.IntegerField(source="target.rfc_number")
+ title = serializers.CharField(source="target.title")
+
+
+class ReverseRelatedRfcSerializer(serializers.Serializer):
+ id = serializers.IntegerField(source="source.id")
+ number = serializers.IntegerField(source="source.rfc_number")
+ title = serializers.CharField(source="source.title")
+
+
+class ContainingSubseriesSerializer(serializers.Serializer):
+ name = serializers.CharField(source="source.name")
+ type = serializers.CharField(source="source.type_id")
+
+
+class RfcFormatSerializer(serializers.Serializer):
+ RFC_FORMATS = ("xml", "txt", "html", "pdf", "ps", "json", "notprepped")
+
+ fmt = serializers.ChoiceField(choices=RFC_FORMATS)
+ name = serializers.CharField(help_text="Name of blob in the blob store")
+
+
+class RfcMetadataSerializer(serializers.ModelSerializer):
+ """Serialize metadata of an RFC
+
+ This needs to be called with a Document queryset that has been processed with
+ api.augment_rfc_queryset() or it very likely will not work. Some of the typing
+ refers to Document, but this should really be WithAnnotations[Document, ...].
+    However, we have not been able to make that work yet.
+ """
+
+ number = serializers.IntegerField(source="rfc_number")
+ published = serializers.DateField()
+ status = RfcStatusSerializer(source="*")
+ authors = serializers.SerializerMethodField()
+ group = GroupSerializer()
+ area = AreaSerializer(read_only=True)
+ stream = StreamNameSerializer()
+ ad = AreaDirectorSerializer(read_only=True, allow_null=True)
+ group_list_email = serializers.EmailField(source="group.list_email", read_only=True)
+ identifiers = serializers.SerializerMethodField()
+ draft = serializers.SerializerMethodField()
+ obsoletes = RelatedRfcSerializer(many=True, read_only=True)
+ obsoleted_by = ReverseRelatedRfcSerializer(many=True, read_only=True)
+ updates = RelatedRfcSerializer(many=True, read_only=True)
+ updated_by = ReverseRelatedRfcSerializer(many=True, read_only=True)
+ subseries = ContainingSubseriesSerializer(many=True, read_only=True)
+ formats = RfcFormatSerializer(
+ many=True, read_only=True, help_text="Available formats"
+ )
+ keywords = serializers.ListField(child=serializers.CharField(), read_only=True)
+ has_errata = serializers.BooleanField(read_only=True)
+
+ class Meta:
+ model = Document
+ fields = [
+ "number",
+ "title",
+ "published",
+ "status",
+ "pages",
+ "authors",
+ "group",
+ "area",
+ "stream",
+ "ad",
+ "group_list_email",
+ "identifiers",
+ "obsoletes",
+ "obsoleted_by",
+ "updates",
+ "updated_by",
+ "subseries",
+ "draft",
+ "abstract",
+ "formats",
+ "keywords",
+ "has_errata",
+ ]
+
+ @extend_schema_field(RfcAuthorSerializer(many=True))
+ def get_authors(self, doc: Document):
+        # If doc has any RfcAuthors, use those; otherwise fall back to DocumentAuthors
+ author_queryset: QuerySet[RfcAuthor] | QuerySet[DocumentAuthor] = (
+ doc.rfcauthor_set.all()
+ if doc.rfcauthor_set.exists()
+ else doc.documentauthor_set.all()
+ )
+ # RfcAuthorSerializer can deal with DocumentAuthor instances
+ return RfcAuthorSerializer(
+ instance=author_queryset,
+ many=True,
+ ).data
+
+ @extend_schema_field(DocIdentifierSerializer(many=True))
+ def get_identifiers(self, doc: Document):
+ identifiers = []
+ if doc.doi:
+ identifiers.append(
+ DocIdentifier(type="doi", value=doc.doi)
+ )
+ return DocIdentifierSerializer(instance=identifiers, many=True).data
+
+ @extend_schema_field(RelatedDraftSerializer)
+ def get_draft(self, doc: Document):
+ if hasattr(doc, "drafts"):
+ # This is the expected case - drafts is added by a Prefetch in
+ # the augment_rfc_queryset() method.
+ try:
+ related_doc = doc.drafts[0]
+ except IndexError:
+ return None
+ else:
+ # Fallback in case augment_rfc_queryset() was not called
+ log.log(
+                f"Warning: {self.__class__.__name__}.get_draft() called without prefetched draft"
+ )
+ related_doc = doc.came_from_draft()
+ return RelatedDraftSerializer(related_doc).data
+
+
+class RfcSerializer(RfcMetadataSerializer):
+ """Serialize an RFC, including its metadata and text content if available"""
+
+ text = serializers.CharField(allow_null=True)
+
+ class Meta:
+ model = RfcMetadataSerializer.Meta.model
+ fields = RfcMetadataSerializer.Meta.fields + ["text"]
+
+
+class SubseriesContentListSerializer(serializers.ListSerializer):
+ """ListSerializer that gets its object from item.target"""
+
+ def to_representation(self, data):
+ """
+ List of object instances -> List of dicts of primitive datatypes.
+ """
+ # Dealing with nested relationships, data can be a Manager,
+ # so, first get a queryset from the Manager if needed
+ iterable = data.all() if isinstance(data, BaseManager) else data
+ # Serialize item.target instead of item itself
+ return [self.child.to_representation(item.target) for item in iterable]
+
+
+class SubseriesContentSerializer(RfcMetadataSerializer):
+ """Serialize RFC contained in a subseries doc"""
+
+ class Meta(RfcMetadataSerializer.Meta):
+ list_serializer_class = SubseriesContentListSerializer
+
+
+class SubseriesDocSerializer(serializers.ModelSerializer):
+ """Serialize a subseries document (e.g., a BCP or STD)"""
+
+ contents = SubseriesContentSerializer(many=True)
+
+ class Meta:
+ model = Document
+ fields = [
+ "name",
+ "type",
+ "contents",
+ ]
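A short sketch of the RfcStatus helper round trip: mapping a document's std_level to
a status and then filtering a queryset back by that status. This assumes at least one
published RFC with std_level set:

    from ietf.doc.models import Document
    from ietf.doc.serializers import RfcStatus

    doc = Document.objects.filter(type_id="rfc").first()
    status = RfcStatus.from_document(doc)
    print(status.slug, status.name)  # e.g. "ps" and "proposed standard"

    # filter() inverts the mapping for queryset filtering.
    proposed = RfcStatus.filter(
        Document.objects.filter(type_id="rfc"), "status", ["ps"]
    )
    assert (doc in proposed) == (status.slug == "ps")
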
diff --git a/ietf/doc/storage.py b/ietf/doc/storage.py
index 375620ccaf..ee1e76c4fa 100644
--- a/ietf/doc/storage.py
+++ b/ietf/doc/storage.py
@@ -114,7 +114,6 @@ def _get_write_parameters(self, name, content=None):
class StoredObjectBlobdbStorage(BlobdbStorage):
- ietf_log_blob_timing = True
warn_if_missing = True # TODO-BLOBSTORE make this configurable (or remove it)
def _save_stored_object(self, name, content) -> StoredObject:
diff --git a/ietf/doc/storage_utils.py b/ietf/doc/storage_utils.py
index 81588c83ec..9c18bb8a8a 100644
--- a/ietf/doc/storage_utils.py
+++ b/ietf/doc/storage_utils.py
@@ -10,6 +10,7 @@
from django.core.files.storage import storages, Storage
from ietf.utils.log import log
+from ietf.utils.text import decode_document_content
class StorageUtilsError(Exception):
@@ -164,34 +165,30 @@ def store_str(
def retrieve_bytes(kind: str, name: str) -> bytes:
from ietf.doc.storage import maybe_log_timing
- content = b""
- if settings.ENABLE_BLOBSTORAGE:
- try:
- store = _get_storage(kind)
- with store.open(name) as f:
- with maybe_log_timing(
- hasattr(store, "ietf_log_blob_timing") and store.ietf_log_blob_timing,
- "read",
- bucket_name=store.bucket_name if hasattr(store, "bucket_name") else "",
- name=name,
- ):
- content = f.read()
- except Exception as err:
- log(f"Blobstore Error: Failed to read bytes from {kind}:{name}: {repr(err)}")
- if settings.SERVER_MODE == "development":
- raise
+ if not settings.ENABLE_BLOBSTORAGE:
+ return b""
+ try:
+ store = _get_storage(kind)
+ with store.open(name) as f:
+ with maybe_log_timing(
+ hasattr(store, "ietf_log_blob_timing") and store.ietf_log_blob_timing,
+ "read",
+ bucket_name=store.bucket_name if hasattr(store, "bucket_name") else "",
+ name=name,
+ ):
+ content = f.read()
+ except Exception as err:
+ log(f"Blobstore Error: Failed to read bytes from {kind}:{name}: {repr(err)}")
+ raise
return content
def retrieve_str(kind: str, name: str) -> str:
- content = ""
- if settings.ENABLE_BLOBSTORAGE:
- try:
- content_bytes = retrieve_bytes(kind, name)
- # TODO-BLOBSTORE: try to decode all the different ways doc.text() does
- content = content_bytes.decode("utf-8")
- except Exception as err:
- log(f"Blobstore Error: Failed to read string from {kind}:{name}: {repr(err)}")
- if settings.SERVER_MODE == "development":
- raise
+ if not settings.ENABLE_BLOBSTORAGE:
+ return ""
+ try:
+ content = decode_document_content(retrieve_bytes(kind, name))
+ except Exception as err:
+ log(f"Blobstore Error: Failed to read string from {kind}:{name}: {repr(err)}")
+ raise
return content
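decode_document_content() lives in ietf.utils.text and is not shown in this diff; it
replaces the inline decoding removed from DocumentInfo.text() above. Based on that
removed code, its behavior is roughly the following. This is a sketch of the expected
behavior, not the actual implementation:

    def decode_document_content_sketch(raw: bytes) -> str:
        try:
            return raw.decode("utf-8")
        except UnicodeDecodeError:
            # A truncated read can split a multi-byte UTF-8 sequence;
            # drop up to three trailing bytes and retry.
            for back in range(1, 4):
                try:
                    return raw[:-back].decode("utf-8")
                except UnicodeDecodeError:
                    pass
        # latin-1 maps every byte value, so this cannot fail.
        return raw.decode("latin-1")

    assert decode_document_content_sketch("café".encode()[:-1]) == "caf"
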
diff --git a/ietf/doc/tasks.py b/ietf/doc/tasks.py
index 4f7fe37782..273242e35f 100644
--- a/ietf/doc/tasks.py
+++ b/ietf/doc/tasks.py
@@ -1,17 +1,21 @@
-# Copyright The IETF Trust 2024-2025, All Rights Reserved
+# Copyright The IETF Trust 2024-2026, All Rights Reserved
#
# Celery task definitions
#
import datetime
+
import debug # pyflakes:ignore
from celery import shared_task
+from celery.exceptions import MaxRetriesExceededError
from pathlib import Path
from django.conf import settings
from django.utils import timezone
-from ietf.utils import log
+from ietf.doc.utils_r2 import rfcs_are_in_r2
+from ietf.doc.utils_red import trigger_red_precomputer
+from ietf.utils import log, searchindex
from ietf.utils.timezone import datetime_today
from .expire import (
@@ -29,10 +33,13 @@
from .utils import (
generate_idnits2_rfc_status,
generate_idnits2_rfcs_obsoleted,
+ rebuild_reference_relations,
update_or_create_draft_bibxml_file,
ensure_draft_bibxml_path_exists,
investigate_fragment,
)
+from .utils_bofreq import fixup_bofreq_timestamps
+from .utils_errata import signal_update_rfc_metadata
@shared_task
@@ -74,17 +81,19 @@ def expire_last_calls_task():
try:
expire_last_call(doc)
except Exception:
- log.log(f"ERROR: Failed to expire last call for {doc.file_tag()} (id={doc.pk})")
+ log.log(
+ f"ERROR: Failed to expire last call for {doc.file_tag()} (id={doc.pk})"
+ )
else:
log.log(f"Expired last call for {doc.file_tag()} (id={doc.pk})")
-@shared_task
+@shared_task
def generate_idnits2_rfc_status_task():
outpath = Path(settings.DERIVED_DIR) / "idnits2-rfc-status"
blob = generate_idnits2_rfc_status()
try:
- outpath.write_text(blob, encoding="utf8") # TODO-BLOBSTORE
+ outpath.write_text(blob, encoding="utf8") # TODO-BLOBSTORE
except Exception as e:
log.log(f"failed to write idnits2-rfc-status: {e}")
@@ -94,7 +103,7 @@ def generate_idnits2_rfcs_obsoleted_task():
outpath = Path(settings.DERIVED_DIR) / "idnits2-rfcs-obsoleted"
blob = generate_idnits2_rfcs_obsoleted()
try:
- outpath.write_text(blob, encoding="utf8") # TODO-BLOBSTORE
+ outpath.write_text(blob, encoding="utf8") # TODO-BLOBSTORE
except Exception as e:
log.log(f"failed to write idnits2-rfcs-obsoleted: {e}")
@@ -102,7 +111,7 @@ def generate_idnits2_rfcs_obsoleted_task():
@shared_task
def generate_draft_bibxml_files_task(days=7, process_all=False):
"""Generate bibxml files for recently updated docs
-
+
If process_all is False (the default), processes only docs with new revisions
in the last specified number of days.
"""
@@ -114,7 +123,9 @@ def generate_draft_bibxml_files_task(days=7, process_all=False):
doc__type_id="draft",
).order_by("time")
if not process_all:
- doc_events = doc_events.filter(time__gte=timezone.now() - datetime.timedelta(days=days))
+ doc_events = doc_events.filter(
+ time__gte=timezone.now() - datetime.timedelta(days=days)
+ )
for event in doc_events:
try:
update_or_create_draft_bibxml_file(event.doc, event.rev)
@@ -128,3 +139,84 @@ def investigate_fragment_task(name_fragment: str):
"name_fragment": name_fragment,
"results": investigate_fragment(name_fragment),
}
+
+
+@shared_task
+def rebuild_reference_relations_task(doc_names: list[str]):
+ log.log(f"Task: Rebuilding reference relations for {doc_names}")
+ for doc in Document.objects.filter(name__in=doc_names, type__in=["rfc", "draft"]):
+ filenames = dict()
+ base = (
+ settings.RFC_PATH
+ if doc.type_id == "rfc"
+ else settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR
+ )
+ stem = doc.name if doc.type_id == "rfc" else f"{doc.name}-{doc.rev}"
+ for ext in ["xml", "txt"]:
+ path = Path(base) / f"{stem}.{ext}"
+ if path.is_file():
+ filenames[ext] = str(path)
+ if len(filenames) > 0:
+ rebuild_reference_relations(doc, filenames)
+ else:
+ log.log(f"Found no content for {stem}")
+
+
+@shared_task
+def fixup_bofreq_timestamps_task(): # pragma: nocover
+ fixup_bofreq_timestamps()
+
+
+@shared_task
+def signal_update_rfc_metadata_task(rfc_number_list=()):
+ signal_update_rfc_metadata(rfc_number_list)
+
+
+@shared_task(bind=True)
+def trigger_red_precomputer_task(self, rfc_number_list=()):
+ if not rfcs_are_in_r2(rfc_number_list):
+ log.log(f"Objects are not yet in R2 for RFCs {rfc_number_list}")
+ try:
+ countdown = getattr(settings, "RED_PRECOMPUTER_TRIGGER_RETRY_DELAY", 10)
+ max_retries = getattr(settings, "RED_PRECOMPUTER_TRIGGER_MAX_RETRIES", 12)
+ self.retry(countdown=countdown, max_retries=max_retries)
+ except MaxRetriesExceededError:
+ log.log(f"Gave up waiting for objects in R2 for RFCs {rfc_number_list}")
+ else:
+ trigger_red_precomputer(rfc_number_list)
+
+
+@shared_task(bind=True)
+def update_rfc_searchindex_task(self, rfc_number: int):
+ """Update the search index for one RFC"""
+ if not searchindex.enabled():
+ log.log("Search indexing is not enabled, skipping")
+ return
+
+ rfc = Document.objects.filter(type_id="rfc", rfc_number=rfc_number).first()
+ if rfc is None:
+ log.log(
+ f"ERROR: Document for rfc{rfc_number} not found, not updating search index"
+ )
+ return
+ try:
+ searchindex.update_or_create_rfc_entry(rfc)
+ except Exception as err:
+ log.log(f"Search index update for {rfc.name} failed ({err})")
+ if isinstance(err, searchindex.RETRYABLE_ERROR_CLASSES):
+ searchindex_settings = searchindex.get_settings()
+ self.retry(
+ countdown=searchindex_settings["TASK_RETRY_DELAY"],
+ max_retries=searchindex_settings["TASK_MAX_RETRIES"],
+ )
+
+
+@shared_task
+def rebuild_searchindex_task(*, batchsize=40, drop_collection=False):
+ if drop_collection:
+ searchindex.delete_collection()
+ searchindex.create_collection()
+ searchindex.update_or_create_rfc_entries(
+ Document.objects.filter(type_id="rfc").order_by("-rfc_number"),
+ batchsize=batchsize,
+ )
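The bound tasks use Celery's standard self.retry() pattern: retry() raises, so a
retried invocation never falls through to the success path. A sketch of dispatching
the new tasks; queue names and schedules are deployment configuration and not part
of this diff:

    from ietf.doc.tasks import (
        rebuild_searchindex_task,
        signal_update_rfc_metadata_task,
        trigger_red_precomputer_task,
    )

    # Fire-and-forget metadata notification for freshly published RFCs.
    signal_update_rfc_metadata_task.delay(rfc_number_list=[9990, 9991])

    # Polls R2 with retries until the objects appear, or gives up after
    # RED_PRECOMPUTER_TRIGGER_MAX_RETRIES attempts.
    trigger_red_precomputer_task.delay(rfc_number_list=[9990, 9991])

    # Full search reindex, dropping the existing collection first.
    rebuild_searchindex_task.delay(batchsize=40, drop_collection=True)
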
diff --git a/ietf/doc/templatetags/ietf_filters.py b/ietf/doc/templatetags/ietf_filters.py
index 5cabe1728d..ae5df641c2 100644
--- a/ietf/doc/templatetags/ietf_filters.py
+++ b/ietf/doc/templatetags/ietf_filters.py
@@ -1017,3 +1017,61 @@ def is_in_stream(doc):
elif stream == "editorial":
return True
return False
+
+
+@register.filter
+def is_doc_ietf_adoptable(doc):
+ return doc.stream_id is None or all(
+ [
+ doc.stream_id == "ietf",
+ doc.get_state_slug("draft-stream-ietf")
+ not in [
+ "c-adopt",
+ "adopt-wg",
+ "info",
+ "wg-doc",
+ "parked",
+ "dead",
+ "wg-lc",
+ "waiting-for-implementation",
+ "chair-w",
+ "writeupw",
+ "sub-pub",
+ ],
+ doc.get_state_slug("draft") != "rfc",
+ doc.became_rfc() is None,
+ ]
+ )
+
+
+@register.filter
+def can_issue_ietf_wg_lc(doc):
+ return all(
+ [
+ doc.stream_id == "ietf",
+ doc.get_state_slug("draft-stream-ietf")
+ not in ["wg-cand", "c-adopt", "wg-lc"],
+ doc.get_state_slug("draft") != "rfc",
+ doc.became_rfc() is None,
+ ]
+ )
+
+
+@register.filter
+def can_submit_to_iesg(doc):
+ return all(
+ [
+ doc.stream_id == "ietf",
+ doc.get_state_slug("draft-iesg") == "idexists",
+ doc.get_state_slug("draft-stream-ietf") not in ["wg-cand", "c-adopt"],
+ ]
+ )
+
+
+@register.filter
+def has_had_ietf_wg_lc(doc):
+ return (
+ doc.stream_id == "ietf"
+ and doc.docevent_set.filter(statedocevent__state__slug="wg-lc").exists()
+ )
+
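The new filters are ordinary registered template filters, so they compose with
{% if %} in templates. A sketch of rendering one from Python, assuming a configured
Django environment and at least one IETF-stream draft:

    from django.template import Context, Template

    from ietf.doc.models import Document

    doc = Document.objects.filter(type_id="draft", stream_id="ietf").first()
    template = Template(
        "{% load ietf_filters %}"
        "{% if doc|can_submit_to_iesg %}Ready for IESG submission{% endif %}"
    )
    print(template.render(Context({"doc": doc})))
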
diff --git a/ietf/doc/tests.py b/ietf/doc/tests.py
index 16dcfb7754..f92c9648e6 100644
--- a/ietf/doc/tests.py
+++ b/ietf/doc/tests.py
@@ -39,11 +39,15 @@
from ietf.doc.models import ( Document, DocRelationshipName, RelatedDocument, State,
DocEvent, BallotPositionDocEvent, LastCallDocEvent, WriteupDocEvent, NewRevisionDocEvent, BallotType,
EditedAuthorsDocEvent, StateType)
-from ietf.doc.factories import ( DocumentFactory, DocEventFactory, CharterFactory,
- ConflictReviewFactory, WgDraftFactory, IndividualDraftFactory, WgRfcFactory,
- IndividualRfcFactory, StateDocEventFactory, BallotPositionDocEventFactory,
- BallotDocEventFactory, DocumentAuthorFactory, NewRevisionDocEventFactory,
- StatusChangeFactory, DocExtResourceFactory, RgDraftFactory, BcpFactory)
+from ietf.doc.factories import (DocumentFactory, DocEventFactory, CharterFactory,
+ ConflictReviewFactory, WgDraftFactory,
+ IndividualDraftFactory, WgRfcFactory,
+ IndividualRfcFactory, StateDocEventFactory,
+ BallotPositionDocEventFactory,
+ BallotDocEventFactory, DocumentAuthorFactory,
+ NewRevisionDocEventFactory,
+ StatusChangeFactory, DocExtResourceFactory,
+ RgDraftFactory, BcpFactory, RfcAuthorFactory)
from ietf.doc.forms import NotifyForm
from ietf.doc.fields import SearchableDocumentsField
from ietf.doc.utils import (
@@ -979,7 +983,7 @@ def test_edit_authors_permissions(self):
# Relevant users not authorized to edit authors
unauthorized_usernames = [
'plain',
- *[author.user.username for author in draft.authors()],
+ *[author.user.username for author in draft.author_persons()],
draft.group.get_chair().person.user.username,
'ad'
]
@@ -994,7 +998,7 @@ def test_edit_authors_permissions(self):
self.client.logout()
# Try to add an author via POST - still only the secretary should be able to do this.
- orig_authors = draft.authors()
+ orig_authors = draft.author_persons()
post_data = self.make_edit_authors_post_data(
basis='permission test',
authors=draft.documentauthor_set.all(),
@@ -1012,12 +1016,12 @@ def test_edit_authors_permissions(self):
for username in unauthorized_usernames:
login_testing_unauthorized(self, username, url, method='post', request_kwargs=dict(data=post_data))
draft = Document.objects.get(pk=draft.pk)
- self.assertEqual(draft.authors(), orig_authors) # ensure draft author list was not modified
+ self.assertEqual(draft.author_persons(), orig_authors) # ensure draft author list was not modified
login_testing_unauthorized(self, 'secretary', url, method='post', request_kwargs=dict(data=post_data))
r = self.client.post(url, post_data)
self.assertEqual(r.status_code, 302)
draft = Document.objects.get(pk=draft.pk)
- self.assertEqual(draft.authors(), orig_authors + [new_auth_person])
+ self.assertEqual(draft.author_persons(), orig_authors + [new_auth_person])
def make_edit_authors_post_data(self, basis, authors):
"""Helper to generate edit_authors POST data for a set of authors"""
@@ -1365,8 +1369,8 @@ def test_edit_authors_edit_fields(self):
basis=change_reason
)
- old_address = draft.authors()[0].email()
- new_email = EmailFactory(person=draft.authors()[0], address=f'changed-{old_address}')
+ old_address = draft.author_persons()[0].email()
+ new_email = EmailFactory(person=draft.author_persons()[0], address=f'changed-{old_address}')
post_data['author-0-email'] = new_email.address
post_data['author-1-affiliation'] = 'University of Nowhere'
post_data['author-2-country'] = 'Chile'
@@ -1399,17 +1403,17 @@ def test_edit_authors_edit_fields(self):
country_event = change_events.filter(desc__icontains='changed country').first()
self.assertIsNotNone(email_event)
- self.assertIn(draft.authors()[0].name, email_event.desc)
+ self.assertIn(draft.author_persons()[0].name, email_event.desc)
self.assertIn(before[0]['email'], email_event.desc)
self.assertIn(after[0]['email'], email_event.desc)
self.assertIsNotNone(affiliation_event)
- self.assertIn(draft.authors()[1].name, affiliation_event.desc)
+ self.assertIn(draft.author_persons()[1].name, affiliation_event.desc)
self.assertIn(before[1]['affiliation'], affiliation_event.desc)
self.assertIn(after[1]['affiliation'], affiliation_event.desc)
self.assertIsNotNone(country_event)
- self.assertIn(draft.authors()[2].name, country_event.desc)
+ self.assertIn(draft.author_persons()[2].name, country_event.desc)
self.assertIn(before[2]['country'], country_event.desc)
self.assertIn(after[2]['country'], country_event.desc)
@@ -1863,13 +1867,63 @@ def test_document_ballot_needed_positions(self):
def test_document_json(self):
doc = IndividualDraftFactory()
-
+ author = DocumentAuthorFactory(document=doc)
+
r = self.client.get(urlreverse("ietf.doc.views_doc.document_json", kwargs=dict(name=doc.name)))
self.assertEqual(r.status_code, 200)
data = r.json()
- self.assertEqual(doc.name, data['name'])
- self.assertEqual(doc.pages,data['pages'])
+ self.assertEqual(data["name"], doc.name)
+ self.assertEqual(data["pages"], doc.pages)
+ self.assertEqual(
+ data["authors"],
+ [
+ {
+ "name": author.person.name,
+ "email": author.email.address,
+ "affiliation": author.affiliation,
+ }
+ ]
+ )
+
+    def test_document_json_rfc(self):
+ doc = IndividualRfcFactory()
+ old_style_author = DocumentAuthorFactory(document=doc)
+ url = urlreverse("ietf.doc.views_doc.document_json", kwargs=dict(name=doc.name))
+
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ data = r.json()
+ self.assertEqual(data["name"], doc.name)
+ self.assertEqual(data["pages"], doc.pages)
+ self.assertEqual(
+ data["authors"],
+ [
+ {
+ "name": old_style_author.person.name,
+ "email": old_style_author.email.address,
+ "affiliation": old_style_author.affiliation,
+ }
+ ]
+ )
+
+ new_style_author = RfcAuthorFactory(document=doc)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ data = r.json()
+ self.assertEqual(data["name"], doc.name)
+ self.assertEqual(data["pages"], doc.pages)
+ self.assertEqual(
+ data["authors"],
+ [
+ {
+ "name": new_style_author.titlepage_name,
+ "email": new_style_author.email.address,
+ "affiliation": new_style_author.affiliation,
+ }
+ ]
+ )
+
+
def test_writeup(self):
doc = IndividualDraftFactory(states = [('draft','active'),('draft-iesg','iesg-eva')],)
diff --git a/ietf/doc/tests_bofreq.py b/ietf/doc/tests_bofreq.py
index 6a7c9393ef..6b142149be 100644
--- a/ietf/doc/tests_bofreq.py
+++ b/ietf/doc/tests_bofreq.py
@@ -307,17 +307,20 @@ def test_submit(self):
url = urlreverse('ietf.doc.views_bofreq.submit', kwargs=dict(name=doc.name))
rev = doc.rev
+ doc_time = doc.time
r = self.client.post(url,{'bofreq_submission':'enter','bofreq_content':'# oiwefrase'})
self.assertEqual(r.status_code, 302)
doc = reload_db_objects(doc)
- self.assertEqual(rev, doc.rev)
+ self.assertEqual(doc.rev, rev)
+ self.assertEqual(doc.time, doc_time)
nobody = PersonFactory()
self.client.login(username=nobody.user.username, password=nobody.user.username+'+password')
r = self.client.post(url,{'bofreq_submission':'enter','bofreq_content':'# oiwefrase'})
self.assertEqual(r.status_code, 403)
doc = reload_db_objects(doc)
- self.assertEqual(rev, doc.rev)
+ self.assertEqual(doc.rev, rev)
+ self.assertEqual(doc.time, doc_time)
self.client.logout()
editor = bofreq_editors(doc).first()
@@ -339,12 +342,14 @@ def test_submit(self):
r = self.client.post(url, postdict)
self.assertEqual(r.status_code, 302)
doc = reload_db_objects(doc)
- self.assertEqual('%02d'%(int(rev)+1) ,doc.rev)
- self.assertEqual(f'# {username}', doc.text())
- self.assertEqual(f'# {username}', retrieve_str('bofreq',doc.get_base_name()))
- self.assertEqual(docevent_count+1, doc.docevent_set.count())
- self.assertEqual(1, len(outbox))
+ self.assertEqual(doc.rev, '%02d'%(int(rev)+1))
+ self.assertGreater(doc.time, doc_time)
+ self.assertEqual(doc.text(), f'# {username}')
+ self.assertEqual(retrieve_str('bofreq', doc.get_base_name()), f'# {username}')
+ self.assertEqual(doc.docevent_set.count(), docevent_count+1)
+ self.assertEqual(len(outbox), 1)
rev = doc.rev
+ doc_time = doc.time
finally:
os.unlink(file.name)
diff --git a/ietf/doc/tests_draft.py b/ietf/doc/tests_draft.py
index 4d262c5a2f..21a873c5c0 100644
--- a/ietf/doc/tests_draft.py
+++ b/ietf/doc/tests_draft.py
@@ -21,13 +21,14 @@
import debug # pyflakes:ignore
from ietf.doc.expire import expirable_drafts, get_expired_drafts, send_expire_notice_for_draft, expire_draft
-from ietf.doc.factories import EditorialDraftFactory, IndividualDraftFactory, WgDraftFactory, RgDraftFactory, DocEventFactory
+from ietf.doc.factories import EditorialDraftFactory, IndividualDraftFactory, StateDocEventFactory, WgDraftFactory, RgDraftFactory, DocEventFactory, WgRfcFactory
from ietf.doc.models import ( Document, DocReminder, DocEvent,
ConsensusDocEvent, LastCallDocEvent, RelatedDocument, State, TelechatDocEvent,
WriteupDocEvent, DocRelationshipName, IanaExpertDocEvent )
from ietf.doc.storage_utils import exists_in_storage, store_str
from ietf.doc.utils import get_tags_for_stream_id, create_ballot_if_not_open
-from ietf.doc.views_draft import AdoptDraftForm
+from ietf.doc.views_draft import AdoptDraftForm, IssueCallForAdoptionForm, IssueWorkingGroupLastCallForm
+from ietf.ietfauth.utils import has_role
from ietf.name.models import DocTagName, RoleName
from ietf.group.factories import GroupFactory, RoleFactory
from ietf.group.models import Group, Role
@@ -86,7 +87,7 @@ def test_ad_approved(self):
self.assertTrue("Approved: " in outbox[-1]['Subject'])
self.assertTrue(draft.name in outbox[-1]['Subject'])
self.assertTrue('iesg@' in outbox[-1]['To'])
-
+
def test_change_state(self):
ad = Person.objects.get(user__username="ad")
draft = WgDraftFactory(
@@ -139,7 +140,7 @@ def test_change_state(self):
self.assertEqual(draft.get_state_slug("draft-iesg"), "review-e")
self.assertTrue(not draft.tags.filter(slug="ad-f-up"))
self.assertTrue(draft.tags.filter(slug="need-rev"))
- self.assertCountEqual(draft.action_holders.all(), [ad] + draft.authors())
+ self.assertCountEqual(draft.action_holders.all(), [ad] + draft.author_persons())
self.assertEqual(draft.docevent_set.count(), events_before + 3)
self.assertTrue("Test comment" in draft.docevent_set.all()[0].desc)
self.assertTrue("Changed action holders" in draft.docevent_set.all()[1].desc)
@@ -178,7 +179,7 @@ def test_pull_from_rfc_queue(self):
states=[('draft-iesg','rfcqueue')],
)
DocEventFactory(type='started_iesg_process',by=ad,doc=draft,rev=draft.rev,desc="Started IESG Process")
- draft.action_holders.add(*(draft.authors()))
+ draft.action_holders.add(*(draft.author_persons()))
url = urlreverse('ietf.doc.views_draft.change_state', kwargs=dict(name=draft.name))
login_testing_unauthorized(self, "secretary", url)
@@ -278,7 +279,7 @@ def test_request_last_call(self):
states=[('draft-iesg','ad-eval')],
)
DocEventFactory(type='started_iesg_process',by=ad,doc=draft,rev=draft.rev,desc="Started IESG Process")
- draft.action_holders.add(*(draft.authors()))
+ draft.action_holders.add(*(draft.author_persons()))
self.client.login(username="secretary", password="secretary+password")
url = urlreverse('ietf.doc.views_draft.change_state', kwargs=dict(name=draft.name))
@@ -1368,7 +1369,7 @@ def _test_changing_ah(action_holders, reason):
_test_changing_ah([doc.ad, doc.shepherd.person], 'this is a first test')
_test_changing_ah([doc.ad], 'this is a second test')
- _test_changing_ah(doc.authors(), 'authors can do it, too')
+ _test_changing_ah(doc.author_persons(), 'authors can do it, too')
_test_changing_ah([], 'clear it back out')
def test_doc_change_action_holders_as_doc_manager(self):
@@ -1708,11 +1709,7 @@ def test_adopt_document(self):
self.assertEqual(draft.group, chair_role.group)
self.assertEqual(draft.stream_id, stream_state_type_slug[type_id][13:]) # trim off "draft-stream-"
self.assertEqual(draft.docevent_set.count() - events_before, 5)
- self.assertEqual(len(outbox), 2)
- self.assertTrue("Call For Adoption" in outbox[0]["Subject"])
- self.assertTrue(f"{chair_role.group.acronym}-chairs@" in outbox[0]['To'])
- self.assertTrue(f"{draft.name}@" in outbox[0]['To'])
- self.assertTrue(f"{chair_role.group.acronym}@" in outbox[0]['To'])
+ self.assertEqual(len(outbox), 1)
- # contents of outbox[1] are tested elsewhere
+ # contents of outbox[0] are tested elsewhere
# adopt
@@ -2003,6 +2000,40 @@ def test_set_state(self):
self.assertTrue("mars-chairs@ietf.org" in outbox[0].as_string())
self.assertTrue("marsdelegate@ietf.org" in outbox[0].as_string())
+ def test_set_stream_state_to_wglc(self):
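+ # A chair should be offered the wg-lc state once the doc has already been
+ # through WGLC; the secretariat sees it regardless of document history.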
+ def _form_presents_state_option(response, state):
+ q = PyQuery(response.content)
+ option = q(f"select#id_new_state option[value='{state.pk}']")
+ return len(option) != 0
+
+ doc = WgDraftFactory()
+ chair = RoleFactory(name_id="chair", group=doc.group).person
+ url = urlreverse(
+ "ietf.doc.views_draft.change_stream_state",
+ kwargs=dict(name=doc.name, state_type="draft-stream-ietf"),
+ )
+ login_testing_unauthorized(self, chair.user.username, url)
+ r = self.client.get(url)
+ wglc_state = State.objects.get(type="draft-stream-ietf", slug="wg-lc")
+ doc.set_state(wglc_state)
+ StateDocEventFactory(
+ doc=doc,
+ state_type_id="draft-stream-ietf",
+ state=("draft-stream-ietf", "wg-lc"),
+ )
+ self.assertEqual(doc.docevent_set.count(), 2)
+ r = self.client.get(url)
+ self.assertTrue(_form_presents_state_option(r, wglc_state))
+ other_doc = WgDraftFactory()
+ self.client.logout()
+ url = urlreverse(
+ "ietf.doc.views_draft.change_stream_state",
+ kwargs=dict(name=other_doc.name, state_type="draft-stream-ietf"),
+ )
+ login_testing_unauthorized(self, "secretary", url)
+ r = self.client.get(url)
+ self.assertTrue(_form_presents_state_option(r, wglc_state))
+
def test_wg_call_for_adoption_issued(self):
role = RoleFactory(
name_id="chair",
@@ -2029,12 +2060,7 @@ def test_wg_call_for_adoption_issued(self):
),
)
self.assertEqual(r.status_code, 302)
- self.assertEqual(len(outbox), 2)
- self.assertIn("mars-wg@ietf.org", outbox[1]["To"])
- self.assertIn("Call for adoption", outbox[1]["Subject"])
- body = get_payload_text(outbox[1])
- self.assertIn("disclosure obligations", body)
- self.assertIn("starts a 10-week", body)
+ self.assertEqual(len(outbox), 1)
# Test not entering a duration on the form
draft = IndividualDraftFactory()
url = urlreverse(
@@ -2051,12 +2077,7 @@ def test_wg_call_for_adoption_issued(self):
),
)
self.assertEqual(r.status_code, 302)
- self.assertEqual(len(outbox), 2)
- self.assertIn("mars-wg@ietf.org", outbox[1]["To"])
- self.assertIn("Call for adoption", outbox[1]["Subject"])
- body = get_payload_text(outbox[1])
- self.assertIn("disclosure obligations", body)
- self.assertIn("starts a 2-week", body)
+ self.assertEqual(len(outbox), 1)
# Test the less usual workflow of issuing a call for adoption
# of a document that's already in the ietf stream
@@ -2086,12 +2107,7 @@ def test_wg_call_for_adoption_issued(self):
),
)
self.assertEqual(r.status_code, 302)
- self.assertEqual(len(outbox), 2)
- self.assertIn("mars-wg@ietf.org", outbox[1]["To"])
- self.assertIn("Call for adoption", outbox[1]["Subject"])
- body = get_payload_text(outbox[1])
- self.assertIn("disclosure obligations", body)
- self.assertIn("starts a 10-week", body)
+ self.assertEqual(len(outbox), 1)
draft = WgDraftFactory(group=role.group)
url = urlreverse(
"ietf.doc.views_draft.change_stream_state",
@@ -2117,85 +2133,210 @@ def test_wg_call_for_adoption_issued(self):
),
)
self.assertEqual(r.status_code, 302)
- self.assertEqual(len(outbox), 2)
- self.assertIn("mars-wg@ietf.org", outbox[1]["To"])
- self.assertIn("Call for adoption", outbox[1]["Subject"])
- body = get_payload_text(outbox[1])
- self.assertIn("disclosure obligations", body)
- self.assertIn("starts a 2-week", body)
+ self.assertEqual(len(outbox), 1)
- def test_wg_last_call_issued(self):
- role = RoleFactory(
- name_id="chair",
- group__acronym="mars",
- group__list_email="mars-wg@ietf.org",
- person__user__username="marschairman",
- person__name="WG Cháir Man",
+ def test_issue_wg_lc_form(self):
+ end_date = date_today(DEADLINE_TZINFO) + datetime.timedelta(days=1)
+ post = dict(
+ end_date=end_date,
+ to="foo@example.net, bar@example.com",
+ # Intentionally not passing cc
+ subject=f"garbage {end_date.isoformat()}",
+ body=f"garbage {end_date.isoformat()}",
)
- draft = WgDraftFactory(group=role.group)
- url = urlreverse(
- "ietf.doc.views_draft.change_stream_state",
- kwargs=dict(name=draft.name, state_type="draft-stream-ietf"),
+ form = IssueWorkingGroupLastCallForm(post)
+ self.assertTrue(form.is_valid())
+ post["end_date"] = date_today(DEADLINE_TZINFO)
+ form = IssueWorkingGroupLastCallForm(post)
+ self.assertFalse(form.is_valid())
+ self.assertIn(
+ "End date must be later than today",
+ form.errors["end_date"],
+ "Form accepted a too-early date",
)
- login_testing_unauthorized(self, "marschairman", url)
- old_state = draft.get_state("draft-stream-%s" % draft.stream_id)
- new_state = State.objects.get(
- used=True, type="draft-stream-%s" % draft.stream_id, slug="wg-lc"
+ post["end_date"] = end_date + datetime.timedelta(days=2)
+ form = IssueWorkingGroupLastCallForm(post)
+ self.assertFalse(form.is_valid())
+ self.assertIn(
+ f"Last call end date ({post['end_date'].isoformat()}) not found in subject",
+ form.errors["subject"],
+ "form allowed subject without end_date",
)
- self.assertNotEqual(old_state, new_state)
+ self.assertIn(
+ f"Last call end date ({post['end_date'].isoformat()}) not found in body",
+ form.errors["body"],
+ "form allowed body without end_date",
+ )
+
+ def test_issue_wg_lc(self):
+ def _assert_rejected(testcase, doc, person):
+ url = urlreverse(
+ "ietf.doc.views_draft.issue_wg_lc", kwargs=dict(name=doc.name)
+ )
+ login_testing_unauthorized(testcase, person.user.username, url)
+ r = testcase.client.get(url)
+ testcase.assertEqual(r.status_code, 404)
+ testcase.client.logout()
+
+ already_rfc = WgDraftFactory(states=[("draft", "rfc")])
+ rfc_chair = RoleFactory(name_id="chair", group=already_rfc.group).person
+ _assert_rejected(self, already_rfc, rfc_chair)
+ rg_doc = RgDraftFactory()
+ rg_chair = RoleFactory(name_id="chair", group=rg_doc.group).person
+ _assert_rejected(self, rg_doc, rg_chair)
+ inwglc_doc = WgDraftFactory(states=[("draft-stream-ietf", "wg-lc")])
+ inwglc_chair = RoleFactory(name_id="chair", group=inwglc_doc.group).person
+ _assert_rejected(self, inwglc_doc, inwglc_chair)
+ doc = WgDraftFactory()
+ chair = RoleFactory(name_id="chair", group=doc.group).person
+ url = urlreverse("ietf.doc.views_draft.issue_wg_lc", kwargs=dict(name=doc.name))
+ login_testing_unauthorized(self, chair.user.username, url)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ q = PyQuery(r.content)
+ postdict = dict()
+ postdict["end_date"] = q("input#id_end_date").attr("value")
+ postdict["to"] = q("input#id_to").attr("value") + ", extrato@example.org"
+ cc = q("input#id_cc").attr("value")
+ if cc is not None:
+ postdict["cc"] = cc + ", extracc@example.org"
+ else:
+ postdict["cc"] = "extracc@example.org"
+ postdict["subject"] = q("input#id_subject").attr("value") + " Extra Subject Words"
+ postdict["body"] = q("textarea#id_body").text() + "FGgqbQ$UNeXs"
empty_outbox()
r = self.client.post(
url,
- dict(
- new_state=new_state.pk,
- comment="some comment",
- weeks="10",
- tags=[
- t.pk
- for t in draft.tags.filter(
- slug__in=get_tags_for_stream_id(draft.stream_id)
- )
- ],
- ),
+ postdict,
)
self.assertEqual(r.status_code, 302)
+ self.assertEqual(doc.get_state_slug("draft-stream-ietf"), "wg-lc")
self.assertEqual(len(outbox), 2)
- self.assertIn("mars-wg@ietf.org", outbox[1]["To"])
+ self.assertIn(f"{doc.group.acronym}@ietf.org", outbox[1]["To"])
+ self.assertIn("extrato@example.org", outbox[1]["To"])
+ self.assertIn("extracc@example.org", outbox[1]["Cc"])
+ self.assertIn("Extra Subject Words", outbox[1]["Subject"])
self.assertIn("WG Last Call", outbox[1]["Subject"])
body = get_payload_text(outbox[1])
self.assertIn("disclosure obligations", body)
- self.assertIn("starts a 10-week", body)
- draft = WgDraftFactory(group=role.group)
- url = urlreverse(
- "ietf.doc.views_draft.change_stream_state",
- kwargs=dict(name=draft.name, state_type="draft-stream-ietf"),
+ self.assertIn("FGgqbQ$UNeXs", body)
+
+ def test_issue_wg_call_for_adoption_form(self):
+ end_date = date_today(DEADLINE_TZINFO) + datetime.timedelta(days=1)
+ post = dict(
+ end_date=end_date,
+ to="foo@example.net, bar@example.com",
+ # Intentionally not passing cc
+ subject=f"garbage {end_date.isoformat()}",
+ body=f"garbage {end_date.isoformat()}",
)
- old_state = draft.get_state("draft-stream-%s" % draft.stream_id)
- new_state = State.objects.get(
- used=True, type="draft-stream-%s" % draft.stream_id, slug="wg-lc"
+ form = IssueCallForAdoptionForm(post)
+ self.assertTrue(form.is_valid())
+ post["end_date"] = date_today(DEADLINE_TZINFO)
+ form = IssueCallForAdoptionForm(post)
+ self.assertFalse(form.is_valid())
+ self.assertIn(
+ "End date must be later than today",
+ form.errors["end_date"],
+ "Form accepted a too-early date",
)
- self.assertNotEqual(old_state, new_state)
- empty_outbox()
- r = self.client.post(
- url,
- dict(
- new_state=new_state.pk,
- comment="some comment",
- tags=[
- t.pk
- for t in draft.tags.filter(
- slug__in=get_tags_for_stream_id(draft.stream_id)
- )
- ],
- ),
+ post["end_date"] = end_date + datetime.timedelta(days=2)
+ form = IssueCallForAdoptionForm(post)
+ self.assertFalse(form.is_valid())
+ self.assertIn(
+ f"Call for adoption end date ({post['end_date'].isoformat()}) not found in subject",
+ form.errors["subject"],
+ "form allowed subject without end_date",
+ )
+ self.assertIn(
+ f"Call for adoption end date ({post['end_date'].isoformat()}) not found in body",
+ form.errors["body"],
+ "form allowed body without end_date",
+ )
+
+ def test_issue_wg_call_for_adoption(self):
+ def _assert_rejected(testcase, doc, person, group=None):
+ target_acronym = group.acronym if group is not None else doc.group.acronym
+ url = urlreverse(
+ "ietf.doc.views_draft.issue_wg_call_for_adoption",
+ kwargs=dict(name=doc.name, acronym=target_acronym),
+ )
+ login_testing_unauthorized(testcase, person.user.username, url)
+ r = testcase.client.get(url)
+ testcase.assertEqual(r.status_code, 403)
+ testcase.client.logout()
+
+ def _verify_call_issued(testcase, doc, chair_role):
+ url = urlreverse(
+ "ietf.doc.views_draft.issue_wg_call_for_adoption",
+ kwargs=dict(name=doc.name, acronym=chair_role.group.acronym),
+ )
+ login_testing_unauthorized(testcase, chair_role.person.user.username, url)
+ r = testcase.client.get(url)
+ testcase.assertEqual(r.status_code, 200)
+ q = PyQuery(r.content)
+ postdict = dict()
+ postdict["end_date"] = q("input#id_end_date").attr("value")
+ postdict["to"] = q("input#id_to").attr("value") + ", extrato@example.com"
+ testcase.assertIn(chair_role.group.list_email, postdict["to"])
+ cc = q("input#id_cc").attr("value")
+ if cc is not None:
+ postdict["cc"] = cc + ", extracc@example.com"
+ else:
+ postdict["cc"] = "extracc@example.com"
+ postdict["subject"] = q("input#id_subject").attr("value") + " Extra Subject Words"
+ postdict["body"] = q("textarea#id_body").text() + "FGgqbQ$UNeXs"
+ empty_outbox()
+ r = testcase.client.post(
+ url,
+ postdict,
+ )
+ testcase.assertEqual(r.status_code, 302)
+ doc.refresh_from_db()
+ testcase.assertEqual(doc.group, chair_role.group)
+ testcase.assertEqual(doc.get_state_slug("draft-stream-ietf"), "c-adopt")
+ testcase.assertEqual(len(outbox), 2)
+ testcase.assertIn(f"{doc.group.acronym}@ietf.org", outbox[1]["To"])
+ testcase.assertIn("extrato@example.com", outbox[1]["To"])
+ testcase.assertIn("extracc@example.com", outbox[1]["Cc"])
+ testcase.assertIn("Call for adoption", outbox[1]["Subject"])
+ testcase.assertIn("Extra Subject Words", outbox[1]["Subject"])
+ body = get_payload_text(outbox[1])
+ testcase.assertIn("disclosure obligations", body)
+ testcase.assertIn("FGgqbQ$UNeXs", body)
+ testcase.client.logout()
+ return doc
+
+ already_rfc = WgDraftFactory(states=[("draft", "rfc")])
+ rfc = WgRfcFactory(group=already_rfc.group)
+ already_rfc.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
+ rfc_chair = RoleFactory(name_id="chair", group=already_rfc.group).person
+ _assert_rejected(self, already_rfc, rfc_chair)
+ rg_doc = RgDraftFactory()
+ rg_chair = RoleFactory(name_id="chair", group=rg_doc.group).person
+ _assert_rejected(self, rg_doc, rg_chair)
+ inwglc_doc = WgDraftFactory(states=[("draft-stream-ietf", "wg-lc")])
+ inwglc_chair = RoleFactory(name_id="chair", group=inwglc_doc.group).person
+ _assert_rejected(self, inwglc_doc, inwglc_chair)
+ ind_doc = IndividualDraftFactory()
+ _assert_rejected(self, ind_doc, rg_chair, rg_doc.group)
+
+ # Successful call issued for doc already in WG
+ doc = WgDraftFactory(states=[("draft-stream-ietf","wg-cand")])
+ chair_role = RoleFactory(name_id="chair",group=doc.group)
+ _ = _verify_call_issued(self, doc, chair_role)
+
+ # Successful call issued for doc not yet in WG
+ doc = IndividualDraftFactory()
+ chair_role = RoleFactory(name_id="chair",group__type_id="wg")
+ doc = _verify_call_issued(self, doc, chair_role)
+ self.assertEqual(doc.group, chair_role.group)
+ self.assertEqual(doc.stream_id, "ietf")
+ self.assertEqual(doc.get_state_slug("draft-stream-ietf"), "c-adopt")
+ self.assertCountEqual(
+ doc.docevent_set.values_list("type", flat=True),
+ ["changed_state", "changed_group", "changed_stream", "new_revision"]
)
- self.assertEqual(r.status_code, 302)
- self.assertEqual(len(outbox), 2)
- self.assertIn("mars-wg@ietf.org", outbox[1]["To"])
- self.assertIn("WG Last Call", outbox[1]["Subject"])
- body = get_payload_text(outbox[1])
- self.assertIn("disclosure obligations", body)
- self.assertIn("starts a 2-week", body)
def test_pubreq_validation(self):
role = RoleFactory(name_id='chair',group__acronym='mars',group__list_email='mars-wg@ietf.org',person__user__username='marschairman',person__name='WG Cháir Man')
@@ -2393,6 +2534,188 @@ def test_editorial_metadata(self):
self.assertNotIn("IESG", top_level_metadata_headings)
self.assertNotIn("IANA", top_level_metadata_headings)
+class IetfGroupActionHelperTests(TestCase):
+ def test_manage_adoption_routing(self):
+ draft = IndividualDraftFactory()
+ nobody = PersonFactory()
+ rgchair = RoleFactory(group__type_id="rg", name_id="chair").person
+ wgchair = RoleFactory(group__type_id="wg", name_id="chair").person
+ multichair = RoleFactory(group__type_id="rg", name_id="chair").person
+ RoleFactory(group__type_id="wg", person=multichair, name_id="chair")
+ ad = RoleFactory(group__type_id="area", name_id="ad").person
+ secretary = Role.objects.filter(
+ name_id="secr", group__acronym="secretariat"
+ ).first()
+ self.assertIsNotNone(secretary)
+ secretary = secretary.person
+ self.assertFalse(
+ has_role(rgchair.user, ["Secretariat", "Area Director", "WG Chair"])
+ )
+ url = urlreverse(
+ "ietf.doc.views_doc.document_main", kwargs={"name": draft.name}
+ )
+ ask_about_ietf_link = urlreverse(
+ "ietf.doc.views_draft.ask_about_ietf_adoption_call",
+ kwargs={"name": draft.name},
+ )
+ non_ietf_adoption_link = urlreverse(
+ "ietf.doc.views_draft.adopt_draft", kwargs={"name": draft.name}
+ )
+ for person in (None, nobody, rgchair, wgchair, multichair, ad, secretary):
+ if person is not None:
+ self.client.login(
+ username=person.user.username,
+ password=f"{person.user.username}+password",
+ )
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ q = PyQuery(r.content)
+ has_ask_about_ietf_link = len(q(f'a[href="{ask_about_ietf_link}"]')) != 0
+ has_non_ietf_adoption_link = (
+ len(q(f'a[href="{non_ietf_adoption_link}"]')) != 0
+ )
+ ask_about_r = self.client.get(ask_about_ietf_link)
+ ask_about_link_return_code = ask_about_r.status_code
+ if person == rgchair:
+ self.assertFalse(has_ask_about_ietf_link)
+ self.assertTrue(has_non_ietf_adoption_link)
+ self.assertEqual(ask_about_link_return_code, 403)
+ elif person in (ad, nobody, None):
+ self.assertFalse(has_ask_about_ietf_link)
+ self.assertFalse(has_non_ietf_adoption_link)
+ self.assertEqual(
+ ask_about_link_return_code, 302 if person is None else 403
+ )
+ else:
+ self.assertTrue(has_ask_about_ietf_link)
+ self.assertFalse(has_non_ietf_adoption_link)
+ self.assertEqual(ask_about_link_return_code, 200)
+ self.client.logout()
+
+ def test_ask_about_ietf_adoption_call(self):
+ # Basic permission tests above
+ doc = IndividualDraftFactory()
+ self.assertEqual(doc.docevent_set.count(), 1)
+ chair_role = RoleFactory(group__type_id="wg", name_id="chair")
+ chair = chair_role.person
+ group = chair_role.group
+ othergroup = GroupFactory(type_id="wg")
+ url = urlreverse(
+ "ietf.doc.views_draft.ask_about_ietf_adoption_call",
+ kwargs={"name": doc.name},
+ )
+ login_testing_unauthorized(self, chair.user.username, url)
+ r = self.client.post(url, {"group": othergroup.pk})
+ self.assertEqual(r.status_code, 200)
+ r = self.client.post(url, {"group": group.pk})
+ self.assertEqual(r.status_code, 302)
+
+ def test_offer_wg_action_helpers(self):
+ def _assert_view_presents_buttons(testcase, response, expected):
+ q = PyQuery(response.content)
+ for id, expect in expected:
+ button = q(f"#{id}")
+ testcase.assertEqual(
+ len(button) != 0,
+ expect
+ )
+
+ # View rejects access
+ came_from_draft = WgDraftFactory(states=[("draft", "rfc")])
+ rfc = WgRfcFactory(group=came_from_draft.group)
+ came_from_draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
+ rfc_chair = RoleFactory(name_id="chair", group=rfc.group).person
+ url = urlreverse("ietf.doc.views_draft.offer_wg_action_helpers", kwargs=dict(name=came_from_draft.name))
+ login_testing_unauthorized(self, rfc_chair.user.username, url)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 404)
+ self.client.logout()
+ rg_draft = RgDraftFactory()
+ rg_chair = RoleFactory(group=rg_draft.group, name_id="chair").person
+ url = urlreverse("ietf.doc.views_draft.offer_wg_action_helpers", kwargs=dict(name=rg_draft.name))
+ login_testing_unauthorized(self, rg_chair.user.username, url)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 404)
+ self.client.logout()
+
+ # View offers access
+ draft = WgDraftFactory()
+ chair = RoleFactory(group=draft.group, name_id="chair").person
+ url = urlreverse("ietf.doc.views_draft.offer_wg_action_helpers", kwargs=dict(name=draft.name))
+ login_testing_unauthorized(self, chair.user.username, url)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ _assert_view_presents_buttons(
+ self,
+ r,
+ [
+ ("id_wgadopt_button", False),
+ ("id_wglc_button", True),
+ ("id_pubreq_button", True),
+ ],
+ )
+ draft.set_state(State.objects.get(type_id="draft-stream-ietf", slug="wg-cand"))
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ _assert_view_presents_buttons(
+ self,
+ r,
+ [
+ ("id_wgadopt_button", True),
+ ("id_wglc_button", False),
+ ("id_pubreq_button", False),
+ ],
+ )
+ draft.set_state(State.objects.get(type_id="draft-stream-ietf", slug="wg-lc"))
+ StateDocEventFactory(
+ doc=draft,
+ state_type_id="draft-stream-ietf",
+ state=("draft-stream-ietf", "wg-lc"),
+ )
+ self.assertEqual(draft.docevent_set.count(), 2)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ _assert_view_presents_buttons(
+ self,
+ r,
+ [
+ ("id_wgadopt_button", False),
+ ("id_wglc_button", False),
+ ("id_pubreq_button", True),
+ ],
+ )
+ draft.set_state(State.objects.get(type_id="draft-stream-ietf",slug="chair-w"))
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ _assert_view_presents_buttons(
+ self,
+ r,
+ [
+ ("id_wgadopt_button", False),
+ ("id_wglc_button", True),
+ ("id_pubreq_button", True),
+ ],
+ )
+ self.assertContains(
+ response=r, text="Issue Another Working Group Last Call", status_code=200
+ )
+ other_draft = WgDraftFactory()
+ self.client.logout()
+ url = urlreverse("ietf.doc.views_draft.offer_wg_action_helpers", kwargs=dict(name=other_draft.name))
+ login_testing_unauthorized(self, "secretary", url)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ _assert_view_presents_buttons(
+ self,
+ r,
+ [
+ ("id_wgadopt_button", False),
+ ("id_wglc_button", True),
+ ("id_pubreq_button", True),
+ ],
+ )
+ self.assertContains(
+ response=r, text="Issue Working Group Last Call", status_code=200
+ )
+
class BallotEmailAjaxTests(TestCase):
def test_ajax_build_position_email(self):
def _post_json(self, url, json_to_post):
diff --git a/ietf/doc/tests_notprepped.py b/ietf/doc/tests_notprepped.py
new file mode 100644
index 0000000000..f417aa7931
--- /dev/null
+++ b/ietf/doc/tests_notprepped.py
@@ -0,0 +1,122 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+from django.conf import settings
+from django.utils import timezone
+from django.urls import reverse as urlreverse
+
+from pyquery import PyQuery
+
+from ietf.doc.factories import WgRfcFactory
+from ietf.doc.models import StoredObject
+from ietf.doc.storage_utils import store_bytes
+from ietf.utils.test_utils import TestCase
+
+
+class NotpreppedRfcXmlTests(TestCase):
+ def test_editor_source_button_visibility(self):
+ pre_v3 = WgRfcFactory(rfc_number=settings.FIRST_V3_RFC - 1)
+ first_v3 = WgRfcFactory(rfc_number=settings.FIRST_V3_RFC)
+ post_v3 = WgRfcFactory(rfc_number=settings.FIRST_V3_RFC + 1)
+
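+ # The editor-source button should appear only for RFCs numbered
+ # FIRST_V3_RFC and above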
+ for rfc, expect_button in [(pre_v3, False), (first_v3, True), (post_v3, True)]:
+ r = self.client.get(
+ urlreverse(
+ "ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name)
+ )
+ )
+ self.assertEqual(r.status_code, 200)
+ buttons = PyQuery(r.content)('a.btn:contains("Get editor source")')
+ if expect_button:
+ self.assertEqual(len(buttons), 1, msg=f"rfc_number={rfc.rfc_number}")
+ expected_href = urlreverse(
+ "ietf.doc.views_doc.rfcxml_notprepped_wrapper",
+ kwargs=dict(number=rfc.rfc_number),
+ )
+ self.assertEqual(
+ buttons.attr("href"),
+ expected_href,
+ msg=f"rfc_number={rfc.rfc_number}",
+ )
+ else:
+ self.assertEqual(len(buttons), 0, msg=f"rfc_number={rfc.rfc_number}")
+
+ def test_rfcxml_notprepped(self):
+ number = settings.FIRST_V3_RFC
+ stored_name = f"notprepped/rfc{number}.notprepped.xml"
+ url = f"/doc/rfc{number}/notprepped/"
+
+ # 404 for pre-v3 RFC numbers (no document needed)
+ r = self.client.get(f"/doc/rfc{number - 1}/notprepped/")
+ self.assertEqual(r.status_code, 404)
+
+ # 404 when no RFC document exists in the database
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 404)
+
+ # 404 when RFC document exists but has no StoredObject
+ WgRfcFactory(rfc_number=number)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 404)
+
+ # 404 when StoredObject exists but backing storage is missing (FileNotFoundError)
+ now = timezone.now()
+ StoredObject.objects.create(
+ store="rfc",
+ name=stored_name,
+ sha384="a" * 96,
+ len=0,
+ store_created=now,
+ created=now,
+ modified=now,
+ )
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 404)
+
+ # 200 with correct content-type, attachment disposition, and body when object is fully stored
+ xml_content = b"test"
+ store_bytes("rfc", stored_name, xml_content, allow_overwrite=True)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ self.assertEqual(r["Content-Type"], "application/xml")
+ self.assertEqual(
+ r["Content-Disposition"],
+ f'attachment; filename="rfc{number}.notprepped.xml"',
+ )
+ self.assertEqual(b"".join(r.streaming_content), xml_content)
+
+ def test_rfcxml_notprepped_wrapper(self):
+ number = settings.FIRST_V3_RFC
+
+ # 404 for pre-v3 RFC numbers (no document needed)
+ r = self.client.get(
+ urlreverse(
+ "ietf.doc.views_doc.rfcxml_notprepped_wrapper",
+ kwargs=dict(number=number - 1),
+ )
+ )
+ self.assertEqual(r.status_code, 404)
+
+ # 404 when no RFC document exists in the database
+ r = self.client.get(
+ urlreverse(
+ "ietf.doc.views_doc.rfcxml_notprepped_wrapper",
+ kwargs=dict(number=number),
+ )
+ )
+ self.assertEqual(r.status_code, 404)
+
+ # 200 with rendered template when RFC document exists
+ rfc = WgRfcFactory(rfc_number=number)
+ r = self.client.get(
+ urlreverse(
+ "ietf.doc.views_doc.rfcxml_notprepped_wrapper",
+ kwargs=dict(number=number),
+ )
+ )
+ self.assertEqual(r.status_code, 200)
+ q = PyQuery(r.content)
+ self.assertIn(str(rfc.rfc_number), q("h1").text())
+ download_url = urlreverse(
+ "ietf.doc.views_doc.rfcxml_notprepped", kwargs=dict(number=number)
+ )
+ self.assertEqual(len(q(f'a.btn[href="{download_url}"]')), 1)
diff --git a/ietf/doc/tests_review.py b/ietf/doc/tests_review.py
index 8c1fc99ffe..82d1b5c232 100644
--- a/ietf/doc/tests_review.py
+++ b/ietf/doc/tests_review.py
@@ -822,7 +822,7 @@ def test_complete_review_upload_content(self):
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertContains(r, assignment.review_request.team.list_email)
- for author in assignment.review_request.doc.authors():
+ for author in assignment.review_request.doc.author_persons():
self.assertContains(r, author.formatted_email())
# faulty post
diff --git a/ietf/doc/tests_tasks.py b/ietf/doc/tests_tasks.py
index 29689cd596..2e2d65463f 100644
--- a/ietf/doc/tests_tasks.py
+++ b/ietf/doc/tests_tasks.py
@@ -1,18 +1,20 @@
-# Copyright The IETF Trust 2024, All Rights Reserved
+# Copyright The IETF Trust 2024-2026, All Rights Reserved
-import debug # pyflakes:ignore
import datetime
from unittest import mock
from pathlib import Path
+from celery.exceptions import Retry
from django.conf import settings
+from django.test.utils import override_settings
from django.utils import timezone
+from typesense import exceptions as typesense_exceptions
from ietf.utils.test_utils import TestCase
from ietf.utils.timezone import datetime_today
-from .factories import DocumentFactory, NewRevisionDocEventFactory
+from .factories import DocumentFactory, NewRevisionDocEventFactory, WgRfcFactory
from .models import Document, NewRevisionDocEvent
from .tasks import (
expire_ids_task,
@@ -22,8 +24,11 @@
generate_idnits2_rfc_status_task,
investigate_fragment_task,
notify_expirations_task,
+ rebuild_searchindex_task,
+ update_rfc_searchindex_task,
)
+
class TaskTests(TestCase):
@mock.patch("ietf.doc.tasks.in_draft_expire_freeze")
@mock.patch("ietf.doc.tasks.get_expired_drafts")
@@ -87,7 +92,7 @@ def test_expire_last_calls_task(self, mock_get_expired, mock_expire):
self.assertEqual(mock_expire.call_args_list[0], mock.call(docs[0]))
self.assertEqual(mock_expire.call_args_list[1], mock.call(docs[1]))
self.assertEqual(mock_expire.call_args_list[2], mock.call(docs[2]))
-
+
# Check that it runs even if exceptions occur
mock_get_expired.reset_mock()
mock_expire.reset_mock()
@@ -111,9 +116,82 @@ def test_investigate_fragment_task(self):
retval, {"name_fragment": "some fragment", "results": investigation_results}
)
+ @mock.patch("ietf.doc.tasks.searchindex.update_or_create_rfc_entry")
+ @mock.patch("ietf.doc.tasks.searchindex.enabled")
+ def test_update_rfc_searchindex_task(
+ self, mock_searchindex_enabled, mock_create_entry
+ ):
+ mock_searchindex_enabled.return_value = False
+
+ self.assertFalse(Document.objects.filter(rfc_number=5073).exists())
+ rfc = WgRfcFactory()
+ update_rfc_searchindex_task(rfc_number=5073)
+ self.assertFalse(mock_create_entry.called)
+ update_rfc_searchindex_task(rfc_number=rfc.rfc_number)
+ self.assertFalse(mock_create_entry.called)
+
+ mock_searchindex_enabled.return_value = True
+ update_rfc_searchindex_task(rfc_number=5073)
+ self.assertFalse(mock_create_entry.called)
+ update_rfc_searchindex_task(rfc_number=rfc.rfc_number)
+ self.assertTrue(mock_create_entry.called)
+
+ with override_settings(SEARCHINDEX_CONFIG={"TASK_MAX_RETRIES": 0}):
+ # Try a non-retryable error (there are others)
+ mock_create_entry.side_effect = typesense_exceptions.RequestMalformed
+ update_rfc_searchindex_task(rfc_number=rfc.rfc_number) # no retry
+ # Now what should be a retryable error
+ mock_create_entry.side_effect = typesense_exceptions.Timeout
+ with self.assertRaises(Retry):
+ update_rfc_searchindex_task(rfc_number=rfc.rfc_number)
+
+ @mock.patch("ietf.doc.tasks.searchindex.update_or_create_rfc_entries")
+ @mock.patch("ietf.doc.tasks.searchindex.create_collection")
+ @mock.patch("ietf.doc.tasks.searchindex.delete_collection")
+ def test_rebuild_searchindex_task(self, mock_delete, mock_create, mock_update):
+ rfcs = WgRfcFactory.create_batch(10)
+ rebuild_searchindex_task()
+ self.assertFalse(mock_delete.called)
+ self.assertFalse(mock_create.called)
+ self.assertTrue(mock_update.called)
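+ # The updater should receive the documents in descending RFC-number order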
+ self.assertQuerysetEqual(
+ mock_update.call_args.args[0],
+ sorted(rfcs, key=lambda doc: -doc.rfc_number),
+ ordered=True,
+ )
+
+ mock_delete.reset_mock()
+ mock_create.reset_mock()
+ mock_update.reset_mock()
+ rebuild_searchindex_task(drop_collection=True)
+ self.assertTrue(mock_delete.called)
+ self.assertTrue(mock_create.called)
+ self.assertTrue(mock_update.called)
+ self.assertQuerysetEqual(
+ mock_update.call_args.args[0],
+ sorted(rfcs, key=lambda doc: -doc.rfc_number),
+ ordered=True,
+ )
+
+ mock_delete.reset_mock()
+ mock_create.reset_mock()
+ mock_update.reset_mock()
+ rebuild_searchindex_task(drop_collection=True, batchsize=3)
+ self.assertTrue(mock_delete.called)
+ self.assertTrue(mock_create.called)
+ self.assertTrue(mock_update.called)
+ self.assertQuerysetEqual(
+ mock_update.call_args.args[0],
+ sorted(rfcs, key=lambda doc: -doc.rfc_number),
+ ordered=True,
+ )
+ self.assertEqual(mock_update.call_args.kwargs["batchsize"], 3)
+
class Idnits2SupportTests(TestCase):
- settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['DERIVED_DIR']
+ settings_temp_path_overrides = TestCase.settings_temp_path_overrides + [
+ "DERIVED_DIR"
+ ]
@mock.patch("ietf.doc.tasks.generate_idnits2_rfcs_obsoleted")
def test_generate_idnits2_rfcs_obsoleted_task(self, mock_generate):
@@ -151,7 +229,9 @@ def setUp(self):
)
# a couple that should always be ignored
NewRevisionDocEventFactory(
- time=now - datetime.timedelta(days=6), rev="09", doc__type_id="rfc" # not a draft
+ time=now - datetime.timedelta(days=6),
+ rev="09",
+ doc__type_id="rfc", # not a draft
)
NewRevisionDocEventFactory(
type="changed_document", # not a "new_revision" type
@@ -164,7 +244,9 @@ def setUp(self):
@mock.patch("ietf.doc.tasks.ensure_draft_bibxml_path_exists")
@mock.patch("ietf.doc.tasks.update_or_create_draft_bibxml_file")
- def test_generate_bibxml_files_for_all_drafts_task(self, mock_create, mock_ensure_path):
+ def test_generate_bibxml_files_for_all_drafts_task(
+ self, mock_create, mock_ensure_path
+ ):
generate_draft_bibxml_files_task(process_all=True)
self.assertTrue(mock_ensure_path.called)
self.assertCountEqual(
@@ -193,12 +275,15 @@ def test_generate_bibxml_files_for_all_drafts_task(self, mock_create, mock_ensur
@mock.patch("ietf.doc.tasks.ensure_draft_bibxml_path_exists")
@mock.patch("ietf.doc.tasks.update_or_create_draft_bibxml_file")
- def test_generate_bibxml_files_for_recent_drafts_task(self, mock_create, mock_ensure_path):
+ def test_generate_bibxml_files_for_recent_drafts_task(
+ self, mock_create, mock_ensure_path
+ ):
# default args - look back 7 days
generate_draft_bibxml_files_task()
self.assertTrue(mock_ensure_path.called)
self.assertCountEqual(
- mock_create.call_args_list, [mock.call(self.young_event.doc, self.young_event.rev)]
+ mock_create.call_args_list,
+ [mock.call(self.young_event.doc, self.young_event.rev)],
)
mock_create.reset_mock()
mock_ensure_path.reset_mock()
@@ -223,7 +308,9 @@ def test_generate_bibxml_files_for_recent_drafts_task(self, mock_create, mock_en
@mock.patch("ietf.doc.tasks.ensure_draft_bibxml_path_exists")
@mock.patch("ietf.doc.tasks.update_or_create_draft_bibxml_file")
- def test_generate_bibxml_files_for_recent_drafts_task_with_bad_value(self, mock_create, mock_ensure_path):
+ def test_generate_bibxml_files_for_recent_drafts_task_with_bad_value(
+ self, mock_create, mock_ensure_path
+ ):
with self.assertRaises(ValueError):
generate_draft_bibxml_files_task(days=0)
self.assertFalse(mock_create.called)
diff --git a/ietf/doc/tests_utils.py b/ietf/doc/tests_utils.py
index ef71f6ae6e..ba672cd847 100644
--- a/ietf/doc/tests_utils.py
+++ b/ietf/doc/tests_utils.py
@@ -1,15 +1,23 @@
# Copyright The IETF Trust 2020, All Rights Reserved
import datetime
+from io import BytesIO
+
+import mock
import debug # pyflakes:ignore
+import requests
from pathlib import Path
from unittest.mock import call, patch
from django.conf import settings
+from django.core.files.storage import storages
from django.db import IntegrityError
from django.test.utils import override_settings
from django.utils import timezone
+
+from ietf.doc.utils_r2 import rfcs_are_in_r2
+from ietf.doc.utils_red import trigger_red_precomputer
from ietf.group.factories import GroupFactory, RoleFactory
from ietf.name.models import DocTagName
from ietf.person.factories import PersonFactory
@@ -17,11 +25,12 @@
from ietf.utils.test_utils import TestCase, name_of_file_containing, reload_db_objects
from ietf.person.models import Person
from ietf.doc.factories import DocumentFactory, WgRfcFactory, WgDraftFactory
-from ietf.doc.models import State, DocumentActionHolder, DocumentAuthor
+from ietf.doc.models import State, DocumentActionHolder, DocumentAuthor, StoredObject
from ietf.doc.utils import (update_action_holders, add_state_change_event, update_documentauthors,
fuzzy_find_documents, rebuild_reference_relations, build_file_urls,
ensure_draft_bibxml_path_exists, update_or_create_draft_bibxml_file,
last_ballot_doc_revision)
+from ietf.doc.storage_utils import store_str
from ietf.utils.draft import Draft, PlaintextDraft
from ietf.utils.xmldraft import XMLDraft
@@ -389,13 +398,13 @@ def test_requires_txt_or_xml(self):
result = rebuild_reference_relations(self.doc, {})
self.assertCountEqual(result.keys(), ['errors'])
self.assertEqual(len(result['errors']), 1)
- self.assertIn('No Internet-Draft text available', result['errors'][0],
+ self.assertIn('No file available', result['errors'][0],
'Error should be reported if no Internet-Draft file is given')
result = rebuild_reference_relations(self.doc, {'md': 'cant-do-this.md'})
self.assertCountEqual(result.keys(), ['errors'])
self.assertEqual(len(result['errors']), 1)
- self.assertIn('No Internet-Draft text available', result['errors'][0],
+ self.assertIn('No file available', result['errors'][0],
'Error should be reported if no XML or plaintext file is given')
@patch.object(XMLDraft, 'get_refs')
@@ -559,3 +568,132 @@ def test_last_ballot_doc_revision(self):
nobody = PersonFactory()
self.assertIsNone(last_ballot_doc_revision(doc, nobody))
self.assertEqual(rev, last_ballot_doc_revision(doc, ad))
+
+
+class UtilsRedTests(TestCase):
+ @mock.patch("ietf.doc.utils_red.log")
+ @mock.patch("ietf.doc.utils_red.requests.post")
+ def test_trigger_red_precomputer_not_configured(self, mock_post, mock_log):
+ with override_settings():
+ try:
+ del settings.TRIGGER_RED_PRECOMPUTE_MULTIPLE_URL
+ except AttributeError:
+ pass
+ trigger_red_precomputer(rfc_number_list=[1, 2, 3])
+ self.assertEqual(mock_log.call_count, 1)
+ mock_args, _ = mock_log.call_args
+ self.assertEqual(
+ mock_args,
+ ("No URL configured for triggering red precompute multiple, skipping",),
+ )
+
+ mock_log.reset_mock()
+ with override_settings(TRIGGER_RED_PRECOMPUTE_MULTIPLE_URL=None):
+ trigger_red_precomputer(rfc_number_list=[1, 2, 3])
+ self.assertFalse(mock_post.called)
+ self.assertEqual(mock_log.call_count, 1)
+ mock_args, _ = mock_log.call_args
+ self.assertEqual(
+ mock_args,
+ ("No URL configured for triggering red precompute multiple, skipping",),
+ )
+
+ @override_settings(
+ TRIGGER_RED_PRECOMPUTE_MULTIPLE_URL="urlbits",
+ )
+ @mock.patch("ietf.doc.utils_red.log")
+ @mock.patch("ietf.doc.utils_red.requests.post", side_effect=requests.Timeout())
+ def test_trigger_red_precomputer_swallows_timeout_exception(
+ self, mock_post, mock_log
+ ):
+ exception_raised = False
+ try:
+ trigger_red_precomputer(rfc_number_list=[1, 2, 3])
+ except Exception:
+ exception_raised = True
+ self.assertFalse(exception_raised)
+ self.assertEqual(mock_log.call_count, 2)
+ # only checking the last log call
+ mock_args, _ = mock_log.call_args
+ self.assertEqual(len(mock_args), 1)
+ self.assertIn("POST request timed out", mock_args[0])
+
+ @override_settings(
+ TRIGGER_RED_PRECOMPUTE_MULTIPLE_URL="urlbits",
+ )
+ @mock.patch("ietf.doc.utils_red.requests.post", side_effect=Exception())
+ def test_trigger_red_precomputer_does_not_swallow_too_much(self, mock_post):
+ exception_raised = False
+ try:
+ trigger_red_precomputer(rfc_number_list=[1, 2, 3])
+ except Exception:
+ exception_raised = True
+ self.assertTrue(exception_raised)
+
+ @override_settings(
+ TRIGGER_RED_PRECOMPUTE_MULTIPLE_URL="urlbits",
+ DEFAULT_REQUESTS_TIMEOUT=314159265,
+ )
+ @mock.patch("ietf.doc.utils_red.log")
+ @mock.patch("ietf.doc.utils_red.requests.post")
+ def test_trigger_red_precomputer(self, mock_post, mock_log):
+ mock_post.return_value = mock.Mock(status_code=200)
+ trigger_red_precomputer(rfc_number_list=[1, 2, 3])
+ self.assertTrue(mock_post.called)
+ _, mock_kwargs = mock_post.call_args
+ self.assertIn("url", mock_kwargs)
+ self.assertEqual(mock_kwargs["url"], "urlbits")
+ self.assertIn("json", mock_kwargs)
+ self.assertEqual(mock_kwargs["json"], {"rfcs": "1,2,3"})
+ self.assertIn("timeout", mock_kwargs)
+ self.assertEqual(mock_kwargs["timeout"], 314159265)
+ self.assertEqual(mock_log.call_count, 1) # Not testing the first info log value
+ mock_log.reset_mock()
+ mock_post.reset_mock()
+ mock_post.return_value = mock.Mock(
+ status_code=500,
+ )
+ trigger_red_precomputer(rfc_number_list=[1, 2, 3])
+ self.assertEqual(mock_log.call_count, 2)
+ mock_args, _ = mock_log.call_args
+ self.assertEqual(len(mock_args), 1)
+ expected = f"POST request failed for {settings.TRIGGER_RED_PRECOMPUTE_MULTIPLE_URL} : status_code=500"
+ self.assertEqual(mock_args[0], expected)
+
+
+class UtilsR2TestCase(TestCase):
+ def test_rfcs_are_in_r2(self):
+ rfcs = WgRfcFactory.create_batch(2)
+ rfc_name_list = [rfc.name for rfc in rfcs]
+ rfc_number_list = [rfc.rfc_number for rfc in rfcs]
+ r2_rfc_bucket = storages["r2-rfc"]
+ # Right now the various doc Factories do not populate any content
+ self.assertEqual(
+ StoredObject.objects.filter(
+ store="rfc", doc_name__in=rfc_name_list
+ ).count(),
+ 0,
+ )
+ self.assertTrue(rfcs_are_in_r2(rfc_number_list=rfc_number_list))
+ for rfc in rfcs:
+ store_str(
+ kind="rfc",
+ name=f"testartifact/{rfc.name}.testartifact",
+ content="",
+ doc_name=rfc.name,
+ doc_rev=None,
+ )
+ self.assertEqual(
+ StoredObject.objects.filter(
+ store="rfc", doc_name__in=rfc_name_list
+ ).count(),
+ 2,
+ )
+ self.assertFalse(rfcs_are_in_r2(rfc_number_list=rfc_number_list))
+ r2_rfc_bucket.save(f"testartifact/{rfcs[0].name}.testartifact", BytesIO(b""))
+ self.assertFalse(rfcs_are_in_r2(rfc_number_list=rfc_number_list))
+ r2_rfc_bucket.save(f"testartifact/{rfcs[1].name}.testartifact", BytesIO(b""))
+ self.assertTrue(rfcs_are_in_r2(rfc_number_list=rfc_number_list))
+
+
+
diff --git a/ietf/doc/urls.py b/ietf/doc/urls.py
index 8e9c0569e2..0c13503b78 100644
--- a/ietf/doc/urls.py
+++ b/ietf/doc/urls.py
@@ -99,6 +99,8 @@
url(r'^%(name)s(?:/%(rev)s)?/$' % settings.URL_REGEXPS, views_doc.document_main),
url(r'^%(name)s(?:/%(rev)s)?/bibtex/$' % settings.URL_REGEXPS, views_doc.document_bibtex),
+ url(r'^rfc(?P<number>[0-9]+)/notprepped/$', views_doc.rfcxml_notprepped),
+ url(r'^rfc(?P<number>[0-9]+)/notprepped-wrapper/$', views_doc.rfcxml_notprepped_wrapper),
url(r'^%(name)s(?:/%(rev)s)?/idnits2-state/$' % settings.URL_REGEXPS, views_doc.idnits2_state),
url(r'^bibxml3/reference.I-D.%(name)s(?:-%(rev)s)?.xml$' % settings.URL_REGEXPS, views_doc.document_bibxml_ref),
url(r'^bibxml3/%(name)s(?:-%(rev)s)?.xml$' % settings.URL_REGEXPS, views_doc.document_bibxml),
@@ -125,6 +127,7 @@
url(r'^%(name)s/edit/info/$' % settings.URL_REGEXPS, views_draft.edit_info),
url(r'^%(name)s/edit/requestresurrect/$' % settings.URL_REGEXPS, views_draft.request_resurrect),
url(r'^%(name)s/edit/submit-to-iesg/$' % settings.URL_REGEXPS, views_draft.to_iesg),
+ url(r'^%(name)s/edit/issue-wg-lc/$' % settings.URL_REGEXPS, views_draft.issue_wg_lc),
url(r'^%(name)s/edit/resurrect/$' % settings.URL_REGEXPS, views_draft.resurrect),
url(r'^%(name)s/edit/addcomment/$' % settings.URL_REGEXPS, views_doc.add_comment),
@@ -143,9 +146,13 @@
url(r'^%(name)s/edit/shepherdemail/$' % settings.URL_REGEXPS, views_draft.change_shepherd_email),
url(r'^%(name)s/edit/shepherdwriteup/$' % settings.URL_REGEXPS, views_draft.edit_shepherd_writeup),
url(r'^%(name)s/edit/requestpublication/$' % settings.URL_REGEXPS, views_draft.request_publication),
+ url(r'^%(name)s/edit/ask-about-ietf-adoption/$' % settings.URL_REGEXPS, views_draft.ask_about_ietf_adoption_call),
url(r'^%(name)s/edit/adopt/$' % settings.URL_REGEXPS, views_draft.adopt_draft),
+ url(r'^%(name)s/edit/issue-wg-call-for-adoption/%(acronym)s/$' % settings.URL_REGEXPS, views_draft.issue_wg_call_for_adoption),
+
url(r'^%(name)s/edit/release/$' % settings.URL_REGEXPS, views_draft.release_draft),
url(r'^%(name)s/edit/state/(?P<state_type>draft-stream-[a-z]+)/$' % settings.URL_REGEXPS, views_draft.change_stream_state),
+ url(r'^%(name)s/edit/wg-action-helpers/$' % settings.URL_REGEXPS, views_draft.offer_wg_action_helpers),
url(r'^%(name)s/edit/state/statement/$' % settings.URL_REGEXPS, views_statement.change_statement_state),
url(r'^%(name)s/edit/clearballot/(?P<ballot_type_slug>[\w-]+)/$' % settings.URL_REGEXPS, views_ballot.clear_ballot),
diff --git a/ietf/doc/utils.py b/ietf/doc/utils.py
index 2bd9a3d314..8cbe5e8f3e 100644
--- a/ietf/doc/utils.py
+++ b/ietf/doc/utils.py
@@ -4,6 +4,7 @@
import datetime
import io
+import json
import math
import os
import re
@@ -13,7 +14,7 @@
from dataclasses import dataclass
from hashlib import sha384
from pathlib import Path
-from typing import Iterator, Optional, Union
+from typing import Iterator, Optional, Union, Iterable
from zoneinfo import ZoneInfo
from django.conf import settings
@@ -33,7 +34,14 @@
from ietf.community.models import CommunityList
from ietf.community.utils import docs_tracked_by_community_list
-from ietf.doc.models import Document, DocHistory, State, DocumentAuthor, DocHistoryAuthor
+from ietf.doc.models import (
+ DocHistory,
+ DocHistoryAuthor,
+ Document,
+ DocumentAuthor,
+ RfcAuthor,
+ State, EditedRfcAuthorsDocEvent,
+)
from ietf.doc.models import RelatedDocument, RelatedDocHistory, BallotType, DocReminder
from ietf.doc.models import DocEvent, ConsensusDocEvent, BallotDocEvent, IRSGBallotDocEvent, NewRevisionDocEvent, StateDocEvent
from ietf.doc.models import TelechatDocEvent, DocumentActionHolder, EditedAuthorsDocEvent, BallotPositionDocEvent
@@ -534,7 +542,7 @@ def update_action_holders(doc, prev_state=None, new_state=None, prev_tags=None,
doc.action_holders.clear()
if tags.removed("need-rev"):
# Removed the 'need-rev' tag - drop authors from the action holders list
- DocumentActionHolder.objects.filter(document=doc, person__in=doc.authors()).delete()
+ DocumentActionHolder.objects.filter(document=doc, person__in=doc.author_persons()).delete()
elif tags.added("need-rev"):
# Remove the AD if we're asking for a new revision
DocumentActionHolder.objects.filter(document=doc, person=doc.ad).delete()
@@ -549,7 +557,7 @@ def update_action_holders(doc, prev_state=None, new_state=None, prev_tags=None,
doc.action_holders.add(doc.ad)
# Authors get the action if a revision is needed
if tags.added("need-rev"):
- for auth in doc.authors():
+ for auth in doc.author_persons():
doc.action_holders.add(auth)
# Now create an event if we changed the set
@@ -561,6 +569,40 @@ def update_action_holders(doc, prev_state=None, new_state=None, prev_tags=None,
)
+def _change_field_and_describe(
+ author: DocumentAuthor | RfcAuthor,
+ field: str,
+ newval,
+ field_display_name: str | None = None,
+):
+ # make the change
+ oldval = getattr(author, field)
+ setattr(author, field, newval)
+
+ was_empty = oldval is None or len(str(oldval)) == 0
+ now_empty = newval is None or len(str(newval)) == 0
+
+ # describe the change
+ if oldval == newval:
+ return None
+ else:
+ if field_display_name is None:
+ field_display_name = field
+
+ if was_empty and not now_empty:
+ return 'set {field} to "{new}"'.format(
+ field=field_display_name, new=newval
+ )
+ elif now_empty and not was_empty:
+ return 'cleared {field} (was "{old}")'.format(
+ field=field_display_name, old=oldval
+ )
+ else:
+ return 'changed {field} from "{old}" to "{new}"'.format(
+ field=field_display_name, old=oldval, new=newval
+ )
+
+
def update_documentauthors(doc, new_docauthors, by=None, basis=None):
"""Update the list of authors for a document
@@ -573,27 +615,6 @@ def update_documentauthors(doc, new_docauthors, by=None, basis=None):
used. These objects will not be saved, their attributes will be used to create new
DocumentAuthor instances. (The document and order fields will be ignored.)
"""
- def _change_field_and_describe(auth, field, newval):
- # make the change
- oldval = getattr(auth, field)
- setattr(auth, field, newval)
-
- was_empty = oldval is None or len(str(oldval)) == 0
- now_empty = newval is None or len(str(newval)) == 0
-
- # describe the change
- if oldval == newval:
- return None
- else:
- if was_empty and not now_empty:
- return 'set {field} to "{new}"'.format(field=field, new=newval)
- elif now_empty and not was_empty:
- return 'cleared {field} (was "{old}")'.format(field=field, old=oldval)
- else:
- return 'changed {field} from "{old}" to "{new}"'.format(
- field=field, old=oldval, new=newval
- )
-
persons = []
changes = [] # list of change descriptions
@@ -637,6 +658,123 @@ def _change_field_and_describe(auth, field, newval):
) for change in changes
]
+
+def update_rfcauthors(
+ rfc: Document, new_rfcauthors: Iterable[RfcAuthor], by: Person | None = None
+) -> Iterable[EditedRfcAuthorsDocEvent]:
+ def _find_matching_author(
+ author_to_match: RfcAuthor, existing_authors: Iterable[RfcAuthor]
+ ) -> RfcAuthor | None:
+ """Helper to find a matching existing author"""
+ if author_to_match.person_id is not None:
+ for candidate in existing_authors:
+ if candidate.person_id == author_to_match.person_id:
+ return candidate
+ return None # no match
+ # author does not have a person, match on titlepage name
+ for candidate in existing_authors:
+ if candidate.titlepage_name == author_to_match.titlepage_name:
+ return candidate
+ return None # no match
+
+ def _rfcauthor_from_documentauthor(docauthor: DocumentAuthor) -> RfcAuthor:
+ """Helper to create an equivalent RfcAuthor from a DocumentAuthor"""
+ return RfcAuthor(
+ document_id=docauthor.document_id,
+ titlepage_name=docauthor.person.plain_name(), # closest thing we have
+ is_editor=False,
+ person_id=docauthor.person_id,
+ affiliation=docauthor.affiliation,
+ country=docauthor.country,
+ order=docauthor.order,
+ )
+
+ # Is this the first time this document is getting an RfcAuthor? If so, the
+ # updates will need to account for the model change.
+ converting_from_docauthors = not rfc.rfcauthor_set.exists()
+
+ if converting_from_docauthors:
+ original_authors = [
+ _rfcauthor_from_documentauthor(da) for da in rfc.documentauthor_set.all()
+ ]
+ else:
+ original_authors = list(rfc.rfcauthor_set.all())
+
+ authors_to_commit = []
+ changes = []
+ for order, new_author in enumerate(new_rfcauthors):
+ matching_author = _find_matching_author(new_author, original_authors)
+ if matching_author is not None:
+ # Update existing matching author using new_author data
+ authors_to_commit.append(matching_author)
+ original_authors.remove(matching_author) # avoid reuse
+ # Describe changes to this author
+ author_changes = []
+ # Update fields other than order
+ for field in ["titlepage_name", "is_editor", "affiliation", "country"]:
+ author_changes.append(
+ _change_field_and_describe(
+ matching_author,
+ field,
+ getattr(new_author, field),
+ # List titlepage_name as "name" in logs
+ "name" if field == "titlepage_name" else field,
+ )
+ )
+ # Update order
+ author_changes.append(
+ _change_field_and_describe(matching_author, "order", order + 1)
+ )
+ matching_author.save()
+ author_change_summary = ", ".join(
+ [ch for ch in author_changes if ch is not None]
+ )
+ if len(author_change_summary) > 0:
+ changes.append(
+ 'Changed author "{name}": {summary}'.format(
+ name=matching_author.titlepage_name,
+ summary=author_change_summary,
+ )
+ )
+ else:
+ # No author matched, so update the new_author and use that
+ new_author.document = rfc
+ new_author.order = order + 1
+ new_author.save()
+ if new_author.person_id is not None:
+ person_desc = f"Person {new_author.person_id}"
+ else:
+ person_desc = "no Person linked"
+ changes.append(
+ f'Added "{new_author.titlepage_name}" ({person_desc}) as author'
+ )
+ # Any authors left in original_authors are no longer in the list, so remove them
+ for removed_author in original_authors:
+ # Skip actual removal of old authors if we are converting from the
+ # DocumentAuthor models - the original_authors were just stand-ins anyway.
+ if not converting_from_docauthors:
+ removed_author.delete()
+ if removed_author.person_id is not None:
+ person_desc = f"Person {removed_author.person_id}"
+ else:
+ person_desc = "no Person linked"
+ changes.append(
+ f'Removed "{removed_author.titlepage_name}" ({person_desc}) as author'
+ )
+ # Create DocEvents, but leave it up to caller to save
+ if by is None:
+ by = Person.objects.get(name="(System)")
+ return [
+ EditedRfcAuthorsDocEvent(
+ type="edited_authors",
+ by=by,
+ doc=rfc,
+ desc=change,
+ )
+ for change in changes
+ ]
+
+
def update_reminder(doc, reminder_type_slug, event, due_date):
reminder_type = DocReminderTypeName.objects.get(slug=reminder_type_slug)
@@ -816,50 +954,93 @@ def rebuild_reference_relations(doc, filenames):
filenames should be a dict mapping file ext (i.e., type) to the full path of each file.
"""
- if doc.type.slug != 'draft':
+ if doc.type.slug not in ["draft", "rfc"]:
+ log.log(f"rebuild_reference_relations called for non draft/rfc doc {doc.name}")
return None
- # try XML first
- if 'xml' in filenames:
- refs = XMLDraft(filenames['xml']).get_refs()
- elif 'txt' in filenames:
- filename = filenames['txt']
- try:
- refs = draft.PlaintextDraft.from_file(filename).get_refs()
- except IOError as e:
- return { 'errors': ["%s :%s" % (e.strerror, filename)] }
- else:
- return {'errors': ['No Internet-Draft text available for rebuilding reference relations. Need XML or plaintext.']}
- doc.relateddocument_set.filter(relationship__slug__in=['refnorm','refinfo','refold','refunk']).delete()
+ if "xml" not in filenames and "txt" not in filenames:
+ log.log(f"rebuild_reference_relations error: no file available for {doc.name}")
+ return {
+ "errors": [
+ "No file available for rebuilding reference relations. Need XML or plaintext."
+ ]
+ }
+ else:
+ try:
+ # try XML first
+ if "xml" in filenames:
+ filename = filenames["xml"]
+ refs = XMLDraft(filename).get_refs()
+ else:
+ filename = filenames["txt"]
+ refs = draft.PlaintextDraft.from_file(filename).get_refs()
+ except (IOError, UnicodeDecodeError) as e:
+ # filename is always bound here: the outer check guarantees a branch ran
+ log.log(f"rebuild_reference_relations error: On {doc.name}: {e}")
+ return {"errors": [f"{e}: {filename}"]}
+
+ before = set(doc.relateddocument_set.filter(
+ relationship__slug__in=["refnorm", "refinfo", "refold", "refunk"]
+ ).values_list("relationship__slug", "target__name"))
warnings = []
errors = []
unfound = set()
- for ( ref, refType ) in refs.items():
- refdoc = Document.objects.filter(name=ref)
- if not refdoc and re.match(r"^draft-.*-\d{2}$", ref):
- refdoc = Document.objects.filter(name=ref[:-3])
+ intended = set()
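+ # A ref may cite a specific revision ("draft-foo-03"); include the base
+ # name as well so a single bulk query covers both forms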
+ names = [ref for ref in refs]
+ names.extend([ref[:-3] for ref in refs if re.match(r"^draft-.*-\d{2}$", ref)])
+ queryset = Document.objects.filter(name__in=names)
+ for ref, refType in refs.items():
+ refdoc = queryset.filter(name=ref)
+ if not refdoc.exists() and re.match(r"^draft-.*-\d{2}$", ref):
+ refdoc = queryset.filter(name=ref[:-3])
count = refdoc.count()
if count == 0:
- unfound.add( "%s" % ref )
+ unfound.add("%s" % ref)
continue
elif count > 1:
- errors.append("Too many Document objects found for %s"%ref)
+ log.unreachable("2026-3-16") # This branch is holdover from DocAlias
+ errors.append("Too many Document objects found for %s" % ref)
else:
# Don't add references to ourself
if doc != refdoc[0]:
- RelatedDocument.objects.get_or_create( source=doc, target=refdoc[ 0 ], relationship=DocRelationshipName.objects.get( slug='ref%s' % refType ) )
+ intended.add((f"ref{refType}", refdoc[0].name))
+
if unfound:
- warnings.append('There were %d references with no matching Document'%len(unfound))
+ warnings.append(
+ "There were %d references with no matching Document" % len(unfound)
+ )
+
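+ # Apply only the delta between current and intended reference sets so
+ # unchanged RelatedDocument rows are left untouched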
+ if intended != before:
+ for slug, name in before - intended:
+ doc.relateddocument_set.filter(target__name=name, relationship_id=slug).delete()
+ for slug, name in intended - before:
+ doc.relateddocument_set.create(
+ target=queryset.get(name=name),
+ relationship_id=slug
+ )
+ after = set(doc.relateddocument_set.filter(
+ relationship__slug__in=["refnorm", "refinfo", "refold", "refunk"]
+ ).values_list("relationship__slug", "target__name"))
+ if after != intended:
+ errors.append("Attempted changed didn't achieve intended results")
+ changed_references = True
+ else:
+ changed_references = False
ret = {}
if errors:
- ret['errors']=errors
+ ret["errors"] = errors
if warnings:
- ret['warnings']=warnings
+ ret["warnings"] = warnings
if unfound:
- ret['unfound']=list(unfound)
+ ret["unfound"] = list(unfound)
+
+ logmsg = f"rebuild_reference_relations for {doc.name}: "
+ logmsg += "changed references" if changed_references else "references unchanged"
+ if ret:
+ logmsg += f" {json.dumps(ret)}"
+
+ log.log(logmsg)
return ret
diff --git a/ietf/doc/utils_bofreq.py b/ietf/doc/utils_bofreq.py
index aec8f60ad6..d01b039b8e 100644
--- a/ietf/doc/utils_bofreq.py
+++ b/ietf/doc/utils_bofreq.py
@@ -1,12 +1,149 @@
-# Copyright The IETF Trust 2021 All Rights Reserved
+# Copyright The IETF Trust 2021-2026 All Rights Reserved
+import datetime
+from pathlib import Path
-from ietf.doc.models import BofreqEditorDocEvent, BofreqResponsibleDocEvent
+from django.conf import settings
+
+from ietf.doc.models import (
+ BofreqEditorDocEvent,
+ BofreqResponsibleDocEvent,
+ DocEvent,
+ DocHistory,
+ Document,
+)
from ietf.person.models import Person
+from ietf.utils import log
+
def bofreq_editors(bofreq):
e = bofreq.latest_event(BofreqEditorDocEvent)
return e.editors.all() if e else Person.objects.none()
+
def bofreq_responsible(bofreq):
e = bofreq.latest_event(BofreqResponsibleDocEvent)
- return e.responsible.all() if e else Person.objects.none()
\ No newline at end of file
+ return e.responsible.all() if e else Person.objects.none()
+
+
+def fixup_bofreq_timestamps(): # pragma: nocover
+ """Fixes bofreq event / document timestamps
+
+ Timestamp errors resulted from the bug fixed by
+ https://github.com/ietf-tools/datatracker/pull/10333
+
+ Does not fix up -00 revs because the timestamps on these were not affected by
+ the bug. Replacing their timestamps creates a confusing event history because the
+ filesystem timestamp is usually a fraction of a second later than other events
+ created upon the initial rev creation. This causes the "New revision available"
+ event to appear _after_ these events in the history. Better to leave them as is.
+ """
+ FIX_DEPLOYMENT_TIME = "2026-02-03T01:16:00+00:00" # 12.58.0 -> production
+
+ def _get_doc_time(doc_name: str, rev: str):
+ path = Path(settings.BOFREQ_PATH) / f"{doc_name}-{rev}.md"
+ return datetime.datetime.fromtimestamp(path.stat().st_mtime, datetime.UTC)
+
+ # Find affected DocEvents and DocHistories
+ new_bofreq_events = (
+ DocEvent.objects.filter(
+ doc__type="bofreq", type="new_revision", time__lt=FIX_DEPLOYMENT_TIME
+ )
+ .exclude(rev="00") # bug did not affect rev 00 events
+ .order_by("doc__name", "rev")
+ )
+ log.log(
+ f"fixup_bofreq_timestamps: found {new_bofreq_events.count()} "
+ f"new_revision events before {FIX_DEPLOYMENT_TIME}"
+ )
+ document_fixups = {}
+ for e in new_bofreq_events:
+ name = e.doc.name
+ rev = e.rev
+ filesystem_time = _get_doc_time(name, rev)
+ assert e.time < filesystem_time, (
+ f"Rev {rev} event timestamp for {name} unexpectedly later than the "
+ "filesystem timestamp!"
+ )
+ try:
+ dochistory = DocHistory.objects.filter(
+ name=name, time__lt=filesystem_time
+ ).get(rev=rev)
+ except DocHistory.MultipleObjectsReturned as err:
+ raise RuntimeError(
+ f"Multiple DocHistories for {name} rev {rev} exist earlier than the "
+ "filesystem timestamp!"
+ ) from err
+ except DocHistory.DoesNotExist as err:
+ if rev == "00":
+ # Unreachable because we don't adjust -00 revs, but could be needed
+ # if we did, in theory. In practice it's still not reached, but
+ # keeping the case for completeness.
+ dochistory = None
+ else:
+ raise RuntimeError(
+ f"No DocHistory for {name} rev {rev} exists earlier than the "
+ f"filesystem timestamp!"
+ ) from err
+
+ if name not in document_fixups:
+ document_fixups[name] = []
+ document_fixups[name].append(
+ {
+ "event": e,
+ "dochistory": dochistory,
+ "filesystem_time": filesystem_time,
+ }
+ )
+
+ # Now do the actual fixup
+ system_person = Person.objects.get(name="(System)")
+ for doc_name, fixups in document_fixups.items():
+ bofreq = Document.objects.get(type="bofreq", name=doc_name)
+ log_msg_parts = []
+ adjusted_revs = []
+ for fixup in fixups:
+ event_to_fix = fixup["event"]
+ dh_to_fix = fixup["dochistory"]
+ new_time = fixup["filesystem_time"]
+ adjusted_revs.append(event_to_fix.rev)
+
+ # Fix up the event
+ event_to_fix.time = new_time
+ event_to_fix.save()
+ log_msg_parts.append(f"rev {event_to_fix.rev} DocEvent")
+
+ # Fix up the DocHistory
+ if dh_to_fix is not None:
+ dh_to_fix.time = new_time
+ dh_to_fix.save()
+ log_msg_parts.append(f"rev {dh_to_fix.rev} DocHistory")
+
+ if event_to_fix.rev == bofreq.rev and bofreq.time < new_time:
+ # Update the Document without calling save(). Only update if
+ # the time has not changed so we don't inadvertently overwrite
+ # a concurrent update.
+ Document.objects.filter(pk=bofreq.pk, time=bofreq.time).update(
+ time=new_time
+ )
+ bofreq.refresh_from_db()
+ if bofreq.rev == event_to_fix.rev:
+ log_msg_parts.append(f"rev {bofreq.rev} Document")
+ else:
+ log.log(
+ "fixup_bofreq_timestamps: WARNING: bofreq Document rev "
+ f"changed for {bofreq.name}"
+ )
+ log.log(f"fixup_bofreq_timestamps: {bofreq.name}: " + ", ".join(log_msg_parts))
+
+ # Fix up the Document, if necessary, and add a record of the adjustment
+ DocEvent.objects.create(
+ type="added_comment",
+ by=system_person,
+ doc=bofreq,
+ rev=bofreq.rev,
+ desc=(
+ "Corrected inaccurate document and new revision event timestamps for "
+ + ("version " if len(adjusted_revs) == 1 else "versions ")
+ + ", ".join(adjusted_revs)
+ ),
+ )
diff --git a/ietf/doc/utils_errata.py b/ietf/doc/utils_errata.py
new file mode 100644
index 0000000000..539262151f
--- /dev/null
+++ b/ietf/doc/utils_errata.py
@@ -0,0 +1,35 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+import requests
+
+from django.conf import settings
+
+from ietf.utils.log import log
+
+
+def signal_update_rfc_metadata(rfc_number_list=()):
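+ # Notify the external errata metadata service that these RFCs changed.
+ # Failures are logged rather than raised so callers are never blocked.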
+ key = getattr(settings, "ERRATA_METADATA_NOTIFICATION_API_KEY", None)
+ if key is not None:
+ headers = {"X-Api-Key": settings.ERRATA_METADATA_NOTIFICATION_API_KEY}
+ post_dict = {
+ "rfc_number_list": list(rfc_number_list),
+ }
+ try:
+ response = requests.post(
+ settings.ERRATA_METADATA_NOTIFICATION_URL,
+ headers=headers,
+ json=post_dict,
+ timeout=settings.DEFAULT_REQUESTS_TIMEOUT,
+ )
+ except requests.Timeout as e:
+ log(
+ f"POST request timed out for {settings.ERRATA_METADATA_NOTIFICATION_URL} ]: {e}"
+ )
+ # raise RuntimeError(f'POST request timed out for {settings.ERRATA_METADATA_NOTIFICATION_URL}') from e
+ return
+ if response.status_code != 200:
+ log(
+ f"POST request failed for {settings.ERRATA_METADATA_NOTIFICATION_URL} ]: {response.status_code} {response.text}"
+ )
+ else:
+ log("No API key configured for errata metadata notification, skipping")
diff --git a/ietf/doc/utils_r2.py b/ietf/doc/utils_r2.py
new file mode 100644
index 0000000000..53fb978303
--- /dev/null
+++ b/ietf/doc/utils_r2.py
@@ -0,0 +1,17 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+from django.core.files.storage import storages
+
+from ietf.doc.models import StoredObject
+
+
+def rfcs_are_in_r2(rfc_number_list=()):
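+ # True only if every StoredObject recorded for each listed RFC also
+ # exists in the r2-rfc bucket; an empty list is vacuously True.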
+ r2_rfc_bucket = storages["r2-rfc"]
+ for rfc_number in rfc_number_list:
+ stored_objects = StoredObject.objects.filter(
+ store="rfc", doc_name=f"rfc{rfc_number}"
+ )
+ for stored_object in stored_objects:
+ if not r2_rfc_bucket.exists(stored_object.name):
+ return False
+ return True
diff --git a/ietf/doc/utils_red.py b/ietf/doc/utils_red.py
new file mode 100644
index 0000000000..bcda893dca
--- /dev/null
+++ b/ietf/doc/utils_red.py
@@ -0,0 +1,31 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+import requests
+
+from django.conf import settings
+
+from ietf.utils.log import log
+
+
+def trigger_red_precomputer(rfc_number_list=()):
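+ # Ask the red precompute service to rebuild its data for the given RFCs.
+ # Timeouts and non-200 responses are logged, not raised.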
+ url = getattr(settings, "TRIGGER_RED_PRECOMPUTE_MULTIPLE_URL", None)
+ if url is not None:
+ payload = {
+ "rfcs": ",".join([str(n) for n in rfc_number_list]),
+ }
+ try:
+ log(f"Triggering red precompute multiple for RFCs {rfc_number_list}")
+ response = requests.post(
+ url=url,
+ json=payload,
+ timeout=settings.DEFAULT_REQUESTS_TIMEOUT,
+ )
+ except requests.Timeout as e:
+ log(f"POST request timed out for {url} : {e}")
+ return
+ if response.status_code != 200:
+ log(
+ f"POST request failed for {url} : status_code={response.status_code}"
+ )
+ else:
+ log("No URL configured for triggering red precompute multiple, skipping")
diff --git a/ietf/doc/views_bofreq.py b/ietf/doc/views_bofreq.py
index 71cbe30491..94e3960dfa 100644
--- a/ietf/doc/views_bofreq.py
+++ b/ietf/doc/views_bofreq.py
@@ -91,7 +91,6 @@ def submit(request, name):
by=request.user.person,
rev=bofreq.rev,
desc='New revision available',
- time=bofreq.time,
)
bofreq.save_with_history([e])
bofreq_submission = form.cleaned_data['bofreq_submission']
diff --git a/ietf/doc/views_doc.py b/ietf/doc/views_doc.py
index 5210317325..5b57a62074 100644
--- a/ietf/doc/views_doc.py
+++ b/ietf/doc/views_doc.py
@@ -1,4 +1,4 @@
-# Copyright The IETF Trust 2009-2024, All Rights Reserved
+# Copyright The IETF Trust 2009-2026, All Rights Reserved
# -*- coding: utf-8 -*-
#
# Parts Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).
@@ -43,9 +43,10 @@
from celery.result import AsyncResult
from django.core.cache import caches
+from django.core.files.base import ContentFile
from django.core.exceptions import PermissionDenied
from django.db.models import Max
-from django.http import HttpResponse, Http404, HttpResponseBadRequest, JsonResponse
+from django.http import FileResponse, HttpResponse, Http404, HttpResponseBadRequest, JsonResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.template.loader import render_to_string
from django.urls import reverse as urlreverse
@@ -57,7 +58,7 @@
import debug # pyflakes:ignore
from ietf.doc.models import ( Document, DocHistory, DocEvent, BallotDocEvent, BallotType,
- ConsensusDocEvent, NewRevisionDocEvent, TelechatDocEvent, WriteupDocEvent, IanaExpertDocEvent,
+ ConsensusDocEvent, NewRevisionDocEvent, StoredObject, TelechatDocEvent, WriteupDocEvent, IanaExpertDocEvent,
IESG_BALLOT_ACTIVE_STATES, STATUSCHANGE_RELATIONS, DocumentActionHolder, DocumentAuthor,
RelatedDocument, RelatedDocHistory)
from ietf.doc.tasks import investigate_fragment_task
@@ -86,6 +87,7 @@
from ietf.review.models import ReviewAssignment
from ietf.review.utils import can_request_review_of_doc, review_assignments_to_list_for_docs, review_requests_to_list_for_docs
from ietf.review.utils import no_review_from_teams_on_doc
+from ietf.doc.storage_utils import retrieve_bytes
from ietf.utils import markup_txt, log, markdown
from ietf.utils.draft import get_status_from_draft_text
from ietf.utils.meetecho import MeetechoAPIError, SlidesManager
@@ -515,13 +517,17 @@ def document_main(request, name, rev=None, document_html=False):
# remaining actions
actions = []
- if can_adopt_draft(request.user, doc) and not doc.get_state_slug() in ["rfc"] and not snapshot:
+ if can_adopt_draft(request.user, doc) and doc.get_state_slug() not in ["rfc"] and not snapshot:
+ target = urlreverse("ietf.doc.views_draft.adopt_draft", kwargs=dict(name=doc.name))
if doc.group and doc.group.acronym != 'none': # individual submission
# already adopted in one group
button_text = "Switch adoption"
else:
button_text = "Manage adoption"
- actions.append((button_text, urlreverse('ietf.doc.views_draft.adopt_draft', kwargs=dict(name=doc.name))))
+ # can_adopt_draft currently returns False for Area Directors
+ if has_role(request.user, ["Secretariat", "WG Chair"]):
+ target = urlreverse("ietf.doc.views_draft.ask_about_ietf_adoption_call", kwargs=dict(name=doc.name))
+ actions.append((button_text, target))
if can_unadopt_draft(request.user, doc) and not doc.get_state_slug() in ["rfc"] and not snapshot:
if doc.get_state_slug('draft-iesg') == 'idexists':
@@ -1281,9 +1287,7 @@ def document_bibtex(request, name, rev=None):
break
elif doc.type_id == "rfc":
- # This needs to be replaced with a lookup, as the mapping may change
- # over time.
- doi = f"10.17487/RFC{doc.rfc_number:04d}"
+ doi = doc.doi
if doc.is_dochistory():
latest_event = doc.latest_event(type='new_revision', rev=rev)
@@ -1649,11 +1653,18 @@ def extract_name(s):
data["state"] = extract_name(doc.get_state())
data["intended_std_level"] = extract_name(doc.intended_std_level)
data["std_level"] = extract_name(doc.std_level)
+ author_qs = (
+ doc.rfcauthor_set
+ if doc.type_id == "rfc" and doc.rfcauthor_set.exists()
+ else doc.documentauthor_set
+ ).select_related("person").prefetch_related("person__email_set").order_by("order")
data["authors"] = [
- dict(name=author.person.name,
- email=author.email.address if author.email else None,
- affiliation=author.affiliation)
- for author in doc.documentauthor_set.all().select_related("person", "email").order_by("order")
+ {
+ "name": author.titlepage_name if hasattr(author, "titlepage_name") else author.person.name,
+ "email": author.email.address if author.email else None,
+ "affiliation": author.affiliation,
+ }
+ for author in author_qs
]
data["shepherd"] = doc.shepherd.formatted_email() if doc.shepherd else None
data["ad"] = doc.ad.role_email("ad").formatted_email() if doc.ad else None
@@ -1937,9 +1948,9 @@ def edit_action_holders(request, name):
role_ids = dict() # maps role slug to list of Person IDs (assumed numeric in the JavaScript)
extra_prefetch = [] # list of Person objects to prefetch for select2 field
- if len(doc.authors()) > 0:
+ authors = doc.author_persons()
+ if len(authors) > 0:
doc_role_labels.append(dict(slug='authors', label='Authors'))
- authors = doc.authors()
role_ids['authors'] = [p.pk for p in authors]
extra_prefetch += authors
@@ -2347,3 +2358,29 @@ def investigate(request):
"results": results,
},
)
+
+def rfcxml_notprepped(request, number):
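+ # Serve the stored not-prepped XML for a v3-era RFC from the blob store.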
+ number = int(number)
+ if number < settings.FIRST_V3_RFC:
+ raise Http404
+ rfc = Document.objects.filter(type="rfc", rfc_number=number).first()
+ if rfc is None:
+ raise Http404
+ name = f"notprepped/rfc{number}.notprepped.xml"
+ if not StoredObject.objects.filter(name=name).exists():
+ raise Http404
+ try:
+ content = retrieve_bytes("rfc", name)
+ except FileNotFoundError:
+ raise Http404
+ return FileResponse(ContentFile(content, name=f"rfc{number}.notprepped.xml"), as_attachment=True)
+
+
+def rfcxml_notprepped_wrapper(request, number):
+ number = int(number)
+ if number < settings.FIRST_V3_RFC:
+ raise Http404
+ rfc = Document.objects.filter(type="rfc", rfc_number=number).first()
+ if rfc is None:
+ raise Http404
+ return render(request, "doc/notprepped_wrapper.html", context={"rfc": rfc})
diff --git a/ietf/doc/views_draft.py b/ietf/doc/views_draft.py
index 16d04ee66a..c5faf1140b 100644
--- a/ietf/doc/views_draft.py
+++ b/ietf/doc/views_draft.py
@@ -28,12 +28,12 @@
IanaExpertDocEvent, IESG_SUBSTATE_TAGS)
from ietf.doc.mails import ( email_pulled_from_rfc_queue, email_resurrect_requested,
email_resurrection_completed, email_state_changed, email_stream_changed,
- email_wg_call_for_adoption_issued, email_wg_last_call_issued,
email_stream_state_changed, email_stream_tags_changed, extra_automation_headers,
generate_publication_request, email_adopted, email_intended_status_changed,
email_iesg_processing_document, email_ad_approved_doc,
email_iana_expert_review_state_changed )
from ietf.doc.storage_utils import retrieve_bytes, store_bytes
+from ietf.doc.templatetags.ietf_filters import is_doc_ietf_adoptable
from ietf.doc.utils import ( add_state_change_event, can_adopt_draft, can_unadopt_draft,
get_tags_for_stream_id, nice_consensus, update_action_holders,
update_reminder, update_telechat, make_notify_changed_event, get_initial_notify,
@@ -51,12 +51,12 @@
from ietf.name.models import IntendedStdLevelName, DocTagName, StreamName
from ietf.person.fields import SearchableEmailField
from ietf.person.models import Person, Email
-from ietf.utils.mail import send_mail, send_mail_message, on_behalf_of
+from ietf.utils.mail import send_mail, send_mail_message, on_behalf_of, send_mail_text
from ietf.utils.textupload import get_cleaned_text_file_content
from ietf.utils import log
-from ietf.utils.fields import ModelMultipleChoiceField
+from ietf.utils.fields import DatepickerDateField, ModelMultipleChoiceField, MultiEmailField
from ietf.utils.response import permission_denied
-from ietf.utils.timezone import datetime_today, DEADLINE_TZINFO
+from ietf.utils.timezone import date_today, datetime_from_date, datetime_today, DEADLINE_TZINFO
class ChangeStateForm(forms.Form):
@@ -1564,7 +1564,7 @@ def adopt_draft(request, name):
events.append(e)
due_date = None
- if form.cleaned_data["weeks"] != None:
+ if form.cleaned_data["weeks"] is not None:
due_date = datetime_today(DEADLINE_TZINFO) + datetime.timedelta(weeks=form.cleaned_data["weeks"])
update_reminder(doc, "stream-s", e, due_date)
@@ -1573,11 +1573,6 @@ def adopt_draft(request, name):
# setting states that are _not_ the adopted state.
email_adopted(request, doc, prev_state, new_state, by, comment)
- # Currently only the IETF stream uses the c-adopt state - guard against other
- # streams starting to use it asthe IPR rules for those streams will be different.
- if doc.stream_id == "ietf" and new_state.slug == "c-adopt":
- email_wg_call_for_adoption_issued(request, doc, cfa_duration_weeks=form.cleaned_data["weeks"])
-
# comment
if comment:
e = DocEvent(type="added_comment", doc=doc, rev=doc.rev, by=by)
@@ -1689,11 +1684,14 @@ def __init__(self, *args, **kwargs):
f.queryset = f.queryset.exclude(pk__in=unused_states)
f.label = state_type.label
if self.stream.slug == 'ietf':
+ help_text_items = []
if self.can_set_sub_pub:
- f.help_text = "Only select 'Submitted to IESG for Publication' to correct errors. Use the document's main page to request publication."
+ help_text_items.append("Only select 'Submitted to IESG for Publication' to correct errors. This is not how to submit a document to the IESG.")
else:
f.queryset = f.queryset.exclude(slug='sub-pub')
- f.help_text = "You may not set the 'Submitted to IESG for Publication' using this form - Use the document's main page to request publication."
+ help_text_items.append("You may not set the 'Submitted to IESG for Publication' using this form - Use the button above or the document's main page to request publication.")
+ help_text_items.append("Only use this form in unusual circumstances when issuing call for adoption or working group last call.")
+ f.help_text = " ".join(help_text_items)
f = self.fields['tags']
f.queryset = f.queryset.filter(slug__in=get_tags_for_stream_id(doc.stream_id))
@@ -1704,7 +1702,7 @@ def __init__(self, *args, **kwargs):
def clean_new_state(self):
new_state = self.cleaned_data.get('new_state')
if new_state.slug=='sub-pub' and not self.can_set_sub_pub:
- raise forms.ValidationError('You may not set the %s state using this form. Use the "Submit to IESG for publication" button on the document\'s main page instead. If that button does not appear, the document may already have IESG state. Ask your Area Director or the Secretariat for help.'%new_state.name)
+ raise forms.ValidationError('You may not set the %s state using this form. Use the "Submit to IESG for Publication" button on the document\'s main page instead. If that button does not appear, the document may already have IESG state. Ask your Area Director or the Secretariat for help.'%new_state.name)
return new_state
@@ -1730,6 +1728,19 @@ def next_states_for_stream_state(doc, state_type, current_state):
return next_states
+@login_required
+def offer_wg_action_helpers(request, name):
+ doc = get_object_or_404(Document, type="draft", name=name)
+ if doc.stream is None or doc.stream_id != "ietf" or doc.became_rfc() is not None:
+ raise Http404
+
+ if not is_authorized_in_doc_stream(request.user, doc):
+ permission_denied(request, "You don't have permission to access this page.")
+
+ return render(request, "doc/draft/wg_action_helpers.html",
+ {"doc": doc,
+ })
+
@login_required
def change_stream_state(request, name, state_type):
doc = get_object_or_404(Document, type="draft", name=name)
@@ -1744,10 +1755,17 @@ def change_stream_state(request, name, state_type):
prev_state = doc.get_state(state_type.slug)
next_states = next_states_for_stream_state(doc, state_type, prev_state)
+ # These tell the form to allow directly setting the state to fix up errors.
can_set_sub_pub = has_role(request.user,('Secretariat','Area Director')) or (prev_state and prev_state.slug=='sub-pub')
if request.method == 'POST':
- form = ChangeStreamStateForm(request.POST, doc=doc, state_type=state_type,can_set_sub_pub=can_set_sub_pub,stream=doc.stream)
+ form = ChangeStreamStateForm(
+ request.POST,
+ doc=doc,
+ state_type=state_type,
+ can_set_sub_pub=can_set_sub_pub,
+ stream=doc.stream,
+ )
if form.is_valid():
by = request.user.person
events = []
@@ -1768,14 +1786,7 @@ def change_stream_state(request, name, state_type):
update_reminder(doc, "stream-s", e, due_date)
email_stream_state_changed(request, doc, prev_state, new_state, by, comment)
-
- if doc.stream_id == "ietf":
- if new_state.slug == "c-adopt":
- email_wg_call_for_adoption_issued(request, doc, cfa_duration_weeks=form.cleaned_data["weeks"])
- if new_state.slug == "wg-lc":
- email_wg_last_call_issued(request, doc, wglc_duration_weeks=form.cleaned_data["weeks"])
-
# tags
existing_tags = set(doc.tags.all())
new_tags = set(form.cleaned_data["tags"])
@@ -1811,8 +1822,15 @@ def change_stream_state(request, name, state_type):
else:
form.add_error(None, "No change in state or tags found, and no comment provided -- nothing to do.")
else:
- form = ChangeStreamStateForm(initial=dict(new_state=prev_state.pk if prev_state else None, tags= doc.tags.all()),
- doc=doc, state_type=state_type, can_set_sub_pub = can_set_sub_pub,stream = doc.stream)
+ form = ChangeStreamStateForm(
+ initial=dict(
+ new_state=prev_state.pk if prev_state else None, tags=doc.tags.all()
+ ),
+ doc=doc,
+ state_type=state_type,
+ can_set_sub_pub=can_set_sub_pub,
+ stream=doc.stream,
+ )
milestones = doc.groupmilestone_set.all()
@@ -1857,3 +1875,325 @@ def set_intended_status_level(request, doc, new_level, old_level, comment):
msg = "\n".join(e.desc for e in events)
email_intended_status_changed(request, doc, msg)
+
+class IssueWorkingGroupLastCallForm(forms.Form):
+ end_date = DatepickerDateField(
+ required=True,
+ date_format="yyyy-mm-dd",
+ picker_settings={
+ "autoclose": "1",
+ },
+ help_text="The date the Last Call closes. If you change this, review the subject and body carefully to ensure the change is captured correctly.",
+ )
+
+ to = MultiEmailField(
+ required=True,
+ help_text="Comma separated list of address to use in the To: header",
+ )
+ cc = MultiEmailField(
+ required=False, help_text="Comma separated list of addresses to copy"
+ )
+ subject = forms.CharField(
+ required=True,
+ help_text="Subject for Last Call message. If you change the date here, be sure to make a matching change in the body.",
+ )
+ body = forms.CharField(
+ widget=forms.Textarea, required=True, help_text="Body for Last Call message"
+ )
+
+ def clean_end_date(self):
+ end_date = self.cleaned_data["end_date"]
+ if end_date <= date_today(DEADLINE_TZINFO):
+ raise forms.ValidationError("End date must be later than today")
+ return end_date
+
+ def clean(self):
+ cleaned_data = super().clean()
+ end_date = cleaned_data.get("end_date")
+ if end_date is not None:
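+ # Require the ISO-format end date verbatim in both subject and body so
+ # the announcement matches the reminder that will be set.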
+ body = cleaned_data.get("body")
+ subject = cleaned_data.get("subject")
+ if end_date.isoformat() not in body:
+ self.add_error(
+ "body",
+ forms.ValidationError(
+ f"Last call end date ({end_date.isoformat()}) not found in body"
+ ),
+ )
+ if end_date.isoformat() not in subject:
+ self.add_error(
+ "subject",
+ forms.ValidationError(
+ f"Last call end date ({end_date.isoformat()}) not found in subject"
+ ),
+ )
+ return cleaned_data
+
+
+@login_required
+def issue_wg_lc(request, name):
+ doc = get_object_or_404(Document, name=name)
+
+ if doc.stream_id != "ietf":
+ raise Http404
+ if doc.type_id != "draft" or doc.group.type_id != "wg":
+ raise Http404
+ if doc.get_state_slug("draft-stream-ietf") == "wg-lc":
+ raise Http404
+ if doc.get_state_slug("draft") == "rfc":
+ raise Http404
+
+ if not is_authorized_in_doc_stream(request.user, doc):
+ permission_denied(request, "You don't have permission to access this page.")
+
+ if request.method == "POST":
+ form = IssueWorkingGroupLastCallForm(request.POST)
+ if form.is_valid():
+ # Intentionally not changing tags or adding a comment;
+ # those can be done through other workflows.
+ by = request.user.person
+ prev_state = doc.get_state("draft-stream-ietf")
+ events = []
+ wglc_state = State.objects.get(type="draft-stream-ietf", slug="wg-lc")
+ doc.set_state(wglc_state)
+ e = add_state_change_event(doc, by, prev_state, wglc_state)
+ events.append(e)
+ end_date = form.cleaned_data["end_date"]
+ update_reminder(
+ doc, "stream-s", e, datetime_from_date(end_date, DEADLINE_TZINFO)
+ )
+ doc.save_with_history(events)
+ email_stream_state_changed(request, doc, prev_state, wglc_state, by)
+ send_mail_text(
+ request,
+ to = form.cleaned_data["to"],
+ frm = request.user.person.formatted_email(),
+ subject = form.cleaned_data["subject"],
+ txt = form.cleaned_data["body"],
+ cc = form.cleaned_data["cc"],
+ )
+ return redirect("ietf.doc.views_doc.document_main", name=doc.name)
+ else:
+ end_date = date_today(DEADLINE_TZINFO) + datetime.timedelta(days=14)
+ subject = f"WG Last Call: {doc.name}-{doc.rev} (Ends {end_date})"
+ body = render_to_string(
+ "doc/mail/wg_last_call_issued.txt",
+ dict(
+ doc=doc,
+ end_date=end_date,
+ wg_list=doc.group.list_email,
+ settings=settings,
+ ),
+ )
+ (to, cc) = gather_address_lists("doc_wg_last_call_issued", doc=doc)
+
+ form = IssueWorkingGroupLastCallForm(
+ initial=dict(
+ end_date=end_date,
+ to=", ".join(to),
+ cc=", ".join(cc),
+ subject=subject,
+ body=body,
+ )
+ )
+
+ return render(
+ request,
+ "doc/draft/issue_working_group_last_call.html",
+ dict(
+ doc=doc,
+ form=form,
+ ),
+ )
+
+class IssueCallForAdoptionForm(forms.Form):
+ end_date = DatepickerDateField(
+ required=True,
+ date_format="yyyy-mm-dd",
+ picker_settings={
+ "autoclose": "1",
+ },
+ help_text="The date the Call for Adoption closes. If you change this, review the subject and body carefully to ensure the change is captured correctly.",
+ )
+
+ to = MultiEmailField(
+ required=True,
+ help_text="Comma separated list of address to use in the To: header",
+ )
+ cc = MultiEmailField(
+ required=False, help_text="Comma separated list of addresses to copy"
+ )
+ subject = forms.CharField(
+ required=True,
+ help_text="Subject for Call for Adoption message. If you change the date here, be sure to make a matching change in the body.",
+ )
+ body = forms.CharField(
+ widget=forms.Textarea, required=True, help_text="Body for Call for Adoption message"
+ )
+
+ def clean_end_date(self):
+ end_date = self.cleaned_data["end_date"]
+ if end_date <= date_today(DEADLINE_TZINFO):
+ raise forms.ValidationError("End date must be later than today")
+ return end_date
+
+ def clean(self):
+ cleaned_data = super().clean()
+ end_date = cleaned_data.get("end_date")
+ if end_date is not None:
+ body = cleaned_data.get("body")
+ subject = cleaned_data.get("subject")
+ if end_date.isoformat() not in body:
+ self.add_error(
+ "body",
+ forms.ValidationError(
+ f"Call for adoption end date ({end_date.isoformat()}) not found in body"
+ ),
+ )
+ if end_date.isoformat() not in subject:
+ self.add_error(
+ "subject",
+ forms.ValidationError(
+ f"Call for adoption end date ({end_date.isoformat()}) not found in subject"
+ ),
+ )
+ return cleaned_data
+
+@login_required
+def issue_wg_call_for_adoption(request, name, acronym):
+ doc = get_object_or_404(Document, name=name)
+ group = Group.objects.filter(acronym=acronym, type_id="wg").first()
+ reject = False
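+ # Allow adoption calls for unadopted drafts (no stream; caller must be
+ # able to adopt) or IETF-stream drafts (caller must be authorized in the
+ # stream); drafts in any other stream are rejected.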
+ if group is None or doc.type_id != "draft" or not is_doc_ietf_adoptable(doc):
+ reject = True
+ if doc.stream is None:
+ if not can_adopt_draft(request.user, doc):
+ reject = True
+ elif doc.stream_id != "ietf":
+ reject = True
+ else: # doc.stream_id == "ietf"
+ if not is_authorized_in_doc_stream(request.user, doc):
+ reject = True
+ if reject:
+ permission_denied(request, f"You can't issue a {acronym} WG call for adoption for this document.")
+
+ if request.method == "POST":
+ form = IssueCallForAdoptionForm(request.POST)
+ if form.is_valid():
+ # Intentionally not changing tags or adding a comment;
+ # those can be done through other workflows.
+ by = request.user.person
+
+ events = []
+ if doc.stream_id != "ietf":
+ stream = StreamName.objects.get(slug="ietf")
+ doc.stream = stream
+ e = DocEvent(type="changed_stream", doc=doc, rev=doc.rev, by=by)
+ e.desc = f"Changed stream to {stream.name}" # Propogates embedding html in DocEvent.desc for consistency
+ e.save()
+ events.append(e)
+ if doc.group != group:
+ doc.group = group
+ e = DocEvent(type="changed_group", doc=doc, rev=doc.rev, by=by)
+ e.desc = f"Changed group to {group.name} ({group.acronym.upper()})" # Even if it makes the cats cry
+ e.save()
+ events.append(e)
+ prev_state = doc.get_state("draft-stream-ietf")
+ c_adopt_state = State.objects.get(type="draft-stream-ietf", slug="c-adopt")
+ doc.set_state(c_adopt_state)
+ e = add_state_change_event(doc, by, prev_state, c_adopt_state)
+ events.append(e)
+ end_date = form.cleaned_data["end_date"]
+ update_reminder(
+ doc, "stream-s", e, datetime_from_date(end_date, DEADLINE_TZINFO)
+ )
+ doc.save_with_history(events)
+ email_stream_state_changed(request, doc, prev_state, c_adopt_state, by)
+ send_mail_text(
+ request,
+ to = form.cleaned_data["to"],
+ frm = request.user.person.formatted_email(),
+ subject = form.cleaned_data["subject"],
+ txt = form.cleaned_data["body"],
+ cc = form.cleaned_data["cc"],
+ )
+ return redirect("ietf.doc.views_doc.document_main", name=doc.name)
+ else:
+ end_date = date_today(DEADLINE_TZINFO) + datetime.timedelta(days=14)
+ subject = f"Call for adoption: {doc.name}-{doc.rev} (Ends {end_date})"
+ body = render_to_string(
+ "doc/mail/wg_call_for_adoption_issued.txt",
+ dict(
+ doc=doc,
+ group=group,
+ end_date=end_date,
+ wg_list=doc.group.list_email,
+ settings=settings,
+ ),
+ )
+ (to, cc) = gather_address_lists("doc_wg_call_for_adoption_issued", doc=doc)
+ if doc.group.acronym == "none":
+ to.insert(0, f"{group.acronym}-chairs@ietf.org")
+ to.insert(0, group.list_email)
+ form = IssueCallForAdoptionForm(
+ initial=dict(
+ end_date=end_date,
+ to=", ".join(to),
+ cc=", ".join(cc),
+ subject=subject,
+ body=body,
+ )
+ )
+
+ return render(
+ request,
+ "doc/draft/issue_working_group_call_for_adoption.html",
+ dict(
+ doc=doc,
+ form=form,
+ ),
+ )
+
+class GroupModelChoiceField(forms.ModelChoiceField):
+ def label_from_instance(self, obj):
+ return f"{obj.acronym} - {obj.name}"
+
+
+class WgForm(forms.Form):
+ group = GroupModelChoiceField(
+ queryset=Group.objects.filter(type_id="wg", state="active")
+ .order_by("acronym")
+ .distinct(),
+ required=True,
+ empty_label="Select IETF Working Group",
+ )
+
+ def __init__(self, *args, **kwargs):
+ user = kwargs.pop("user")
+ super().__init__(*args, **kwargs)
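+ # Secretariat and Area Directors may pick any active WG; other users are
+ # limited to the groups they chair.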
+ if not has_role(user, ["Secretariat", "Area Director"]):
+ self.fields["group"].queryset = self.fields["group"].queryset.filter(
+ role__name_id="chair", role__person=user.person
+ )
+
+
+@role_required("Secretariat", "WG Chair")
+def ask_about_ietf_adoption_call(request, name):
+ doc = get_object_or_404(Document, name=name)
+ if doc.stream is not None or doc.group.acronym != "none":
+ raise Http404
+ if request.method == "POST":
+ form = WgForm(request.POST, user=request.user)
+ if form.is_valid():
+ group = form.cleaned_data["group"]
+ return redirect(issue_wg_call_for_adoption, name=doc.name, acronym=group.acronym)
+ else:
+ form = WgForm(initial={"group": None}, user=request.user)
+ return render(
+ request,
+ "doc/draft/ask_about_ietf_adoption.html",
+ dict(
+ doc=doc,
+ form=form,
+ ),
+ )
diff --git a/ietf/doc/views_search.py b/ietf/doc/views_search.py
index 3b67061b05..4232d77f6c 100644
--- a/ietf/doc/views_search.py
+++ b/ietf/doc/views_search.py
@@ -219,7 +219,7 @@ def retrieve_search_results(form, all_types=False):
queries.extend([Q(targets_related__source__name__icontains=look_for, targets_related__relationship_id="became_rfc")])
combined_query = reduce(operator.or_, queries)
- docs = docs.filter(combined_query).distinct()
+ docs = docs.filter(combined_query)
# rfc/active/old check buttons
allowed_draft_states = []
@@ -229,20 +229,23 @@ def retrieve_search_results(form, all_types=False):
allowed_draft_states.extend(['repl', 'expired', 'auth-rm', 'ietf-rm'])
docs = docs.filter(Q(states__slug__in=allowed_draft_states) |
- ~Q(type__slug='draft')).distinct()
+ ~Q(type__slug='draft'))
# radio choices
by = query["by"]
if by == "author":
docs = docs.filter(
Q(documentauthor__person__alias__name__icontains=query["author"]) |
- Q(documentauthor__person__email__address__icontains=query["author"])
+ Q(documentauthor__person__email__address__icontains=query["author"]) |
+ Q(rfcauthor__person__alias__name__icontains=query["author"]) |
+ Q(rfcauthor__person__email__address__icontains=query["author"]) |
+ Q(rfcauthor__titlepage_name__icontains=query["author"])
)
elif by == "group":
docs = docs.filter(group__acronym__iexact=query["group"])
elif by == "area":
docs = docs.filter(Q(group__type="wg", group__parent=query["area"]) |
- Q(group=query["area"])).distinct()
+ Q(group=query["area"]))
elif by == "ad":
docs = docs.filter(ad=query["ad"])
elif by == "state":
@@ -255,6 +258,8 @@ def retrieve_search_results(form, all_types=False):
elif by == "stream":
docs = docs.filter(stream=query["stream"])
+ docs = docs.distinct()
+
return docs
diff --git a/ietf/group/admin.py b/ietf/group/admin.py
index fedec49d85..685c10aeea 100644
--- a/ietf/group/admin.py
+++ b/ietf/group/admin.py
@@ -26,14 +26,15 @@
MilestoneGroupEvent, GroupExtResource, Appeal, AppealArtifact )
from ietf.name.models import GroupTypeName
-from ietf.utils.validators import validate_external_resource_value
+from ietf.utils.admin import SaferTabularInline
from ietf.utils.response import permission_denied
+from ietf.utils.validators import validate_external_resource_value
-class RoleInline(admin.TabularInline):
+class RoleInline(SaferTabularInline):
model = Role
raw_id_fields = ["person", "email"]
-class GroupURLInline(admin.TabularInline):
+class GroupURLInline(SaferTabularInline):
model = GroupURL
class GroupForm(forms.ModelForm):
diff --git a/ietf/group/models.py b/ietf/group/models.py
index 2d5e7c4e6f..a7e3c6616e 100644
--- a/ietf/group/models.py
+++ b/ietf/group/models.py
@@ -111,6 +111,9 @@ def active_wgs(self):
def closed_wgs(self):
return self.wgs().exclude(state__in=Group.ACTIVE_STATE_IDS)
+ def areas(self):
+ return self.get_queryset().filter(type="area")
+
def with_meetings(self):
return self.get_queryset().filter(type__features__has_meetings=True)
diff --git a/ietf/group/serializers.py b/ietf/group/serializers.py
new file mode 100644
index 0000000000..e789ba46bf
--- /dev/null
+++ b/ietf/group/serializers.py
@@ -0,0 +1,50 @@
+# Copyright The IETF Trust 2024-2026, All Rights Reserved
+"""django-rest-framework serializers"""
+
+from drf_spectacular.utils import extend_schema_field
+from rest_framework import serializers
+
+from ietf.person.models import Email
+from .models import Group, Role
+
+
+class GroupSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = Group
+ fields = ["acronym", "name", "type", "list_email"]
+
+
+class AreaDirectorSerializer(serializers.Serializer):
+ """Serialize an area director
+
+ Works with Email or Role
+ """
+
+ name = serializers.SerializerMethodField()
+ email = serializers.SerializerMethodField()
+
+ @extend_schema_field(serializers.CharField)
+ def get_name(self, instance: Email | Role):
+ person = getattr(instance, 'person', None)
+ return person.plain_name() if person else None
+
+ @extend_schema_field(serializers.EmailField)
+ def get_email(self, instance: Email | Role):
+ if isinstance(instance, Role):
+ return instance.email.email_address()
+ return instance.email_address()
+
+
+class AreaSerializer(serializers.ModelSerializer):
+ ads = serializers.SerializerMethodField()
+
+ class Meta:
+ model = Group
+ fields = ["acronym", "name", "ads"]
+
+ @extend_schema_field(AreaDirectorSerializer(many=True))
+ def get_ads(self, area: Group):
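+ # Concluded areas intentionally serialize with an empty AD list.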
+ return AreaDirectorSerializer(
+ area.ads if area.is_active else Role.objects.none(),
+ many=True,
+ ).data
diff --git a/ietf/group/tests_info.py b/ietf/group/tests_info.py
index 34f8500854..3f24e2e3d6 100644
--- a/ietf/group/tests_info.py
+++ b/ietf/group/tests_info.py
@@ -543,6 +543,25 @@ def verify_can_edit_group(url, group, username):
for username in list(set(interesting_users)-set(can_edit[group.type_id])):
verify_cannot_edit_group(url, group, username)
+ def test_group_about_team_parent(self):
+ """Team about page should show parent when parent is not an area"""
+ GroupFactory(type_id='team', parent=GroupFactory(type_id='area', acronym='gen'))
+ GroupFactory(type_id='team', parent=GroupFactory(type_id='ietf', acronym='iab'))
+ GroupFactory(type_id='team', parent=None)
+
+ for team in Group.objects.filter(type='team').select_related('parent'):
+ url = urlreverse('ietf.group.views.group_about', kwargs=dict(acronym=team.acronym))
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ if team.parent and team.parent.type_id != 'area':
+ self.assertContains(r, 'Parent')
+ self.assertContains(r, team.parent.acronym)
+ elif team.parent and team.parent.type_id == 'area':
+ self.assertContains(r, team.parent.name)
+ self.assertNotContains(r, '>Parent<')
+ else:
+ self.assertNotContains(r, '>Parent<')
+
def test_group_about_personnel(self):
"""Correct personnel should appear on the group About page"""
group = GroupFactory()
diff --git a/ietf/group/tests_review.py b/ietf/group/tests_review.py
index 89c755bb26..bb9b79a416 100644
--- a/ietf/group/tests_review.py
+++ b/ietf/group/tests_review.py
@@ -888,10 +888,10 @@ def test_requests_history_filter_page(self):
self.assertEqual(r.status_code, 200)
self.assertContains(r, review_req.doc.name)
self.assertContains(r, review_req2.doc.name)
- self.assertContains(r, 'Assigned')
- self.assertContains(r, 'Accepted')
- self.assertContains(r, 'Completed')
- self.assertContains(r, 'Ready')
+ self.assertContains(r, 'data-text="Assigned"')
+ self.assertContains(r, 'data-text="Accepted"')
+ self.assertContains(r, 'data-text="Completed"')
+ self.assertContains(r, 'data-text="Ready"')
self.assertContains(r, escape(assignment.reviewer.person.name))
self.assertContains(r, escape(assignment2.reviewer.person.name))
@@ -907,10 +907,10 @@ def test_requests_history_filter_page(self):
self.assertEqual(r.status_code, 200)
self.assertContains(r, review_req.doc.name)
self.assertNotContains(r, review_req2.doc.name)
- self.assertContains(r, 'Assigned')
- self.assertNotContains(r, 'Accepted')
- self.assertNotContains(r, 'Completed')
- self.assertNotContains(r, 'Ready')
+ self.assertContains(r, 'data-text="Assigned"')
+ self.assertNotContains(r, 'data-text="Accepted"')
+ self.assertNotContains(r, 'data-text="Completed"')
+ self.assertNotContains(r, 'data-text="Ready"')
self.assertContains(r, escape(assignment.reviewer.person.name))
self.assertNotContains(r, escape(assignment2.reviewer.person.name))
@@ -926,10 +926,10 @@ def test_requests_history_filter_page(self):
self.assertEqual(r.status_code, 200)
self.assertNotContains(r, review_req.doc.name)
self.assertContains(r, review_req2.doc.name)
- self.assertNotContains(r, 'Assigned')
- self.assertContains(r, 'Accepted')
- self.assertContains(r, 'Completed')
- self.assertContains(r, 'Ready')
+ self.assertNotContains(r, 'data-text="Assigned"')
+ self.assertContains(r, 'data-text="Accepted"')
+ self.assertContains(r, 'data-text="Completed"')
+ self.assertContains(r, 'data-text="Ready"')
self.assertNotContains(r, escape(assignment.reviewer.person.name))
self.assertContains(r, escape(assignment2.reviewer.person.name))
@@ -940,9 +940,9 @@ def test_requests_history_filter_page(self):
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertNotContains(r, review_req.doc.name)
- self.assertNotContains(r, 'Assigned')
- self.assertNotContains(r, 'Accepted')
- self.assertNotContains(r, 'Completed')
+ self.assertNotContains(r, 'data-text="Assigned"')
+ self.assertNotContains(r, 'data-text="Accepted"')
+ self.assertNotContains(r, 'data-text="Completed"')
def test_requests_history_invalid_filter_parameters(self):
# First assignment as assigned
diff --git a/ietf/group/tests_serializers.py b/ietf/group/tests_serializers.py
new file mode 100644
index 0000000000..b584a17ae2
--- /dev/null
+++ b/ietf/group/tests_serializers.py
@@ -0,0 +1,96 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+from ietf.group.factories import RoleFactory, GroupFactory
+from ietf.group.serializers import (
+ AreaDirectorSerializer,
+ AreaSerializer,
+ GroupSerializer,
+)
+from ietf.person.factories import EmailFactory
+from ietf.utils.test_utils import TestCase
+
+
+class GroupSerializerTests(TestCase):
+ def test_serializes(self):
+ wg = GroupFactory()
+ serialized = GroupSerializer(wg).data
+ self.assertEqual(
+ serialized,
+ {
+ "acronym": wg.acronym,
+ "name": wg.name,
+ "type": "wg",
+ "list_email": wg.list_email,
+ },
+ )
+
+
+class AreaDirectorSerializerTests(TestCase):
+ def test_serializes_role(self):
+ """Should serialize a Role correctly"""
+ role = RoleFactory(group__type_id="area", name_id="ad")
+ serialized = AreaDirectorSerializer(role).data
+ self.assertEqual(
+ serialized,
+ {"email": role.email.email_address(), "name": role.person.plain_name()},
+ )
+
+ def test_serializes_email(self):
+ """Should serialize an Email correctly"""
+ email = EmailFactory()
+ serialized = AreaDirectorSerializer(email).data
+ self.assertEqual(
+ serialized,
+ {
+ "email": email.email_address(),
+ "name": email.person.plain_name() if email.person else None,
+ },
+ )
+
+
+class AreaSerializerTests(TestCase):
+ def test_serializes_active_area(self):
+ """Should serialize an active area correctly"""
+ area = GroupFactory(type_id="area", state_id="active")
+ serialized = AreaSerializer(area).data
+ self.assertEqual(
+ serialized,
+ {
+ "acronym": area.acronym,
+ "name": area.name,
+ "ads": [],
+ },
+ )
+ ad_roles = RoleFactory.create_batch(2, group=area, name_id="ad")
+ serialized = AreaSerializer(area).data
+ self.assertEqual(serialized["acronym"], area.acronym)
+ self.assertEqual(serialized["name"], area.name)
+ self.assertCountEqual(
+ serialized["ads"],
+ [
+ {"email": ad.email.email_address(), "name": ad.person.plain_name()}
+ for ad in ad_roles
+ ],
+ )
+
+ def test_serializes_inactive_area(self):
+ """Should serialize an inactive area correctly"""
+ area = GroupFactory(type_id="area", state_id="conclude")
+ serialized = AreaSerializer(area).data
+ self.assertEqual(
+ serialized,
+ {
+ "acronym": area.acronym,
+ "name": area.name,
+ "ads": [],
+ },
+ )
+ RoleFactory.create_batch(2, group=area, name_id="ad")
+ serialized = AreaSerializer(area).data
+ self.assertEqual(
+ serialized,
+ {
+ "acronym": area.acronym,
+ "name": area.name,
+ "ads": [],
+ },
+ )
diff --git a/ietf/group/views.py b/ietf/group/views.py
index efe3eca15d..8561a5059f 100644
--- a/ietf/group/views.py
+++ b/ietf/group/views.py
@@ -245,10 +245,19 @@ def active_review_dirs(request):
return render(request, 'group/active_review_dirs.html', {'dirs' : dirs })
def active_teams(request):
- teams = Group.objects.filter(type="team", state="active").order_by("name")
+ parent_type_order = {"area": 1, "adm": 3, None: 4}
+
+ def team_sort_key(group):
+ type_id = group.parent.type_id if group.parent else None
+ return (parent_type_order.get(type_id, 2), group.parent.name if group.parent else "", group.name)
+
+ teams = sorted(
+ Group.objects.filter(type="team", state="active").select_related("parent"),
+ key=team_sort_key,
+ )
for group in teams:
group.chairs = sorted(roles(group, "chair"), key=extract_last_name)
- return render(request, 'group/active_teams.html', {'teams' : teams })
+ return render(request, 'group/active_teams.html', {'teams': teams})
def active_iab(request):
iabgroups = Group.objects.filter(type__in=("program","iabasg","iabworkshop"), state="active").order_by("-type_id","name")
diff --git a/ietf/iesg/agenda.py b/ietf/iesg/agenda.py
index 587713089f..ace4c9ec40 100644
--- a/ietf/iesg/agenda.py
+++ b/ietf/iesg/agenda.py
@@ -133,7 +133,7 @@ def agenda_sections():
('4.2', {'title':"WG rechartering"}),
('4.2.1', {'title':"Under evaluation for IETF review", 'docs':[]}),
('4.2.2', {'title':"Proposed for approval", 'docs':[]}),
- ('5', {'title':"IAB news we can use"}),
+ ('5', {'title':"IESG Liaison News"}),
('6', {'title':"Management issues"}),
('7', {'title':"Any Other Business (WG News, New Proposals, etc.)"}),
])
diff --git a/ietf/ietfauth/utils.py b/ietf/ietfauth/utils.py
index e2893a90f7..0df667fbd2 100644
--- a/ietf/ietfauth/utils.py
+++ b/ietf/ietfauth/utils.py
@@ -211,9 +211,9 @@ def role_required(*role_names):
# specific permissions
+
def is_authorized_in_doc_stream(user, doc):
- """Return whether user is authorized to perform stream duties on
- document."""
+ """Is user authorized to perform stream duties on doc?"""
if has_role(user, ["Secretariat"]):
return True
@@ -287,7 +287,7 @@ def is_individual_draft_author(user, doc):
if not hasattr(user, 'person'):
return False
- if user.person in doc.authors():
+ if user.person in doc.author_persons():
return True
return False
diff --git a/ietf/ipr/admin.py b/ietf/ipr/admin.py
index 1a8a908dcd..d6a320203b 100644
--- a/ietf/ipr/admin.py
+++ b/ietf/ipr/admin.py
@@ -17,6 +17,7 @@
NonDocSpecificIprDisclosure,
LegacyMigrationIprEvent,
)
+from ietf.utils.admin import SaferTabularInline
# ------------------------------------------------------
# ModelAdmins
@@ -29,13 +30,13 @@ class Meta:
'sections':forms.TextInput,
}
-class IprDocRelInline(admin.TabularInline):
+class IprDocRelInline(SaferTabularInline):
model = IprDocRel
form = IprDocRelAdminForm
raw_id_fields = ['document']
extra = 1
-class RelatedIprInline(admin.TabularInline):
+class RelatedIprInline(SaferTabularInline):
model = RelatedIpr
raw_id_fields = ['target']
fk_name = 'source'
diff --git a/ietf/ipr/views.py b/ietf/ipr/views.py
index 665c99dc43..0a43ff2c27 100644
--- a/ietf/ipr/views.py
+++ b/ietf/ipr/views.py
@@ -81,7 +81,8 @@ def get_document_emails(ipr):
addrs = gather_address_lists('ipr_posted_on_doc',doc=doc).as_strings(compact=False)
- author_names = ', '.join(a.person.name for a in doc.documentauthor_set.select_related("person"))
+ # Get a list of author names for the salutation in the body of the email
+ author_names = ', '.join(doc.author_names())
context = dict(
settings=settings,
diff --git a/ietf/liaisons/admin.py b/ietf/liaisons/admin.py
index 21515ed1a3..d873cce536 100644
--- a/ietf/liaisons/admin.py
+++ b/ietf/liaisons/admin.py
@@ -7,15 +7,16 @@
from ietf.liaisons.models import ( LiaisonStatement, LiaisonStatementEvent,
RelatedLiaisonStatement, LiaisonStatementAttachment )
+from ietf.utils.admin import SaferTabularInline
-class RelatedLiaisonStatementInline(admin.TabularInline):
+class RelatedLiaisonStatementInline(SaferTabularInline):
model = RelatedLiaisonStatement
fk_name = 'source'
raw_id_fields = ['target']
extra = 1
-class LiaisonStatementAttachmentInline(admin.TabularInline):
+class LiaisonStatementAttachmentInline(SaferTabularInline):
model = LiaisonStatementAttachment
raw_id_fields = ['document']
extra = 1
diff --git a/ietf/liaisons/widgets.py b/ietf/liaisons/widgets.py
index 74368e83f2..48db8af0a3 100644
--- a/ietf/liaisons/widgets.py
+++ b/ietf/liaisons/widgets.py
@@ -26,7 +26,9 @@ def render(self, name, value, **kwargs):
html += '%s' % conditional_escape(i)
required_str = 'Please fill in %s to attach a new file' % conditional_escape(self.required_label)
html += '%s' % conditional_escape(required_str)
- html += '' % conditional_escape(self.label)
+ html += ''.format(
+ f"id_{name}", conditional_escape(self.label)
+ )
return mark_safe(html)
diff --git a/ietf/meeting/admin.py b/ietf/meeting/admin.py
index d886a9a4b6..03abf5c029 100644
--- a/ietf/meeting/admin.py
+++ b/ietf/meeting/admin.py
@@ -10,6 +10,7 @@
SessionPresentation, ImportantDate, SlideSubmission, SchedulingEvent, BusinessConstraint,
ProceedingsMaterial, MeetingHost, Registration, RegistrationTicket,
AttendanceTypeName)
+from ietf.utils.admin import SaferTabularInline
class UrlResourceAdmin(admin.ModelAdmin):
@@ -18,7 +19,7 @@ class UrlResourceAdmin(admin.ModelAdmin):
raw_id_fields = ['room', ]
admin.site.register(UrlResource, UrlResourceAdmin)
-class UrlResourceInline(admin.TabularInline):
+class UrlResourceInline(SaferTabularInline):
model = UrlResource
class RoomAdmin(admin.ModelAdmin):
@@ -28,7 +29,7 @@ class RoomAdmin(admin.ModelAdmin):
admin.site.register(Room, RoomAdmin)
-class RoomInline(admin.TabularInline):
+class RoomInline(SaferTabularInline):
model = Room
class MeetingAdmin(admin.ModelAdmin):
@@ -93,7 +94,7 @@ def name_lower(self, instance):
admin.site.register(Constraint, ConstraintAdmin)
-class SchedulingEventInline(admin.TabularInline):
+class SchedulingEventInline(SaferTabularInline):
model = SchedulingEvent
raw_id_fields = ["by"]
@@ -244,7 +245,7 @@ def queryset(self, request, queryset):
return queryset.filter(tickets__attendance_type__slug=self.value()).distinct()
return queryset
-class RegistrationTicketInline(admin.TabularInline):
+class RegistrationTicketInline(SaferTabularInline):
model = RegistrationTicket
class RegistrationAdmin(admin.ModelAdmin):
diff --git a/ietf/meeting/resources.py b/ietf/meeting/resources.py
index 88562a88fe..490b75f925 100644
--- a/ietf/meeting/resources.py
+++ b/ietf/meeting/resources.py
@@ -21,7 +21,13 @@
Attended,
Registration, RegistrationTicket)
-from ietf.name.resources import MeetingTypeNameResource
+from ietf.name.resources import (
+ AttendanceTypeNameResource,
+ MeetingTypeNameResource,
+ RegistrationTicketTypeNameResource,
+)
+
+
class MeetingResource(ModelResource):
type = ToOneField(MeetingTypeNameResource, 'type')
schedule = ToOneField('ietf.meeting.resources.ScheduleResource', 'schedule', null=True)
@@ -437,11 +443,16 @@ class Meta:
}
api.meeting.register(AttendedResource())
-from ietf.meeting.resources import MeetingResource
from ietf.person.resources import PersonResource
class RegistrationResource(ModelResource):
meeting = ToOneField(MeetingResource, 'meeting')
person = ToOneField(PersonResource, 'person', null=True)
+ tickets = ToManyField(
+ 'ietf.meeting.resources.RegistrationTicketResource',
+ 'tickets',
+ full=True,
+ )
+
class Meta:
queryset = Registration.objects.all()
serializer = api.Serializer()
@@ -456,13 +467,17 @@ class Meta:
"country_code": ALL,
"email": ALL,
"attended": ALL,
+ "checkedin": ALL,
"meeting": ALL_WITH_RELATIONS,
"person": ALL_WITH_RELATIONS,
+ "tickets": ALL_WITH_RELATIONS,
}
api.meeting.register(RegistrationResource())
class RegistrationTicketResource(ModelResource):
registration = ToOneField(RegistrationResource, 'registration')
+ attendance_type = ToOneField(AttendanceTypeNameResource, 'attendance_type')
+ ticket_type = ToOneField(RegistrationTicketTypeNameResource, 'ticket_type')
class Meta:
queryset = RegistrationTicket.objects.all()
serializer = api.Serializer()
@@ -471,8 +486,8 @@ class Meta:
ordering = ['id', ]
filtering = {
"id": ALL,
- "ticket_type": ALL,
- "attendance_type": ALL,
+ "ticket_type": ALL_WITH_RELATIONS,
+ "attendance_type": ALL_WITH_RELATIONS,
"registration": ALL_WITH_RELATIONS,
}
api.meeting.register(RegistrationTicketResource())
diff --git a/ietf/meeting/tasks.py b/ietf/meeting/tasks.py
index c361325f9a..a73763560b 100644
--- a/ietf/meeting/tasks.py
+++ b/ietf/meeting/tasks.py
@@ -1,11 +1,14 @@
-# Copyright The IETF Trust 2024-2025, All Rights Reserved
+# Copyright The IETF Trust 2024-2026, All Rights Reserved
#
# Celery task definitions
#
import datetime
-from celery import shared_task
-# from django.db.models import QuerySet
+from itertools import batched
+
+from celery import shared_task, chain
+from django.db.models import IntegerField
+from django.db.models.functions import Cast
from django.utils import timezone
from ietf.utils import log
@@ -19,9 +22,56 @@
from .utils import fetch_attendance_from_meetings
+@shared_task
+def agenda_data_refresh_task(num=None):
+ """Refresh agenda data for one plenary meeting
+
+ If `num` is `None`, refreshes data for the current meeting.
+ """
+ log.log(
+ f"Refreshing agenda data for {f"IETF-{num}" if num else "current IETF meeting"}"
+ )
+ try:
+ generate_agenda_data(num, force_refresh=True)
+ except Exception as err:
+ # Log and swallow exceptions so failure on one meeting won't break a chain of
+ # tasks. This is used by agenda_data_refresh_all_task().
+ log.log(f"ERROR: Refreshing agenda data failed for num={num}: {err}")
+
+
@shared_task
def agenda_data_refresh():
- generate_agenda_data(force_refresh=True)
+ """Deprecated. Use agenda_data_refresh_task() instead.
+
+ TODO remove this after switching the periodic task to the new name
+ """
+ log.log("Deprecated agenda_data_refresh task called!")
+ agenda_data_refresh_task()
+
+
+@shared_task
+def agenda_data_refresh_all_task(*, batch_size=10):
+ """Refresh agenda data for all plenary meetings
+
+ Executes as a chain of tasks, each computing up to `batch_size` meetings
+ in a single task.
+ """
+ meeting_numbers = sorted(
+ Meeting.objects.annotate(
+ number_as_int=Cast("number", output_field=IntegerField())
+ )
+ .filter(type_id="ietf", number_as_int__gt=64)
+ .values_list("number_as_int", flat=True)
+ )
+ # Batch using chained maps rather than celery.chunk so we only use one worker
+ # at a time.
+ batched_task_chain = chain(
+ *(
+ agenda_data_refresh_task.map(nums)
+ for nums in batched(meeting_numbers, batch_size)
+ )
+ )
+ batched_task_chain.delay()
@shared_task
@@ -55,7 +105,9 @@ def proceedings_content_refresh_task(*, all=False):
@shared_task
def fetch_meeting_attendance_task():
# fetch most recent two meetings
- meetings = Meeting.objects.filter(type="ietf", date__lte=timezone.now()).order_by("-date")[:2]
+ meetings = Meeting.objects.filter(type="ietf", date__lte=timezone.now()).order_by(
+ "-date"
+ )[:2]
try:
stats = fetch_attendance_from_meetings(meetings)
except RuntimeError as err:
@@ -64,8 +116,11 @@ def fetch_meeting_attendance_task():
for meeting, meeting_stats in zip(meetings, stats):
log.log(
"Fetched data for meeting {:>3}: {:4d} created, {:4d} updated, {:4d} deleted, {:4d} processed".format(
- meeting.number, meeting_stats['created'], meeting_stats['updated'], meeting_stats['deleted'],
- meeting_stats['processed']
+ meeting.number,
+ meeting_stats["created"],
+ meeting_stats["updated"],
+ meeting_stats["deleted"],
+ meeting_stats["processed"],
)
)
@@ -73,7 +128,7 @@ def fetch_meeting_attendance_task():
def _select_meetings(
meetings: list[str] | None = None,
meetings_since: str | None = None,
- meetings_until: str | None = None
+ meetings_until: str | None = None,
):
"""Select meetings by number or date range"""
# IETF-1 = 1986-01-16
@@ -130,15 +185,15 @@ def _select_meetings(
@shared_task
def resolve_meeting_materials_task(
*, # only allow kw arguments
- meetings: list[str] | None=None,
- meetings_since: str | None=None,
- meetings_until: str | None=None
+ meetings: list[str] | None = None,
+ meetings_since: str | None = None,
+ meetings_until: str | None = None,
):
"""Run materials resolver on meetings
-
+
Can request a set of meetings by number by passing a list in the meetings arg, or
by range by passing an iso-format timestamps in meetings_since / meetings_until.
- To select all meetings, set meetings_since="zero" and omit other parameters.
+ To select all meetings, set meetings_since="zero" and omit other parameters.
"""
meetings_qs = _select_meetings(meetings, meetings_since, meetings_until)
for meeting in meetings_qs.order_by("date"):
@@ -155,7 +210,9 @@ def resolve_meeting_materials_task(
f"meeting {meeting.number}: {err}"
)
else:
- log.log(f"Resolved in {(timezone.now() - mark).total_seconds():0.3f} seconds.")
+ log.log(
+ f"Resolved in {(timezone.now() - mark).total_seconds():0.3f} seconds."
+ )
@shared_task
@@ -163,13 +220,13 @@ def store_meeting_materials_as_blobs_task(
*, # only allow kw arguments
meetings: list[str] | None = None,
meetings_since: str | None = None,
- meetings_until: str | None = None
+ meetings_until: str | None = None,
):
"""Push meeting materials into the blob store
Can request a set of meetings by number by passing a list in the meetings arg, or
by range by passing an iso-format timestamps in meetings_since / meetings_until.
- To select all meetings, set meetings_since="zero" and omit other parameters.
+ To select all meetings, set meetings_since="zero" and omit other parameters.
"""
meetings_qs = _select_meetings(meetings, meetings_since, meetings_until)
for meeting in meetings_qs.order_by("date"):
@@ -187,4 +244,5 @@ def store_meeting_materials_as_blobs_task(
)
else:
log.log(
- f"Blobs created in {(timezone.now() - mark).total_seconds():0.3f} seconds.")
+ f"Blobs created in {(timezone.now() - mark).total_seconds():0.3f} seconds."
+ )
diff --git a/ietf/meeting/tests_session_requests.py b/ietf/meeting/tests_session_requests.py
index 0cb092d2f8..42dbee5f23 100644
--- a/ietf/meeting/tests_session_requests.py
+++ b/ietf/meeting/tests_session_requests.py
@@ -236,7 +236,7 @@ def test_edit(self):
self.assertRedirects(r, redirect_url)
# Check whether updates were stored in the database
- sessions = Session.objects.filter(meeting=meeting, group=mars)
+ sessions = Session.objects.filter(meeting=meeting, group=mars).order_by("id")
self.assertEqual(len(sessions), 2)
session = sessions[0]
self.assertFalse(session.constraints().filter(name='time_relation'))
diff --git a/ietf/meeting/tests_tasks.py b/ietf/meeting/tests_tasks.py
index a5da00ecbf..2c5120a39d 100644
--- a/ietf/meeting/tests_tasks.py
+++ b/ietf/meeting/tests_tasks.py
@@ -5,23 +5,63 @@
from ietf.utils.test_utils import TestCase
from ietf.utils.timezone import date_today
from .factories import MeetingFactory
-from .tasks import proceedings_content_refresh_task, agenda_data_refresh
+from .tasks import (
+ proceedings_content_refresh_task,
+ agenda_data_refresh_task,
+ agenda_data_refresh_all_task,
+)
from .tasks import fetch_meeting_attendance_task
class TaskTests(TestCase):
@patch("ietf.meeting.tasks.generate_agenda_data")
- def test_agenda_data_refresh(self, mock_generate):
- agenda_data_refresh()
+ def test_agenda_data_refresh_task(self, mock_generate):
+ agenda_data_refresh_task()
self.assertTrue(mock_generate.called)
- self.assertEqual(mock_generate.call_args, call(force_refresh=True))
+ self.assertEqual(mock_generate.call_args, call(None, force_refresh=True))
+
+ mock_generate.reset_mock()
+ mock_generate.side_effect = RuntimeError
+ try:
+ agenda_data_refresh_task()
+ except Exception as err:
+ self.fail(
+ f"agenda_data_refresh_task should not raise exceptions (got {repr(err)})"
+ )
+
+ @patch("ietf.meeting.tasks.agenda_data_refresh_task")
+ @patch("ietf.meeting.tasks.chain")
+ def test_agenda_data_refresh_all_task(self, mock_chain, mock_agenda_data_refresh):
+ # Patch the agenda_data_refresh_task task with a mock whose `.map` attribute
+ # converts its argument, which is expected to be an iterator, to a list
+ # and returns it. We'll use this to check that the expected task chain
+ # was set up, but we don't actually run any celery tasks.
+ mock_agenda_data_refresh.map.side_effect = lambda x: list(x)
+
+ meetings = MeetingFactory.create_batch(5, type_id="ietf")
+ numbers = sorted(int(m.number) for m in meetings)
+ agenda_data_refresh_all_task(batch_size=2)
+ self.assertTrue(mock_chain.called)
+ # The lists in the call() below are the output of the lambda we patched in
+ # via mock_agenda_data_refresh.map.side_effect above. I.e., this tests that
+ # map() was called with the correct batched data.
+ self.assertEqual(
+ mock_chain.call_args,
+ call(
+ [numbers[0], numbers[1]],
+ [numbers[2], numbers[3]],
+ [numbers[4]],
+ ),
+ )
+ self.assertEqual(mock_agenda_data_refresh.call_count, 0)
+ self.assertEqual(mock_agenda_data_refresh.map.call_count, 3)
@patch("ietf.meeting.tasks.generate_proceedings_content")
def test_proceedings_content_refresh_task(self, mock_generate):
# Generate a couple of meetings
meeting120 = MeetingFactory(type_id="ietf", number="120") # 24 * 5
meeting127 = MeetingFactory(type_id="ietf", number="127") # 24 * 5 + 7
-
+
# Times to be returned
now_utc = datetime.datetime.now(tz=datetime.UTC)
hour_00_utc = now_utc.replace(hour=0)
@@ -34,19 +74,19 @@ def test_proceedings_content_refresh_task(self, mock_generate):
self.assertEqual(mock_generate.call_count, 1)
self.assertEqual(mock_generate.call_args, call(meeting120, force_refresh=True))
mock_generate.reset_mock()
-
+
# hour 01 - should call no meetings
with patch("ietf.meeting.tasks.timezone.now", return_value=hour_01_utc):
proceedings_content_refresh_task()
self.assertEqual(mock_generate.call_count, 0)
-
+
# hour 07 - should call meeting with number % 24 == 0
with patch("ietf.meeting.tasks.timezone.now", return_value=hour_07_utc):
proceedings_content_refresh_task()
self.assertEqual(mock_generate.call_count, 1)
self.assertEqual(mock_generate.call_args, call(meeting127, force_refresh=True))
mock_generate.reset_mock()
-
+
# With all=True, all should be called regardless of time. Reuse hour_01_utc which called none before
with patch("ietf.meeting.tasks.timezone.now", return_value=hour_01_utc):
proceedings_content_refresh_task(all=True)
@@ -61,10 +101,10 @@ def test_fetch_meeting_attendance_task(self, mock_fetch_attendance):
MeetingFactory(type_id="ietf", date=today - datetime.timedelta(days=3)),
]
data = {
- 'created': 1,
- 'updated': 2,
- 'deleted': 0,
- 'processed': 3,
+ "created": 1,
+ "updated": 2,
+ "deleted": 0,
+ "processed": 3,
}
mock_fetch_attendance.return_value = [data, data]
diff --git a/ietf/meeting/tests_views.py b/ietf/meeting/tests_views.py
index b94229d969..17988e50be 100644
--- a/ietf/meeting/tests_views.py
+++ b/ietf/meeting/tests_views.py
@@ -33,6 +33,7 @@
from django.http import QueryDict, FileResponse
from django.template import Context, Template
from django.utils import timezone
+from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import slugify
@@ -4754,7 +4755,7 @@ def _approval_url(slidesub):
0,
"second session proposed slides should be linked for approval",
)
-
+
class EditScheduleListTests(TestCase):
def setUp(self):
@@ -7345,6 +7346,67 @@ def test_submit_and_approve_multiple_versions(self, mock_slides_manager_cls):
fd.close()
self.assertIn('third version', contents)
+ @override_settings(
+ MEETECHO_API_CONFIG="fake settings"
+ ) # enough to trigger API calls
+ @patch("ietf.meeting.views.SlidesManager")
+ def test_notify_meetecho_of_all_slides(self, mock_slides_manager_cls):
+ for meeting_type in ["ietf", "interim"]:
+ # Reset for the sake of the second iteration
+ self.client.logout()
+ mock_slides_manager_cls.reset_mock()
+
+ session = SessionFactory(meeting__type_id=meeting_type)
+ meeting = session.meeting
+
+ # bad meeting
+ url = urlreverse(
+ "ietf.meeting.views.notify_meetecho_of_all_slides",
+ kwargs={"num": 9999, "acronym": session.group.acronym},
+ )
+ login_testing_unauthorized(self, "secretary", url)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 404)
+ r = self.client.post(url)
+ self.assertEqual(r.status_code, 404)
+ self.assertFalse(mock_slides_manager_cls.called)
+ self.client.logout()
+
+ # good meeting
+ url = urlreverse(
+ "ietf.meeting.views.notify_meetecho_of_all_slides",
+ kwargs={"num": meeting.number, "acronym": session.group.acronym},
+ )
+ login_testing_unauthorized(self, "secretary", url)
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 405)
+ self.assertFalse(mock_slides_manager_cls.called)
+ mock_slides_manager = mock_slides_manager_cls.return_value
+ mock_slides_manager.send_update.return_value = True
+ r = self.client.post(url)
+ self.assertEqual(r.status_code, 302)
+ self.assertEqual(mock_slides_manager.send_update.call_count, 1)
+ self.assertEqual(mock_slides_manager.send_update.call_args, call(session))
+ r = self.client.get(r["Location"])
+ messages = list(r.context["messages"])
+ self.assertEqual(len(messages), 1)
+ self.assertEqual(
+ str(messages[0]), f"Notified Meetecho about slides for {session}"
+ )
+
+ mock_slides_manager.send_update.reset_mock()
+ mock_slides_manager.send_update.return_value = False
+ r = self.client.post(url)
+ self.assertEqual(r.status_code, 302)
+ self.assertEqual(mock_slides_manager.send_update.call_count, 1)
+ self.assertEqual(mock_slides_manager.send_update.call_args, call(session))
+ r = self.client.get(r["Location"])
+ messages = list(r.context["messages"])
+ self.assertEqual(len(messages), 1)
+ self.assertIn(
+ "No sessions were eligible for Meetecho slides update.", str(messages[0])
+ )
+
@override_settings(IETF_NOTES_URL='https://notes.ietf.org/')
class ImportNotesTests(TestCase):
@@ -8946,6 +9008,8 @@ def test_proceedings_attendees(self):
- assert onsite checkedin=True appears, not onsite checkedin=False
- assert remote attended appears, not remote not attended
- prefer onsite checkedin=True to remote attended when same person has both
+ - summary stats row shows correct counts
+ - chart data JSON is embedded with correct values
"""
m = MeetingFactory(type_id='ietf', date=datetime.date(2023, 11, 4), number="118")
@@ -8967,6 +9031,17 @@ def test_proceedings_attendees(self):
text = q('#id_attendees tbody tr').text().replace('\n', ' ')
self.assertEqual(text, f"A Person {areg.affiliation} {areg.country_code} onsite C Person {creg.affiliation} {creg.country_code} remote")
+ # Summary stats row: Onsite / Remote / Total (matches registration.ietf.org)
+ self.assertContains(response, 'Onsite:')
+ self.assertContains(response, 'Remote:')
+ self.assertContains(response, 'Total:')
+ self.assertContains(response, '1') # onsite and remote
+ self.assertContains(response, '2') # total
+
+ # Chart data embedded in page
+ chart_json = json.loads(q('#attendees-chart-data').text())
+ self.assertEqual(chart_json['type'], [['Onsite', 1], ['Remote', 1]])
+
def test_proceedings_overview(self):
'''Test proceedings IETF Overview page.
Note: old meetings aren't supported so need to add a new meeting then test.
@@ -9417,7 +9492,7 @@ def test_session_attendance(self):
self.assertEqual(r.status_code, 200)
self.assertContains(r, '3 attendees')
for person in persons:
- self.assertContains(r, person.plain_name())
+ self.assertContains(r, escape(person.plain_name()))
# Test for the "I was there" button.
def _test_button(person, expected):
@@ -9437,14 +9512,14 @@ def _test_button(person, expected):
# attempt to POST anyway is ignored
r = self.client.post(attendance_url)
self.assertEqual(r.status_code, 200)
- self.assertNotContains(r, persons[3].plain_name())
+ self.assertNotContains(r, escape(persons[3].plain_name()))
self.assertEqual(session.attended_set.count(), 3)
# button is shown, and POST is accepted
meeting.importantdate_set.update(name_id='revsub',date=date_today() + datetime.timedelta(days=20))
_test_button(persons[3], True)
r = self.client.post(attendance_url)
self.assertEqual(r.status_code, 200)
- self.assertContains(r, persons[3].plain_name())
+ self.assertContains(r, escape(persons[3].plain_name()))
self.assertEqual(session.attended_set.count(), 4)
# When the meeting is finalized, a bluesheet file is generated,
diff --git a/ietf/meeting/urls.py b/ietf/meeting/urls.py
index af36a6656c..a038e1cfe6 100644
--- a/ietf/meeting/urls.py
+++ b/ietf/meeting/urls.py
@@ -15,6 +15,7 @@ def get_redirect_url(self, *args, **kwargs):
safe_for_all_meeting_types = [
    url(r'^session/(?P<acronym>[-a-z0-9]+)/?$', views.session_details),
+    url(r'^session/(?P<acronym>[-a-z0-9]+)/send_slide_notifications$', views.notify_meetecho_of_all_slides),
    url(r'^session/(?P<session_id>\d+)/drafts$', views.add_session_drafts),
    url(r'^session/(?P<session_id>\d+)/recordings$', views.add_session_recordings),
    url(r'^session/(?P<session_id>\d+)/attendance$', views.session_attendance),
@@ -30,7 +31,7 @@ def get_redirect_url(self, *args, **kwargs):
    url(r'^session/(?P<session_id>\d+)/doc/%(name)s/remove$' % settings.URL_REGEXPS, views.remove_sessionpresentation),
    url(r'^session/(?P<session_id>\d+)\.ics$', views.agenda_ical),
    url(r'^sessions/(?P<acronym>[-a-z0-9]+)\.ics$', views.agenda_ical),
-    url(r'^slidesubmission/(?P<slidesubmission_id>\d+)$', views.approve_proposed_slides)
+    url(r'^slidesubmission/(?P<slidesubmission_id>\d+)$', views.approve_proposed_slides),
]
diff --git a/ietf/meeting/utils.py b/ietf/meeting/utils.py
index bdf3d3d3d3..10ae0d3667 100644
--- a/ietf/meeting/utils.py
+++ b/ietf/meeting/utils.py
@@ -1025,9 +1025,18 @@ def resolve_materials_for_one_meeting(meeting: Meeting):
)
def resolve_uploaded_material(meeting: Meeting, doc: Document):
- resolved = []
+ resolved: list[ResolvedMaterial] = []
+ remove = ResolvedMaterial.objects.none()
blob = resolve_one_material(doc, rev=None, ext=None)
- if blob is not None:
+ if blob is None:
+ # Versionless file does not exist. Remove the versionless ResolvedMaterial
+ # if it existed. This is to avoid leaving behind a stale link to a replaced
+ # version. This comes up e.g. if a ProceedingsMaterial is changed from having
+ # an uploaded file to being an external URL.
+ remove = ResolvedMaterial.objects.filter(
+ name=doc.name, meeting_number=meeting.number
+ )
+ else:
resolved.append(
ResolvedMaterial(
name=doc.name,
@@ -1047,12 +1056,15 @@ def resolve_uploaded_material(meeting: Meeting, doc: Document):
blob=blob.name,
)
)
+ # Create the new record(s)
ResolvedMaterial.objects.bulk_create(
resolved,
update_conflicts=True,
unique_fields=["name", "meeting_number"],
update_fields=["bucket", "blob"],
)
+ # and remove one if necessary (will be a none() queryset if not)
+ remove.delete()
def store_blob_for_one_material_file(doc: Document, rev: str, filepath: Path):
diff --git a/ietf/meeting/views.py b/ietf/meeting/views.py
index 903e3c7e79..67a81305b4 100644
--- a/ietf/meeting/views.py
+++ b/ietf/meeting/views.py
@@ -40,7 +40,7 @@
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import URLValidator
-from django.urls import reverse,reverse_lazy
+from django.urls import reverse, reverse_lazy, NoReverseMatch
from django.db.models import F, Max, Q
from django.forms.models import modelform_factory, inlineformset_factory
from django.template import TemplateDoesNotExist
@@ -109,7 +109,7 @@
from ietf.meeting.utils import get_activity_stats, post_process, create_recording, delete_recording
from ietf.meeting.utils import participants_for_meeting, generate_bluesheet, bluesheet_data, save_bluesheet
from ietf.message.utils import infer_message
-from ietf.name.models import SlideSubmissionStatusName, ProceedingsMaterialTypeName, SessionPurposeName
+from ietf.name.models import SlideSubmissionStatusName, ProceedingsMaterialTypeName, SessionPurposeName, CountryName
from ietf.utils import markdown
from ietf.utils.decorators import require_api_key
from ietf.utils.hedgedoc import Note, NoteError
@@ -1859,18 +1859,22 @@ def generate_agenda_data(num=None, force_refresh=False):
:num: meeting number
:force_refresh: True to force a refresh of the cache
"""
- cache = caches["default"]
- cache_timeout = 6 * 60
-
meeting = get_ietf_meeting(num)
if meeting is None:
raise Http404("No such full IETF meeting")
elif int(meeting.number) <= 64:
- return Http404("Pre-IETF 64 meetings are not available through this API")
- else:
- pass
+ raise Http404("Pre-IETF 64 meetings are not available through this API")
+ is_current_meeting = meeting.number == get_current_ietf_meeting_num()
+
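+    # The current meeting's agenda is still changing, so cache it only briefly;
+    # past meetings are effectively static and can be cached for days.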
+ cache = caches["agenda"]
+ cache_timeout = (
+ settings.AGENDA_CACHE_TIMEOUT_CURRENT_MEETING
+ if is_current_meeting
+ else settings.AGENDA_CACHE_TIMEOUT_DEFAULT
+ )
+ cache_format = "1" # bump this on backward-incompatible data format changes
- cache_key = f"generate_agenda_data_{meeting.number}"
+ cache_key = f"generate_agenda_data:{meeting.number}:v{cache_format}"
if not force_refresh:
cached_value = cache.get(cache_key)
if cached_value is not None:
@@ -1890,8 +1894,6 @@ def generate_agenda_data(num=None, force_refresh=False):
filter_organizer = AgendaFilterOrganizer(assignments=filtered_assignments)
- is_current_meeting = (num is None) or (num == get_current_ietf_meeting_num())
-
# Get Floor Plans
floors = FloorPlan.objects.filter(meeting=meeting).order_by('order')
@@ -1966,21 +1968,32 @@ def api_get_session_materials(request, session_id=None):
)
-def agenda_extract_schedule (item):
+def agenda_extract_schedule(item):
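+    # A rescheduled session's tombstone points at its replacement via
+    # tombstone_for; capture the replacement's official slot so clients can
+    # show the new time.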
+ if item.session.current_status == "resched":
+ resched_to = item.session.tombstone_for.official_timeslotassignment()
+ else:
+ resched_to = None
return {
"id": item.id,
+ "slug": item.slug(),
"sessionId": item.session.id,
- "room": item.room_name if item.timeslot.show_location else None,
+ "room": (item.timeslot.get_location() or None) if item.timeslot else None,
"location": {
"short": item.timeslot.location.floorplan.short,
"name": item.timeslot.location.floorplan.name,
} if (item.timeslot.show_location and item.timeslot.location and item.timeslot.location.floorplan) else {},
"acronym": item.acronym,
- "duration": item.timeslot.duration.seconds,
+ "duration": item.timeslot.duration.total_seconds(),
"name": item.session.name,
+ "slotId": item.timeslot.id,
"slotName": item.timeslot.name,
+ "slotModified": item.timeslot.modified.isoformat(),
"startDateTime": item.timeslot.time.isoformat(),
"status": item.session.current_status,
+ "rescheduledTo": {
+ "startDateTime": resched_to.timeslot.time.isoformat(),
+ "duration": resched_to.timeslot.duration.total_seconds(),
+ } if resched_to is not None else {},
"type": item.session.type.slug,
"purpose": item.session.purpose.slug,
"isBoF": item.session.group_at_the_time().state_id == "bof",
@@ -1998,7 +2011,7 @@ def agenda_extract_schedule (item):
"showAgenda": True if (item.session.agenda() is not None or item.session.remote_instructions) else False
},
"agenda": {
- "url": item.session.agenda().get_href()
+ "url": item.session.agenda().get_versionless_href()
} if item.session.agenda() is not None else {
"url": None
},
@@ -2290,10 +2303,131 @@ def ical_session_status(assignment):
else:
return "CONFIRMED"
+
+def render_icalendar_precomp(agenda_data):
+ ical_content = generate_agenda_ical_precomp(agenda_data)
+ return HttpResponse(ical_content, content_type="text/calendar")
+
+
def render_icalendar(schedule, assignments):
ical_content = generate_agenda_ical(schedule, assignments)
return HttpResponse(ical_content, content_type="text/calendar")
+
+def generate_agenda_ical_precomp(agenda_data):
+ """Generate iCalendar from precomputed data using the icalendar library"""
+
+ cal = Calendar()
+ cal.add("prodid", "-//IETF//datatracker.ietf.org ical agenda//EN")
+ cal.add("version", "2.0")
+ cal.add("method", "PUBLISH")
+
+ meeting_data = agenda_data["meeting"]
+ for item in agenda_data["schedule"]:
+ event = Event()
+
+ uid = f"ietf-{meeting_data["number"]}-{item["slotId"]}-{item["acronym"]}"
+ event.add("uid", uid)
+
+ # add custom field with meeting's local TZ
+ event.add("x-meeting-tz", meeting_data["timezone"])
+
+ if item["name"]:
+ summary = item["name"]
+ else:
+ summary = f"{item["groupAcronym"]} - {item["groupName"]}"
+
+ if item["note"]:
+ summary += f" ({item["note"]})"
+
+ event.add("summary", summary)
+
+ if item["room"]:
+ event.add("location", item["room"]) # room name
+
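+        # Map the session status onto an iCalendar STATUS value; rescheduled
+        # sessions get a human-readable pointer to the replacement time range.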
+ if item["status"] == "canceled":
+ status = "CANCELLED"
+ elif item["status"] == "resched":
+ resched_to = item["rescheduledTo"]
+            if not resched_to:  # an empty rescheduledTo dict means no known replacement
+ status = "RESCHEDULED"
+ else:
+ resched_start = datetime.datetime.fromisoformat(
+ resched_to["startDateTime"]
+ )
+ dur = datetime.timedelta(seconds=resched_to["duration"])
+ resched_end = resched_start + dur
+ formatted_start = resched_start.strftime("%A %H:%M").upper()
+ formatted_end = resched_end.strftime("%H:%M")
+ status = f"RESCHEDULED TO {formatted_start}-{formatted_end}"
+ else:
+ status = "CONFIRMED"
+ event.add("status", status)
+
+ event.add("class", "PUBLIC")
+
+ start_time = datetime.datetime.fromisoformat(item["startDateTime"])
+ duration = datetime.timedelta(seconds=item["duration"])
+ event.add("dtstart", start_time)
+ event.add("dtend", start_time + duration)
+
+ # DTSTAMP: when the event was created or last modified (in UTC)
+ # n.b. timeslot.modified may not be an accurate measure of this
+ event.add("dtstamp", datetime.datetime.fromisoformat(item["slotModified"]))
+
+ description_parts = [item["slotName"]]
+
+ if item["note"]:
+ description_parts.append(f"Note: {item["note"]}")
+
+ links = item["links"]
+ if links["onsiteTool"]:
+ description_parts.append(f"Onsite tool: {links["onsiteTool"]}")
+
+ if links["videoStream"]:
+ description_parts.append(f"Meetecho: {links["videoStream"]}")
+
+ if links["webex"]:
+ description_parts.append(f"Webex: {links["webex"]}")
+
+ if item["remoteInstructions"]:
+ description_parts.append(
+ f"Remote instructions: {item["remoteInstructions"]}"
+ )
+
+ try:
+ materials_url = absurl(
+ "ietf.meeting.views.session_details",
+ num=meeting_data["number"],
+ acronym=item["acronym"],
+ )
+ except NoReverseMatch:
+ pass
+ else:
+ description_parts.append(f"Session materials: {materials_url}")
+ event.add("url", materials_url)
+
+ if meeting_data["number"].isdigit():
+ try:
+ agenda_url = absurl("agenda", num=meeting_data["number"])
+ except NoReverseMatch:
+ pass
+ else:
+ description_parts.append(f"See in schedule: {agenda_url}#row-{item["slug"]}")
+
+ if item["agenda"] and item["agenda"]["url"]:
+ description_parts.append(f"Agenda {item["agenda"]["url"]}")
+
+ # Join all description parts with 2 newlines
+ description = "\n\n".join(description_parts)
+ event.add("description", description)
+
+ # Add event to calendar
+ cal.add_component(event)
+
+ return cal.to_ical().decode("utf-8")
+
+
def generate_agenda_ical(schedule, assignments):
"""Generate iCalendar using the icalendar library"""
@@ -2428,10 +2562,66 @@ def parse_agenda_filter_params(querydict):
def should_include_assignment(filter_params, assignment):
"""Decide whether to include an assignment"""
- shown = len(set(filter_params['show']).intersection(assignment.filter_keywords)) > 0
- hidden = len(set(filter_params['hide']).intersection(assignment.filter_keywords)) > 0
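+    # Accept either a SchedTimeSessAssignment object (interim agendas) or a
+    # precomputed agenda-data dict (IETF agendas) and read keywords accordingly.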
+ if hasattr(assignment, "filter_keywords"):
+ kw = assignment.filter_keywords
+ elif isinstance(assignment, dict):
+ kw = assignment.get("filterKeywords", [])
+ else:
+ raise ValueError("Unsupported assignment instance")
+ shown = len(set(filter_params['show']).intersection(kw)) > 0
+ hidden = len(set(filter_params['hide']).intersection(kw)) > 0
return shown and not hidden
+
+def agenda_ical_ietf(meeting, filt_params, acronym=None, session_id=None):
+ agenda_data = generate_agenda_data(meeting.number, force_refresh=False)
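+    # The precomputed agenda is a plain dict, so the group/session/keyword
+    # filters below are simple list comprehensions over its schedule entries.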
+ if acronym:
+ agenda_data["schedule"] = [
+ item
+ for item in agenda_data["schedule"]
+ if item["groupAcronym"] == acronym
+ ]
+ elif session_id:
+ agenda_data["schedule"] = [
+ item
+ for item in agenda_data["schedule"]
+ if item["sessionId"] == session_id
+ ]
+ if filt_params is not None:
+ # Apply the filter
+ agenda_data["schedule"] = [
+ item
+ for item in agenda_data["schedule"]
+ if should_include_assignment(filt_params, item)
+ ]
+ return render_icalendar_precomp(agenda_data)
+
+
+def agenda_ical_interim(meeting, filt_params, acronym=None, session_id=None):
+ schedule = get_schedule(meeting)
+
+ if schedule is None and acronym is None and session_id is None:
+ raise Http404
+
+ assignments = SchedTimeSessAssignment.objects.filter(
+ schedule__in=[schedule, schedule.base],
+ session__on_agenda=True,
+ )
+ assignments = preprocess_assignments_for_agenda(assignments, meeting)
+ AgendaKeywordTagger(assignments=assignments).apply()
+
+ if filt_params is not None:
+ # Apply the filter
+ assignments = [a for a in assignments if should_include_assignment(filt_params, a)]
+
+ if acronym:
+ assignments = [ a for a in assignments if a.session.group_at_the_time().acronym == acronym ]
+ elif session_id:
+ assignments = [ a for a in assignments if a.session_id == int(session_id) ]
+
+ return render_icalendar(schedule, assignments)
+
+
def agenda_ical(request, num=None, acronym=None, session_id=None):
"""Agenda ical view
@@ -2459,33 +2649,20 @@ def agenda_ical(request, num=None, acronym=None, session_id=None):
raise Http404
else:
meeting = get_meeting(num, type_in=None) # get requested meeting, whatever its type
- schedule = get_schedule(meeting)
- if schedule is None and acronym is None and session_id is None:
- raise Http404
-
- assignments = SchedTimeSessAssignment.objects.filter(
- schedule__in=[schedule, schedule.base],
- session__on_agenda=True,
- )
- assignments = preprocess_assignments_for_agenda(assignments, meeting)
- AgendaKeywordTagger(assignments=assignments).apply()
+ if isinstance(session_id, str) and session_id.isdigit():
+ session_id = int(session_id)
try:
filt_params = parse_agenda_filter_params(request.GET)
except ValueError as e:
return HttpResponseBadRequest(str(e))
- if filt_params is not None:
- # Apply the filter
- assignments = [a for a in assignments if should_include_assignment(filt_params, a)]
+ if meeting.type_id == "ietf":
+ return agenda_ical_ietf(meeting, filt_params, acronym, session_id)
+ else:
+ return agenda_ical_interim(meeting, filt_params, acronym, session_id)
- if acronym:
- assignments = [ a for a in assignments if a.session.group_at_the_time().acronym == acronym ]
- elif session_id:
- assignments = [ a for a in assignments if a.session_id == int(session_id) ]
-
- return render_icalendar(schedule, assignments)
@cache_page(15 * 60)
def agenda_json(request, num=None):
@@ -4635,15 +4812,36 @@ def proceedings_attendees(request, num=None):
template = None
registrations = None
+ stats = None
+ chart_data = None
+
if int(meeting.number) >= 118:
checked_in, attended = participants_for_meeting(meeting)
regs = list(Registration.objects.onsite().filter(meeting__number=num, checkedin=True))
-
- for reg in Registration.objects.remote().filter(meeting__number=num).select_related('person'):
- if reg.person.pk in attended and reg.person.pk not in checked_in:
- regs.append(reg)
+ onsite_count = len(regs)
+ regs += [
+ reg
+ for reg in Registration.objects.remote().filter(meeting__number=num).select_related('person')
+ if reg.person.pk in attended and reg.person.pk not in checked_in
+ ]
+ remote_count = len(regs) - onsite_count
registrations = sorted(regs, key=lambda x: (x.last_name, x.first_name))
+
+ country_codes = [r.country_code for r in registrations if r.country_code]
+ stats = {
+ 'total': onsite_count + remote_count,
+ 'onsite': onsite_count,
+ 'remote': remote_count,
+ }
+
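+        # Map country codes to readable names for the by-country chart labels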
+ code_to_name = dict(CountryName.objects.values_list('slug', 'name'))
+ country_counts = Counter(code_to_name.get(c, c) for c in country_codes).most_common()
+
+ chart_data = {
+ 'type': [['Onsite', onsite_count], ['Remote', remote_count]],
+ 'countries': country_counts,
+ }
else:
overview_template = "/meeting/proceedings/%s/attendees.html" % meeting.number
try:
@@ -4655,6 +4853,8 @@ def proceedings_attendees(request, num=None):
'meeting': meeting,
'registrations': registrations,
'template': template,
+ 'stats': stats,
+ 'chart_data': chart_data,
})
def proceedings_overview(request, num=None):
@@ -5533,6 +5733,52 @@ def approve_proposed_slides(request, slidesubmission_id, num):
})
+@role_required("Secretariat")
+def notify_meetecho_of_all_slides(request, num, acronym):
+ """Notify meetecho of state of all slides for the group
+
+ Respects the usual notification window around each session. Meetecho will ignore
+ notices outside that window anyway, so no sense sending them.
+ """
+ meeting = get_meeting(num=num, type_in=None) # raises 404
+ if request.method != "POST":
+ return HttpResponseNotAllowed(
+ content="Method not allowed",
+ content_type=f"text/plain; charset={settings.DEFAULT_CHARSET}",
+ permitted_methods=("POST",),
+ )
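+    # Only sessions actually on the schedule can have Meetecho slide state,
+    # so skip canceled and unscheduled sessions up front.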
+ scheduled_sessions = [
+ session
+ for session in get_sessions(meeting.number, acronym)
+ if session.current_status == "sched"
+ ]
+ sm = SlidesManager(api_config=settings.MEETECHO_API_CONFIG)
+ updated = []
+ for session in scheduled_sessions:
+ if sm.send_update(session):
+ updated.append(session)
+ if len(updated) > 0:
+ messages.success(
+ request,
+ f"Notified Meetecho about slides for {','.join(str(s) for s in updated)}",
+ )
+ elif sm.slides_notify_time is not None:
+ messages.warning(
+ request,
+ "No sessions were eligible for Meetecho slides update. Updates are "
+ f"only sent within {sm.slides_notify_time} before or after the session.",
+ )
+ else:
+ messages.warning(
+ request,
+ "No sessions were eligible for Meetecho slides update. Updates are "
+ "currently disabled.",
+ )
+ return redirect(
+ "ietf.meeting.views.session_details", num=meeting.number, acronym=acronym
+ )
+
+
def import_session_minutes(request, session_id, num):
"""Import session minutes from the ietf.notes.org site
diff --git a/ietf/meeting/views_proceedings.py b/ietf/meeting/views_proceedings.py
index d1169bff2d..639efa1da4 100644
--- a/ietf/meeting/views_proceedings.py
+++ b/ietf/meeting/views_proceedings.py
@@ -14,7 +14,7 @@
from ietf.meeting.models import Meeting, MeetingHost
from ietf.meeting.helpers import get_meeting
from ietf.name.models import ProceedingsMaterialTypeName
-from ietf.meeting.utils import handle_upload_file
+from ietf.meeting.utils import handle_upload_file, resolve_uploaded_material
from ietf.utils.text import xslugify
class UploadProceedingsMaterialForm(FileUploadForm):
@@ -150,7 +150,7 @@ def save_proceedings_material_doc(meeting, material_type, title, request, file=N
if events:
doc.save_with_history(events)
-
+ resolve_uploaded_material(meeting, doc)
return doc
diff --git a/ietf/message/admin.py b/ietf/message/admin.py
index 250e1eb596..6a876cdc70 100644
--- a/ietf/message/admin.py
+++ b/ietf/message/admin.py
@@ -27,7 +27,8 @@ def queryset(self, request, queryset):
class MessageAdmin(admin.ModelAdmin):
- list_display = ["sent_status", "subject", "by", "time", "groups"]
+ list_display = ["sent_status", "display_subject", "by", "time", "groups"]
+ list_display_links = ["display_subject"]
search_fields = ["subject", "body"]
raw_id_fields = ["by", "related_groups", "related_docs"]
list_filter = [
@@ -37,6 +38,10 @@ class MessageAdmin(admin.ModelAdmin):
ordering = ["-time"]
actions = ["retry_send"]
+ @admin.display(description="Subject", empty_value="(no subject)")
+ def display_subject(self, instance):
+ return instance.subject or None # None triggers the empty_value
+
def groups(self, instance):
return ", ".join(g.acronym for g in instance.related_groups.all())
diff --git a/ietf/name/admin.py b/ietf/name/admin.py
index 4336e0569c..b89d6d141c 100644
--- a/ietf/name/admin.py
+++ b/ietf/name/admin.py
@@ -57,6 +57,7 @@
from ietf.stats.models import CountryAlias
+from ietf.utils.admin import SaferTabularInline
class NameAdmin(admin.ModelAdmin):
@@ -86,7 +87,7 @@ class GroupTypeNameAdmin(NameAdmin):
admin.site.register(GroupTypeName, GroupTypeNameAdmin)
-class CountryAliasInline(admin.TabularInline):
+class CountryAliasInline(SaferTabularInline):
model = CountryAlias
extra = 1
diff --git a/ietf/name/serializers.py b/ietf/name/serializers.py
new file mode 100644
index 0000000000..a764f56051
--- /dev/null
+++ b/ietf/name/serializers.py
@@ -0,0 +1,11 @@
+# Copyright The IETF Trust 2024, All Rights Reserved
+"""django-rest-framework serializers"""
+from rest_framework import serializers
+
+from .models import StreamName
+
+
+class StreamNameSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = StreamName
+ fields = ["slug", "name", "desc"]
diff --git a/ietf/nomcom/tests.py b/ietf/nomcom/tests.py
index dcdb9ef836..210788ce07 100644
--- a/ietf/nomcom/tests.py
+++ b/ietf/nomcom/tests.py
@@ -1,5 +1,4 @@
-# Copyright The IETF Trust 2012-2023, All Rights Reserved
-# -*- coding: utf-8 -*-
+# Copyright The IETF Trust 2012-2025, All Rights Reserved
import datetime
@@ -27,8 +26,14 @@
from ietf.api.views import EmailIngestionError
from ietf.dbtemplate.factories import DBTemplateFactory
from ietf.dbtemplate.models import DBTemplate
-from ietf.doc.factories import DocEventFactory, WgDocumentAuthorFactory, \
- NewRevisionDocEventFactory, DocumentAuthorFactory
+from ietf.doc.factories import (
+ DocEventFactory,
+ WgDocumentAuthorFactory,
+ NewRevisionDocEventFactory,
+ DocumentAuthorFactory,
+ RfcAuthorFactory,
+ WgDraftFactory, WgRfcFactory,
+)
from ietf.group.factories import GroupFactory, GroupHistoryFactory, RoleFactory, RoleHistoryFactory
from ietf.group.models import Group, Role
from ietf.meeting.factories import MeetingFactory, AttendedFactory, RegistrationFactory
@@ -45,10 +50,20 @@
nomcom_kwargs_for_year, provide_private_key_to_test_client, \
key
from ietf.nomcom.tasks import send_nomcom_reminders_task
-from ietf.nomcom.utils import get_nomcom_by_year, make_nomineeposition, \
- get_hash_nominee_position, is_eligible, list_eligible, \
- get_eligibility_date, suggest_affiliation, ingest_feedback_email, \
- decorate_volunteers_with_qualifications, send_reminders, _is_time_to_send_reminder
+from ietf.nomcom.utils import (
+ get_nomcom_by_year,
+ make_nomineeposition,
+ get_hash_nominee_position,
+ is_eligible,
+ list_eligible,
+ get_eligibility_date,
+ suggest_affiliation,
+ ingest_feedback_email,
+ decorate_volunteers_with_qualifications,
+ send_reminders,
+ _is_time_to_send_reminder,
+ get_qualified_author_queryset,
+)
from ietf.person.factories import PersonFactory, EmailFactory
from ietf.person.models import Email, Person
from ietf.utils.mail import outbox, empty_outbox, get_payload_text
@@ -2440,6 +2455,85 @@ def test_get_eligibility_date(self):
NomComFactory(group__acronym=f'nomcom{this_year}', first_call_for_volunteers=datetime.date(this_year,5,6))
self.assertEqual(get_eligibility_date(),datetime.date(this_year,5,6))
+ def test_get_qualified_author_queryset(self):
+ """get_qualified_author_queryset implements the eligiblity rules correctly
+
+ This is not an exhaustive test of corner cases. Overlaps considerably with
+ rfc8989EligibilityTests.test_elig_by_author().
+ """
+ people = PersonFactory.create_batch(2)
+ extra_person = PersonFactory()
+ base_qs = Person.objects.filter(pk__in=[person.pk for person in people])
+ now = datetime.datetime.now(tz=datetime.UTC)
+ one_year = datetime.timedelta(days=365)
+
+ # Authors with no qualifying drafts
+ self.assertCountEqual(
+ get_qualified_author_queryset(base_qs, now - 5 * one_year, now), []
+ )
+
+ # Authors with one qualifying draft
+ approved_draft = WgDraftFactory(authors=people, states=[("draft", "active")])
+ DocEventFactory(
+ type="iesg_approved",
+ doc=approved_draft,
+ time=now - 4 * one_year,
+ )
+ self.assertCountEqual(
+ get_qualified_author_queryset(base_qs, now - 5 * one_year, now), []
+ )
+
+ # Create a draft that was published into an RFC. Give it an extra author who
+ # should not be eligible.
+ published_draft = WgDraftFactory(authors=people, states=[("draft", "rfc")])
+ DocEventFactory(
+ type="iesg_approved",
+ doc=published_draft,
+ time=now - 5.5 * one_year, # < 6 years ago
+ )
+ rfc = WgRfcFactory(
+ authors=people + [extra_person],
+ group=published_draft.group,
+ )
+ DocEventFactory(
+ type="published_rfc",
+ doc=rfc,
+ time=now - 0.5 * one_year, # < 1 year ago
+ )
+        # Period 6 years ago to 1 year ago - authors are eligible due to the
+        # two iesg-approved drafts in this window
+ self.assertCountEqual(
+ get_qualified_author_queryset(base_qs, now - 6 * one_year, now - one_year),
+ people,
+ )
+
+        # Period 5 years ago to now - authors are eligible due to the RFC
+        # publication together with the still-qualifying approved draft
+ self.assertCountEqual(
+ get_qualified_author_queryset(base_qs, now - 5 * one_year, now),
+ people,
+ )
+
+ # Use the extra_person to check that a single doc can't count both as an
+ # RFC _and_ an approved draft. Use an eligibility interval that includes both
+ # the approval and the RFC publication
+ self.assertCountEqual(
+ get_qualified_author_queryset(base_qs, now - 6 * one_year, now),
+ people, # does not include extra_person!
+ )
+
+ # Now add an RfcAuthor for only one of the two authors to the RFC. This should
+ # remove the other author from the eligibility list because the DocumentAuthor
+ # records are no longer used.
+ RfcAuthorFactory(
+ document=rfc,
+ person=people[0],
+ titlepage_name="P. Zero",
+ )
+ self.assertCountEqual(
+ get_qualified_author_queryset(base_qs, now - 5 * one_year, now),
+ [people[0]],
+ )
+
class rfc8713EligibilityTests(TestCase):
@@ -2724,33 +2818,41 @@ def test_elig_by_author(self):
ineligible = set()
p = PersonFactory()
- ineligible.add(p)
-
+ ineligible.add(p) # no RFCs or iesg-approved drafts
p = PersonFactory()
- da = WgDocumentAuthorFactory(person=p)
- DocEventFactory(type='published_rfc',doc=da.document,time=middle_date)
- ineligible.add(p)
+ doc = WgRfcFactory(authors=[p])
+ DocEventFactory(type='published_rfc', doc=doc, time=middle_date)
+ ineligible.add(p) # only one RFC
p = PersonFactory()
- da = WgDocumentAuthorFactory(person=p)
+ da = WgDocumentAuthorFactory(
+ person=p,
+ document__states=[("draft", "active"), ("draft-rfceditor", "ref")],
+ )
DocEventFactory(type='iesg_approved',doc=da.document,time=last_date)
- da = WgDocumentAuthorFactory(person=p)
- DocEventFactory(type='published_rfc',doc=da.document,time=first_date)
- eligible.add(p)
+ doc = WgRfcFactory(authors=[p])
+ DocEventFactory(type='published_rfc', doc=doc, time=first_date)
+ eligible.add(p) # one RFC and one iesg-approved draft
p = PersonFactory()
- da = WgDocumentAuthorFactory(person=p)
+ da = WgDocumentAuthorFactory(
+ person=p,
+ document__states=[("draft", "active"), ("draft-rfceditor", "ref")],
+ )
DocEventFactory(type='iesg_approved',doc=da.document,time=middle_date)
- da = WgDocumentAuthorFactory(person=p)
- DocEventFactory(type='published_rfc',doc=da.document,time=day_before_first_date)
- ineligible.add(p)
+ doc = WgRfcFactory(authors=[p])
+ DocEventFactory(type='published_rfc', doc=doc, time=day_before_first_date)
+ ineligible.add(p) # RFC is out of the eligibility window
p = PersonFactory()
- da = WgDocumentAuthorFactory(person=p)
+ da = WgDocumentAuthorFactory(
+ person=p,
+ document__states=[("draft", "active"), ("draft-rfceditor", "ref")],
+ )
DocEventFactory(type='iesg_approved',doc=da.document,time=day_after_last_date)
- da = WgDocumentAuthorFactory(person=p)
- DocEventFactory(type='published_rfc',doc=da.document,time=middle_date)
- ineligible.add(p)
+ doc = WgRfcFactory(authors=[p])
+ DocEventFactory(type='published_rfc', doc=doc, time=middle_date)
+ ineligible.add(p) # iesg approval is outside the eligibility window
for person in eligible:
self.assertTrue(is_eligible(person,nomcom))
@@ -2878,15 +2980,38 @@ def test_volunteer(self):
def test_suggest_affiliation(self):
person = PersonFactory()
- self.assertEqual(suggest_affiliation(person), '')
- da = DocumentAuthorFactory(person=person,affiliation='auth_affil')
+ self.assertEqual(suggest_affiliation(person), "")
+ rfc_da = DocumentAuthorFactory(
+ person=person,
+ document__type_id="rfc",
+ affiliation="",
+ )
+ rfc = rfc_da.document
+ DocEventFactory(doc=rfc, type="published_rfc")
+ self.assertEqual(suggest_affiliation(person), "")
+
+ rfc_da.affiliation = "rfc_da_affil"
+ rfc_da.save()
+ self.assertEqual(suggest_affiliation(person), "rfc_da_affil")
+
+ rfc_ra = RfcAuthorFactory(person=person, document=rfc, affiliation="")
+ self.assertEqual(suggest_affiliation(person), "")
+
+ rfc_ra.affiliation = "rfc_ra_affil"
+ rfc_ra.save()
+ self.assertEqual(suggest_affiliation(person), "rfc_ra_affil")
+
+ da = DocumentAuthorFactory(person=person, affiliation="auth_affil")
NewRevisionDocEventFactory(doc=da.document)
- self.assertEqual(suggest_affiliation(person), 'auth_affil')
+ self.assertEqual(suggest_affiliation(person), "auth_affil")
+
nc = NomComFactory()
- nc.volunteer_set.create(person=person,affiliation='volunteer_affil')
- self.assertEqual(suggest_affiliation(person), 'volunteer_affil')
- RegistrationFactory(person=person, affiliation='meeting_affil')
- self.assertEqual(suggest_affiliation(person), 'meeting_affil')
+ nc.volunteer_set.create(person=person, affiliation="volunteer_affil")
+ self.assertEqual(suggest_affiliation(person), "volunteer_affil")
+
+ RegistrationFactory(person=person, affiliation="meeting_affil")
+ self.assertEqual(suggest_affiliation(person), "meeting_affil")
+
class VolunteerDecoratorUnitTests(TestCase):
def test_decorate_volunteers_with_qualifications(self):
@@ -2922,10 +3047,10 @@ def test_decorate_volunteers_with_qualifications(self):
author_person = PersonFactory()
for i in range(2):
- da = WgDocumentAuthorFactory(person=author_person)
+ doc = WgRfcFactory(authors=[author_person])
DocEventFactory(
type='published_rfc',
- doc=da.document,
+ doc=doc,
time=datetime.datetime(
elig_date.year - 3,
elig_date.month,
diff --git a/ietf/nomcom/utils.py b/ietf/nomcom/utils.py
index dd651c2941..a2ab680df6 100644
--- a/ietf/nomcom/utils.py
+++ b/ietf/nomcom/utils.py
@@ -18,7 +18,7 @@
from email.utils import parseaddr
from textwrap import dedent
-from django.db.models import Q, Count
+from django.db.models import Q, Count, F, QuerySet
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
@@ -27,7 +27,7 @@
from django.shortcuts import get_object_or_404
from ietf.dbtemplate.models import DBTemplate
-from ietf.doc.models import DocEvent, NewRevisionDocEvent
+from ietf.doc.models import DocEvent, NewRevisionDocEvent, Document
from ietf.group.models import Group, Role
from ietf.person.models import Email, Person
from ietf.mailtrigger.utils import gather_address_lists
@@ -576,6 +576,70 @@ def get_8989_eligibility_querysets(date, base_qs):
def get_9389_eligibility_querysets(date, base_qs):
return get_threerule_eligibility_querysets(date, base_qs, three_of_five_callable=three_of_five_eligible_9389)
+
+def get_qualified_author_queryset(
+ base_qs: QuerySet[Person],
+ eligibility_period_start: datetime.datetime,
+ eligibility_period_end: datetime.datetime,
+):
+ """Filter a Person queryset, keeping those qualified by RFC 8989's author path
+
+ The author path is defined by "path 3" in section 4 of RFC 8989. It qualifies
+ a person who has been a front-page listed author or editor of at least two IETF-
+ stream RFCs within the last five years. An I-D in the RFC Editor queue that was
+ approved by the IESG is treated as an RFC, using the date of entry to the RFC
+ Editor queue as the date for qualification.
+
+    This function does not strictly enforce "in the RFC Editor queue" for IESG-approved
+ drafts when computing eligibility. In the overwhelming majority of cases, an IESG-
+ approved draft immediately enters the queue and goes on to be published, so this
+ simplification makes the calculation much easier and virtually never affects
+ eligibility.
+
+ Arguments eligibility_period_start and eligibility_period_end are datetimes that
+ mark the start and end of the eligibility period. These should be five years apart.
+ """
+ # First, get the RFCs using publication date
+ qualifying_rfc_pub_events = DocEvent.objects.filter(
+ type='published_rfc',
+ time__gte=eligibility_period_start,
+ time__lte=eligibility_period_end,
+ )
+ qualifying_rfcs = Document.objects.filter(
+ type_id="rfc",
+ docevent__in=qualifying_rfc_pub_events
+ ).annotate(
+ rfcauthor_count=Count("rfcauthor")
+ )
+ rfcs_with_rfcauthors = qualifying_rfcs.filter(rfcauthor_count__gt=0).distinct()
+ rfcs_without_rfcauthors = qualifying_rfcs.filter(rfcauthor_count=0).distinct()
+
+ # Second, get the IESG-approved I-Ds excluding any we're already counting as rfcs
+ qualifying_approval_events = DocEvent.objects.filter(
+ type='iesg_approved',
+ time__gte=eligibility_period_start,
+ time__lte=eligibility_period_end,
+ )
+ qualifying_drafts = Document.objects.filter(
+ type_id="draft",
+ docevent__in=qualifying_approval_events,
+ ).exclude(
+ relateddocument__relationship_id="became_rfc",
+ relateddocument__target__in=qualifying_rfcs,
+ ).distinct()
+
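+    # A person qualifies with at least two authorships across the qualifying
+    # documents; for RFCs with RfcAuthor records, those records replace the
+    # DocumentAuthor records.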
+ return base_qs.filter(
+ Q(documentauthor__document__in=qualifying_drafts)
+ | Q(rfcauthor__document__in=rfcs_with_rfcauthors)
+ | Q(documentauthor__document__in=rfcs_without_rfcauthors)
+ ).annotate(
+ document_author_count=Count('documentauthor'),
+ rfc_author_count=Count("rfcauthor")
+ ).annotate(
+ authorship_count=F("document_author_count") + F("rfc_author_count")
+ ).filter(authorship_count__gte=2)
+
+
def get_threerule_eligibility_querysets(date, base_qs, three_of_five_callable):
if not base_qs:
base_qs = Person.objects.all()
@@ -608,14 +672,7 @@ def get_threerule_eligibility_querysets(date, base_qs, three_of_five_callable):
)
).distinct()
- rfc_pks = set(DocEvent.objects.filter(type='published_rfc', time__gte=five_years_ago, time__lte=date_as_dt).values_list('doc__pk', flat=True))
- iesgappr_pks = set(DocEvent.objects.filter(type='iesg_approved', time__gte=five_years_ago, time__lte=date_as_dt).values_list('doc__pk',flat=True))
- qualifying_pks = rfc_pks.union(iesgappr_pks.difference(rfc_pks))
- author_qs = base_qs.filter(
- documentauthor__document__pk__in=qualifying_pks
- ).annotate(
- document_author_count = Count('documentauthor')
- ).filter(document_author_count__gte=2)
+ author_qs = get_qualified_author_queryset(base_qs, five_years_ago, date_as_dt)
return three_of_five_qs, officer_qs, author_qs
def list_eligible_8989(date, base_qs=None):
@@ -691,18 +748,42 @@ def three_of_five_eligible_9389(previous_five, queryset=None):
counts[id] += 1
return queryset.filter(pk__in=[id for id, count in counts.items() if count >= 3])
-def suggest_affiliation(person):
+def suggest_affiliation(person) -> str:
+ """Heuristically suggest a current affiliation for a Person"""
recent_meeting = person.registration_set.order_by('-meeting__date').first()
- affiliation = recent_meeting.affiliation if recent_meeting else ''
- if not affiliation:
- recent_volunteer = person.volunteer_set.order_by('-nomcom__group__acronym').first()
- if recent_volunteer:
- affiliation = recent_volunteer.affiliation
- if not affiliation:
- recent_draft_revision = NewRevisionDocEvent.objects.filter(doc__type_id='draft',doc__documentauthor__person=person).order_by('-time').first()
- if recent_draft_revision:
- affiliation = recent_draft_revision.doc.documentauthor_set.filter(person=person).first().affiliation
- return affiliation
+ if recent_meeting and recent_meeting.affiliation:
+ return recent_meeting.affiliation
+
+ recent_volunteer = person.volunteer_set.order_by('-nomcom__group__acronym').first()
+ if recent_volunteer and recent_volunteer.affiliation:
+ return recent_volunteer.affiliation
+
+ recent_draft_revision = NewRevisionDocEvent.objects.filter(
+ doc__type_id="draft",
+ doc__documentauthor__person=person,
+ ).order_by("-time").first()
+ if recent_draft_revision:
+ draft_author = recent_draft_revision.doc.documentauthor_set.filter(
+ person=person
+ ).first()
+ if draft_author and draft_author.affiliation:
+ return draft_author.affiliation
+
+ recent_rfc_publication = DocEvent.objects.filter(
+ Q(doc__documentauthor__person=person) | Q(doc__rfcauthor__person=person),
+ doc__type_id="rfc",
+ type="published_rfc",
+ ).order_by("-time").first()
+ if recent_rfc_publication:
+ rfc = recent_rfc_publication.doc
+ if rfc.rfcauthor_set.exists():
+ rfc_author = rfc.rfcauthor_set.filter(person=person).first()
+ else:
+ rfc_author = rfc.documentauthor_set.filter(person=person).first()
+ if rfc_author and rfc_author.affiliation:
+ return rfc_author.affiliation
+ return ""
+
def extract_volunteers(year):
nomcom = get_nomcom_by_year(year)
diff --git a/ietf/person/admin.py b/ietf/person/admin.py
index cd8ca2abf1..f46edcf8ae 100644
--- a/ietf/person/admin.py
+++ b/ietf/person/admin.py
@@ -7,6 +7,7 @@
from ietf.person.models import Email, Alias, Person, PersonalApiKey, PersonEvent, PersonApiKeyEvent, PersonExtResource
from ietf.person.name import name_parts
+from ietf.utils.admin import SaferStackedInline, SaferTabularInline
from ietf.utils.validators import validate_external_resource_value
@@ -16,7 +17,7 @@ class EmailAdmin(simple_history.admin.SimpleHistoryAdmin):
search_fields = ["address", "person__name", ]
admin.site.register(Email, EmailAdmin)
-class EmailInline(admin.TabularInline):
+class EmailInline(SaferTabularInline):
model = Email
class AliasAdmin(admin.ModelAdmin):
@@ -25,7 +26,7 @@ class AliasAdmin(admin.ModelAdmin):
raw_id_fields = ["person"]
admin.site.register(Alias, AliasAdmin)
-class AliasInline(admin.StackedInline):
+class AliasInline(SaferStackedInline):
model = Alias
class PersonAdmin(simple_history.admin.SimpleHistoryAdmin):
diff --git a/ietf/person/forms.py b/ietf/person/forms.py
index 81ee362561..7eef8aa17b 100644
--- a/ietf/person/forms.py
+++ b/ietf/person/forms.py
@@ -1,15 +1,26 @@
-# Copyright The IETF Trust 2018-2020, All Rights Reserved
+# Copyright The IETF Trust 2018-2025, All Rights Reserved
# -*- coding: utf-8 -*-
from django import forms
+
from ietf.person.models import Person
+from ietf.utils.fields import MultiEmailField, NameAddrEmailField
class MergeForm(forms.Form):
source = forms.IntegerField(label='Source Person ID')
target = forms.IntegerField(label='Target Person ID')
+ def __init__(self, *args, **kwargs):
+ self.readonly = False
+ if 'readonly' in kwargs:
+ self.readonly = kwargs.pop('readonly')
+ super().__init__(*args, **kwargs)
+ if self.readonly:
+ self.fields['source'].widget.attrs['readonly'] = True
+ self.fields['target'].widget.attrs['readonly'] = True
+
def clean_source(self):
return self.get_person(self.cleaned_data['source'])
@@ -21,3 +32,11 @@ def get_person(self, pk):
return Person.objects.get(pk=pk)
except Person.DoesNotExist:
raise forms.ValidationError("ID does not exist")
+
+
+class MergeRequestForm(forms.Form):
+ to = MultiEmailField()
+ frm = NameAddrEmailField()
+ reply_to = MultiEmailField()
+ subject = forms.CharField()
+ body = forms.CharField(widget=forms.Textarea)
diff --git a/ietf/person/models.py b/ietf/person/models.py
index 03cf0c87fb..3ab89289a6 100644
--- a/ietf/person/models.py
+++ b/ietf/person/models.py
@@ -87,7 +87,7 @@ def short(self):
else:
prefix, first, middle, last, suffix = self.ascii_parts()
return (first and first[0]+"." or "")+(middle or "")+" "+last+(suffix and " "+suffix or "")
- def plain_name(self):
+ def plain_name(self) -> str:
if not hasattr(self, '_cached_plain_name'):
if self.plain:
self._cached_plain_name = self.plain
@@ -203,7 +203,10 @@ def has_drafts(self):
def rfcs(self):
from ietf.doc.models import Document
- rfcs = list(Document.objects.filter(documentauthor__person=self, type='rfc'))
+        # When RfcAuthors are populated, this may return extra documents if an
+        # author was dropped from the author list between the final draft and the
+        # published RFC. DocumentAuthors should be ignored when an RfcAuthor
+        # exists for a draft.
+ rfcs = list(Document.objects.filter(type="rfc").filter(models.Q(documentauthor__person=self)|models.Q(rfcauthor__person=self)).distinct())
rfcs.sort(key=lambda d: d.name )
return rfcs
@@ -266,11 +269,16 @@ def available_api_endpoints(self):
def cdn_photo_url(self, size=80):
if self.photo:
if settings.SERVE_CDN_PHOTOS:
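+            # Outside production, photos are not served locally; temporarily
+            # point MEDIA_URL at the production media tree so the generated
+            # source URL resolves, then restore it below.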
+ if settings.SERVER_MODE != "production":
+                original_media_url = settings.MEDIA_URL
+ settings.MEDIA_URL = "https://www.ietf.org/lib/dt/media/"
source_url = self.photo.url
if source_url.startswith(settings.IETF_HOST_URL):
source_url = source_url[len(settings.IETF_HOST_URL):]
elif source_url.startswith('/'):
source_url = source_url[1:]
+ if settings.SERVER_MODE != "production":
+                settings.MEDIA_URL = original_media_url
return f'{settings.IETF_HOST_URL}cdn-cgi/image/fit=scale-down,width={size},height={size}/{source_url}'
else:
datatracker_photo_path = urlreverse('ietf.person.views.photo', kwargs={'email_or_name': self.email()})
diff --git a/ietf/person/tests.py b/ietf/person/tests.py
index 6326362fd8..f55d8b8a34 100644
--- a/ietf/person/tests.py
+++ b/ietf/person/tests.py
@@ -1,4 +1,4 @@
-# Copyright The IETF Trust 2014-2022, All Rights Reserved
+# Copyright The IETF Trust 2014-2025, All Rights Reserved
# -*- coding: utf-8 -*-
@@ -10,7 +10,6 @@
from PIL import Image
from pyquery import PyQuery
-
from django.core.exceptions import ValidationError
from django.http import HttpRequest
from django.test import override_settings
@@ -23,6 +22,7 @@
from ietf.community.models import CommunityList
from ietf.group.factories import RoleFactory
from ietf.group.models import Group
+from ietf.message.models import Message
from ietf.nomcom.models import NomCom
from ietf.nomcom.test_data import nomcom_test_data
from ietf.nomcom.factories import NomComFactory, NomineeFactory, NominationFactory, FeedbackFactory, PositionFactory
@@ -208,13 +208,13 @@ def test_merge(self):
def test_merge_with_params(self):
p1 = get_person_no_user()
p2 = PersonFactory()
- url = urlreverse("ietf.person.views.merge") + "?source={}&target={}".format(p1.pk, p2.pk)
+ url = urlreverse("ietf.person.views.merge_submit") + "?source={}&target={}".format(p1.pk, p2.pk)
login_testing_unauthorized(self, "secretary", url)
r = self.client.get(url)
self.assertContains(r, 'retaining login', status_code=200)
def test_merge_with_params_bad_id(self):
- url = urlreverse("ietf.person.views.merge") + "?source=1000&target=2000"
+ url = urlreverse("ietf.person.views.merge_submit") + "?source=1000&target=2000"
login_testing_unauthorized(self, "secretary", url)
r = self.client.get(url)
self.assertContains(r, 'ID does not exist', status_code=200)
@@ -222,7 +222,7 @@ def test_merge_with_params_bad_id(self):
def test_merge_post(self):
p1 = get_person_no_user()
p2 = PersonFactory()
- url = urlreverse("ietf.person.views.merge")
+ url = urlreverse("ietf.person.views.merge_submit")
expected_url = urlreverse("ietf.secr.rolodex.views.view", kwargs={'id': p2.pk})
login_testing_unauthorized(self, "secretary", url)
data = {'source': p1.pk, 'target': p2.pk}
@@ -451,6 +451,30 @@ def test_dots(self):
ncchair = RoleFactory(group__acronym='nomcom2020',group__type_id='nomcom',name_id='chair').person
self.assertEqual(get_dots(ncchair),['nomcom'])
+ def test_send_merge_request(self):
+ empty_outbox()
+ message_count_before = Message.objects.count()
+ source = PersonFactory()
+ target = PersonFactory()
+ url = urlreverse('ietf.person.views.send_merge_request')
+ url = url + f'?source={source.pk}&target={target.pk}'
+ login_testing_unauthorized(self, 'secretary', url)
+ r = self.client.get(url)
+ initial = r.context['form'].initial
+ subject = 'Action requested: Merging possible duplicate IETF Datatracker accounts'
+ self.assertEqual(initial['to'], ', '.join([source.user.username, target.user.username]))
+ self.assertEqual(initial['subject'], subject)
+ self.assertEqual(initial['reply_to'], 'support@ietf.org')
+ self.assertEqual(r.status_code, 200)
+ r = self.client.post(url, data=initial)
+ self.assertEqual(r.status_code, 302)
+ self.assertEqual(len(outbox), 1)
+ self.assertIn(source.user.username, outbox[0]['To'])
+ message_count_after = Message.objects.count()
+ message = Message.objects.last()
+ self.assertEqual(message_count_after, message_count_before + 1)
+ self.assertIn(source.user.username, message.to)
+
class TaskTests(TestCase):
@mock.patch("ietf.person.tasks.log.log")
diff --git a/ietf/person/urls.py b/ietf/person/urls.py
index 867646fe39..f3eccd04b7 100644
--- a/ietf/person/urls.py
+++ b/ietf/person/urls.py
@@ -1,8 +1,12 @@
+# Copyright The IETF Trust 2009-2025, All Rights Reserved
+# -*- coding: utf-8 -*-
from ietf.person import views, ajax
from ietf.utils.urls import url
urlpatterns = [
url(r'^merge/?$', views.merge),
+ url(r'^merge/submit/?$', views.merge_submit),
+ url(r'^merge/send_request/?$', views.send_merge_request),
    url(r'^search/(?P<model_name>(person|email))/$', views.ajax_select2_search),
    url(r'^(?P<personid>[0-9]+)/email.json$', ajax.person_email_json),
    url(r'^(?P<email_or_name>[^/]+)$', views.profile),
diff --git a/ietf/person/views.py b/ietf/person/views.py
index a37b164311..d0b5912431 100644
--- a/ietf/person/views.py
+++ b/ietf/person/views.py
@@ -1,14 +1,16 @@
-# Copyright The IETF Trust 2012-2020, All Rights Reserved
+# Copyright The IETF Trust 2012-2025, All Rights Reserved
# -*- coding: utf-8 -*-
from io import StringIO, BytesIO
from PIL import Image
+from django.conf import settings
from django.contrib import messages
from django.db.models import Q
from django.http import HttpResponse, Http404
from django.shortcuts import render, redirect
+from django.template.loader import render_to_string
from django.utils import timezone
import debug # pyflakes:ignore
@@ -16,8 +18,9 @@
from ietf.ietfauth.utils import role_required
from ietf.person.models import Email, Person
from ietf.person.fields import select2_id_name_json
-from ietf.person.forms import MergeForm
+from ietf.person.forms import MergeForm, MergeRequestForm
from ietf.person.utils import handle_users, merge_persons, lookup_persons
+from ietf.utils.mail import send_mail_text
def ajax_select2_search(request, model_name):
@@ -98,16 +101,19 @@ def photo(request, email_or_name):
@role_required("Secretariat")
def merge(request):
form = MergeForm()
- method = 'get'
+ return render(request, 'person/merge.html', {'form': form})
+
+
+@role_required("Secretariat")
+def merge_submit(request):
change_details = ''
warn_messages = []
source = None
target = None
if request.method == "GET":
- form = MergeForm()
if request.GET:
- form = MergeForm(request.GET)
+ form = MergeForm(request.GET, readonly=True)
if form.is_valid():
source = form.cleaned_data.get('source')
target = form.cleaned_data.get('target')
@@ -116,12 +122,9 @@ def merge(request):
if source.user.last_login and target.user.last_login and source.user.last_login > target.user.last_login:
warn_messages.append('WARNING: The most recently used login is being deleted!')
change_details = handle_users(source, target, check_only=True)
- method = 'post'
- else:
- method = 'get'
if request.method == "POST":
- form = MergeForm(request.POST)
+ form = MergeForm(request.POST, readonly=True)
if form.is_valid():
source = form.cleaned_data.get('source')
source_id = source.id
@@ -136,11 +139,72 @@ def merge(request):
messages.error(request, output)
return redirect('ietf.secr.rolodex.views.view', id=target.pk)
- return render(request, 'person/merge.html', {
+ return render(request, 'person/merge_submit.html', {
'form': form,
- 'method': method,
'change_details': change_details,
'source': source,
'target': target,
'warn_messages': warn_messages,
})
+
+
+@role_required("Secretariat")
+def send_merge_request(request):
+ if request.method == 'GET':
+ merge_form = MergeForm(request.GET)
+ if merge_form.is_valid():
+ source = merge_form.cleaned_data['source']
+ target = merge_form.cleaned_data['target']
+ to = []
+ if source.email():
+ to.append(source.email().address)
+ if target.email():
+ to.append(target.email().address)
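+        # Display the login name where an account exists; otherwise fall back
+        # to the person's primary email address.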
+ if source.user:
+ source_account = source.user.username
+ else:
+ source_account = source.email()
+ if target.user:
+ target_account = target.user.username
+ else:
+ target_account = target.email()
+ sender_name = request.user.person.name
+ subject = 'Action requested: Merging possible duplicate IETF Datatracker accounts'
+ context = {
+ 'source_account': source_account,
+ 'target_account': target_account,
+ 'sender_name': sender_name,
+ }
+ body = render_to_string('person/merge_request_email.txt', context)
+ initial = {
+ 'to': ', '.join(to),
+ 'frm': settings.DEFAULT_FROM_EMAIL,
+ 'reply_to': 'support@ietf.org',
+ 'subject': subject,
+ 'body': body,
+ 'by': request.user.person.pk,
+ }
+ form = MergeRequestForm(initial=initial)
+ else:
+ messages.error(request, "Error requesting merge email: " + merge_form.errors.as_text())
+ return redirect("ietf.person.views.merge")
+
+ if request.method == 'POST':
+ form = MergeRequestForm(request.POST)
+ if form.is_valid():
+ extra = {"Reply-To": form.cleaned_data.get("reply_to")}
+ send_mail_text(
+ request,
+ form.cleaned_data.get("to"),
+ form.cleaned_data.get("frm"),
+ form.cleaned_data.get("subject"),
+ form.cleaned_data.get("body"),
+ extra=extra,
+ )
+
+ messages.success(request, "The merge confirmation email was sent.")
+ return redirect("ietf.person.views.merge")
+
+ return render(request, "person/send_merge_request.html", {
+ "form": form,
+ })
diff --git a/ietf/secr/telechat/tests.py b/ietf/secr/telechat/tests.py
index fa26d33a5c..91ccde2187 100644
--- a/ietf/secr/telechat/tests.py
+++ b/ietf/secr/telechat/tests.py
@@ -256,7 +256,7 @@ def test_doc_detail_post_update_state_action_holder_automation(self):
self.assertEqual(response.status_code,302)
draft = Document.objects.get(name=draft.name)
self.assertEqual(draft.get_state('draft-iesg').slug,'defer')
- self.assertCountEqual(draft.action_holders.all(), [draft.ad] + draft.authors())
+ self.assertCountEqual(draft.action_holders.all(), [draft.ad] + draft.author_persons())
self.assertEqual(draft.docevent_set.filter(type='changed_action_holders').count(), 1)
# Removing need-rev should remove authors
@@ -273,7 +273,7 @@ def test_doc_detail_post_update_state_action_holder_automation(self):
# Setting to approved should remove all action holders
# noinspection DjangoOrm
- draft.action_holders.add(*(draft.authors())) # add() with through model ok in Django 2.2+
+ draft.action_holders.add(*(draft.author_persons())) # add() with through model ok in Django 2.2+
response = self.client.post(url,{
'submit': 'update_state',
'state': State.objects.get(type_id='draft-iesg', slug='approved').pk,
diff --git a/ietf/settings.py b/ietf/settings.py
index f8d8a28d65..3aa45a453c 100644
--- a/ietf/settings.py
+++ b/ietf/settings.py
@@ -1,4 +1,4 @@
-# Copyright The IETF Trust 2007-2025, All Rights Reserved
+# Copyright The IETF Trust 2007-2026, All Rights Reserved
# -*- coding: utf-8 -*-
@@ -13,6 +13,7 @@
import warnings
from hashlib import sha384
from typing import Any, Dict, List, Tuple # pyflakes:ignore
+from django.http import UnreadablePostError
# DeprecationWarnings are suppressed by default, enable them
warnings.simplefilter("always", DeprecationWarning)
@@ -22,6 +23,7 @@
warnings.filterwarnings("ignore", message="The django.utils.timezone.utc alias is deprecated.", module="oidc_provider")
warnings.filterwarnings("ignore", message="The django.utils.datetime_safe module is deprecated.", module="tastypie")
warnings.filterwarnings("ignore", message="The USE_DEPRECATED_PYTZ setting,") # https://github.com/ietf-tools/datatracker/issues/5635
+warnings.filterwarnings("ignore", message="The is_dst argument to make_aware\\(\\)") # caused by django-filters when USE_DEPRECATED_PYTZ is true
warnings.filterwarnings("ignore", message="The USE_L10N setting is deprecated.") # https://github.com/ietf-tools/datatracker/issues/5648
warnings.filterwarnings("ignore", message="django.contrib.auth.hashers.CryptPasswordHasher is deprecated.") # https://github.com/ietf-tools/datatracker/issues/5663
@@ -225,159 +227,124 @@
BLOBSTORAGE_CONNECT_TIMEOUT = 10 # seconds; boto3 default is 60
BLOBSTORAGE_READ_TIMEOUT = 10 # seconds; boto3 default is 60
+# Caching for agenda data in seconds
+AGENDA_CACHE_TIMEOUT_DEFAULT = 8 * 24 * 60 * 60 # 8 days
+AGENDA_CACHE_TIMEOUT_CURRENT_MEETING = 6 * 60 # 6 minutes
+
WSGI_APPLICATION = "ietf.wsgi.application"
AUTHENTICATION_BACKENDS = ( 'ietf.ietfauth.backends.CaseInsensitiveModelBackend', )
-FILE_UPLOAD_PERMISSIONS = 0o644
+FILE_UPLOAD_PERMISSIONS = 0o644
-# ------------------------------------------------------------------------
-# Django/Python Logging Framework Modifications
+FIRST_V3_RFC = 8650
-# Filter out "Invalid HTTP_HOST" emails
-# Based on http://www.tiwoc.de/blog/2013/03/django-prevent-email-notification-on-suspiciousoperation/
-from django.core.exceptions import SuspiciousOperation
-def skip_suspicious_operations(record):
- if record.exc_info:
- exc_value = record.exc_info[1]
- if isinstance(exc_value, SuspiciousOperation):
- return False
- return True
-# Filter out UreadablePostError:
-from django.http import UnreadablePostError
+#
+# Logging config
+#
+
+# Callback to filter out UnreadablePostError:
def skip_unreadable_post(record):
if record.exc_info:
- exc_type, exc_value = record.exc_info[:2] # pylint: disable=unused-variable
+ exc_type, exc_value = record.exc_info[:2] # pylint: disable=unused-variable
if isinstance(exc_value, UnreadablePostError):
return False
return True
-# Copied from DEFAULT_LOGGING as of Django 1.10.5 on 22 Feb 2017, and modified
-# to incorporate html logging, invalid http_host filtering, and more.
-# Changes from the default has comments.
-
-# The Python logging flow is as follows:
-# (see https://docs.python.org/2.7/howto/logging.html#logging-flow)
-#
-# Init: get a Logger: logger = logging.getLogger(name)
-#
-# Logging call, e.g. logger.error(level, msg, *args, exc_info=(...), extra={...})
-# --> Logger (discard if level too low for this logger)
-# (create log record from level, msg, args, exc_info, extra)
-# --> Filters (discard if any filter attach to logger rejects record)
-# --> Handlers (discard if level too low for handler)
-# --> Filters (discard if any filter attached to handler rejects record)
-# --> Formatter (format log record and emit)
-#
-
LOGGING = {
- 'version': 1,
- 'disable_existing_loggers': False,
- #
- 'loggers': {
- 'django': {
- 'handlers': ['console', 'mail_admins'],
- 'level': 'INFO',
- },
- 'django.request': {
- 'handlers': ['console'],
- 'level': 'ERROR',
+ "version": 1,
+ "disable_existing_loggers": False,
+ "loggers": {
+ "celery": {
+ "handlers": ["console"],
+ "level": "INFO",
},
- 'django.server': {
- 'handlers': ['django.server'],
- 'level': 'INFO',
+ "datatracker": {
+ "handlers": ["console"],
+ "level": "INFO",
},
- 'django.security': {
- 'handlers': ['console', ],
- 'level': 'INFO',
+ "django": {
+ "handlers": ["console", "mail_admins"],
+ "level": "INFO",
},
- 'oidc_provider': {
- 'handlers': ['console', ],
- 'level': 'DEBUG',
+ "django.request": {"level": "ERROR"}, # only log 5xx, ignore 4xx
+ "django.security": {
+ # SuspiciousOperation errors - log to console only
+ "handlers": ["console"],
+ "propagate": False, # no further handling please
},
- 'datatracker': {
- 'handlers': ['console'],
- 'level': 'INFO',
+ "django.server": {
+ # Only used by Django's runserver development server
+ "handlers": ["django.server"],
+ "level": "INFO",
},
- 'celery': {
- 'handlers': ['console'],
- 'level': 'INFO',
+ "oidc_provider": {
+ "handlers": ["console"],
+ "level": "DEBUG",
},
},
- #
- # No logger filters
- #
- 'handlers': {
- 'console': {
- 'level': 'DEBUG',
- 'class': 'logging.StreamHandler',
- 'formatter': 'plain',
+ "handlers": {
+ "console": {
+ "level": "DEBUG",
+ "class": "logging.StreamHandler",
+ "formatter": "plain",
},
- 'debug_console': {
- # Active only when DEBUG=True
- 'level': 'DEBUG',
- 'filters': ['require_debug_true'],
- 'class': 'logging.StreamHandler',
- 'formatter': 'plain',
+ "debug_console": {
+ "level": "DEBUG",
+ "filters": ["require_debug_true"],
+ "class": "logging.StreamHandler",
+ "formatter": "plain",
},
- 'django.server': {
- 'level': 'INFO',
- 'class': 'logging.StreamHandler',
- 'formatter': 'django.server',
+ "django.server": {
+ "level": "INFO",
+ "class": "logging.StreamHandler",
+ "formatter": "django.server",
},
- 'mail_admins': {
- 'level': 'ERROR',
- 'filters': [
- 'require_debug_false',
- 'skip_suspicious_operations', # custom
- 'skip_unreadable_posts', # custom
+ "mail_admins": {
+ "level": "ERROR",
+ "filters": [
+ "require_debug_false",
+ "skip_unreadable_posts",
],
- 'class': 'django.utils.log.AdminEmailHandler',
- 'include_html': True, # non-default
- }
+ "class": "django.utils.log.AdminEmailHandler",
+ "include_html": True,
+ },
},
- #
# All these are used by handlers
- 'filters': {
- 'require_debug_false': {
- '()': 'django.utils.log.RequireDebugFalse',
- },
- 'require_debug_true': {
- '()': 'django.utils.log.RequireDebugTrue',
+ "filters": {
+ "require_debug_false": {
+ "()": "django.utils.log.RequireDebugFalse",
},
- # custom filter, function defined above:
- 'skip_suspicious_operations': {
- '()': 'django.utils.log.CallbackFilter',
- 'callback': skip_suspicious_operations,
+ "require_debug_true": {
+ "()": "django.utils.log.RequireDebugTrue",
},
# custom filter, function defined above:
- 'skip_unreadable_posts': {
- '()': 'django.utils.log.CallbackFilter',
- 'callback': skip_unreadable_post,
+ "skip_unreadable_posts": {
+ "()": "django.utils.log.CallbackFilter",
+ "callback": skip_unreadable_post,
},
},
- # And finally the formatters
- 'formatters': {
- 'django.server': {
- '()': 'django.utils.log.ServerFormatter',
- 'format': '[%(server_time)s] %(message)s',
+ "formatters": {
+ "django.server": {
+ "()": "django.utils.log.ServerFormatter",
+ "format": "[%(server_time)s] %(message)s",
},
- 'plain': {
- 'style': '{',
- 'format': '{levelname}: {name}:{lineno}: {message}',
+ "plain": {
+ "style": "{",
+ "format": "{levelname}: {name}:{lineno}: {message}",
},
- 'json' : {
+ "json": {
"class": "ietf.utils.jsonlogger.DatatrackerJsonFormatter",
"style": "{",
- "format": "{asctime}{levelname}{message}{name}{pathname}{lineno}{funcName}{process}",
- }
+ "format": (
+ "{asctime}{levelname}{message}{name}{pathname}{lineno}{funcName}"
+ "{process}{status_code}"
+ ),
+ },
},
}
-# End logging
-# ------------------------------------------------------------------------
-
X_FRAME_OPTIONS = 'SAMEORIGIN'
CSRF_TRUSTED_ORIGINS = [
@@ -451,6 +418,7 @@ def skip_unreadable_post(record):
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
+ "ietf.middleware.is_authenticated_header_middleware",
"django.middleware.http.ConditionalGetMiddleware",
"simple_history.middleware.HistoryRequestMiddleware",
# comment in this to get logging of SQL insert and update statements:
@@ -461,7 +429,6 @@ def skip_unreadable_post(record):
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
"ietf.middleware.unicode_nfkc_normalization_middleware",
- "ietf.middleware.is_authenticated_header_middleware",
]
ROOT_URLCONF = 'ietf.urls'
@@ -500,6 +467,7 @@ def skip_unreadable_post(record):
'django_celery_results',
'corsheaders',
'django_markup',
+ 'django_filters',
'oidc_provider',
'drf_spectacular',
'drf_standardized_errors',
@@ -807,6 +775,7 @@ def skip_unreadable_post(record):
"polls",
"procmaterials",
"review",
+ "rfc",
"slides",
"staging",
"statchg",
@@ -831,6 +800,11 @@ def skip_unreadable_post(record):
"slides",
]
+# Other storages
+STORAGES["red_bucket"] = {
+ "BACKEND": "django.core.files.storage.InMemoryStorage",
+ "OPTIONS": {"location": "red_bucket"},
+}
# Override this in settings_local.py if needed
# *_PATH variables ends with a slash/ .
@@ -925,10 +899,11 @@ def skip_unreadable_post(record):
RFC_EDITOR_QUEUE_URL = "https://www.rfc-editor.org/queue2.xml"
RFC_EDITOR_INDEX_URL = "https://www.rfc-editor.org/rfc/rfc-index.xml"
RFC_EDITOR_ERRATA_JSON_URL = "https://www.rfc-editor.org/errata.json"
-RFC_EDITOR_ERRATA_URL = "https://www.rfc-editor.org/errata_search.php?rfc={rfc_number}"
RFC_EDITOR_INLINE_ERRATA_URL = "https://www.rfc-editor.org/rfc/inline-errata/rfc{rfc_number}.html"
+RFC_EDITOR_ERRATA_BASE_URL = "https://www.rfc-editor.org/errata/"
RFC_EDITOR_INFO_BASE_URL = "https://www.rfc-editor.org/info/"
+
# NomCom Tool settings
ROLODEX_URL = ""
NOMCOM_PUBLIC_KEYS_DIR = '/a/www/nomcom/public_keys/'
@@ -1361,6 +1336,11 @@ def skip_unreadable_post(record):
MEETECHO_AUDIO_STREAM_URL = "https://mp3.conf.meetecho.com/ietf{session.meeting.number}/{session.pk}.m3u"
MEETECHO_SESSION_RECORDING_URL = "https://meetecho-player.ietf.org/playout/?session={session_label}"
+# Errata system API configuration
+# settings_local.py should provide:
+# ERRATA_METADATA_NOTIFICATION_URL
+# ERRATA_METADATA_NOTIFICATION_API_KEY
+
# Put the production SECRET_KEY in settings_local.py, and also any other
# sensitive or site-specific changes. DO NOT commit settings_local.py to svn.
from ietf.settings_local import * # pyflakes:ignore pylint: disable=wildcard-import
@@ -1395,6 +1375,16 @@ def skip_unreadable_post(record):
f"{key_prefix}:{version}:{sha384(str(key).encode('utf8')).hexdigest()}"
),
},
+ "agenda": {
+ "BACKEND": "ietf.utils.cache.LenientMemcacheCache",
+ "LOCATION": f"{MEMCACHED_HOST}:{MEMCACHED_PORT}",
+ # No release-specific VERSION setting.
+ "KEY_PREFIX": "ietf:dt:agenda",
+ # Key function is default except with sha384-encoded key
+ "KEY_FUNCTION": lambda key, key_prefix, version: (
+ f"{key_prefix}:{version}:{sha384(str(key).encode('utf8')).hexdigest()}"
+ ),
+ },
"proceedings": {
"BACKEND": "ietf.utils.cache.LenientMemcacheCache",
"LOCATION": f"{MEMCACHED_HOST}:{MEMCACHED_PORT}",
@@ -1448,6 +1438,17 @@ def skip_unreadable_post(record):
"VERSION": __version__,
"KEY_PREFIX": "ietf:dt",
},
+ "agenda": {
+ "BACKEND": "django.core.cache.backends.dummy.DummyCache",
+ # "BACKEND": "ietf.utils.cache.LenientMemcacheCache",
+ # "LOCATION": "127.0.0.1:11211",
+ # No release-specific VERSION setting.
+ "KEY_PREFIX": "ietf:dt:agenda",
+ # Key function is default except with sha384-encoded key
+ "KEY_FUNCTION": lambda key, key_prefix, version: (
+ f"{key_prefix}:{version}:{sha384(str(key).encode('utf8')).hexdigest()}"
+ ),
+ },
"proceedings": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
# "BACKEND": "ietf.utils.cache.LenientMemcacheCache",
@@ -1514,11 +1515,17 @@ def skip_unreadable_post(record):
NOMCOM_APP_SECRET = b'\x9b\xdas1\xec\xd5\xa0SI~\xcb\xd4\xf5t\x99\xc4i\xd7\x9f\x0b\xa9\xe8\xfeY\x80$\x1e\x12tN:\x84'
ALLOWED_HOSTS = ['*',]
-
+
try:
# see https://github.com/omarish/django-cprofile-middleware
- import django_cprofile_middleware # pyflakes:ignore
- MIDDLEWARE = MIDDLEWARE + ['django_cprofile_middleware.middleware.ProfilerMiddleware', ]
+ import django_cprofile_middleware # pyflakes:ignore
+
+ MIDDLEWARE = MIDDLEWARE + [
+ "django_cprofile_middleware.middleware.ProfilerMiddleware",
+ ]
+ DJANGO_CPROFILE_MIDDLEWARE_REQUIRE_STAFF = (
+ False # Do not use this setting for a public site!
+ )
except ImportError:
pass
@@ -1531,3 +1538,5 @@ def skip_unreadable_post(record):
YOUTUBE_DOMAINS = ['www.youtube.com', 'youtube.com', 'youtu.be', 'm.youtube.com', 'youtube-nocookie.com', 'www.youtube-nocookie.com']
+
+IETF_DOI_PREFIX = "10.17487"
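
A minimal usage sketch for the new "agenda" cache and its timeouts (the helper
name is hypothetical; the sha384 KEY_FUNCTION hashing happens transparently):

    from django.conf import settings
    from django.core.cache import caches

    def cache_agenda_data(key, data, is_current_meeting):
        # The current meeting's agenda changes often, so it gets the short timeout.
        timeout = (
            settings.AGENDA_CACHE_TIMEOUT_CURRENT_MEETING
            if is_current_meeting
            else settings.AGENDA_CACHE_TIMEOUT_DEFAULT
        )
        caches["agenda"].set(key, data, timeout)
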
diff --git a/ietf/settings_test.py b/ietf/settings_test.py
index 6479069db0..e7ebc13eb2 100755
--- a/ietf/settings_test.py
+++ b/ietf/settings_test.py
@@ -14,7 +14,7 @@
import shutil
import tempfile
from ietf.settings import * # pyflakes:ignore
-from ietf.settings import ORIG_AUTH_PASSWORD_VALIDATORS
+from ietf.settings import ORIG_AUTH_PASSWORD_VALIDATORS, STORAGES
import debug # pyflakes:ignore
debug.debug = True
@@ -114,3 +114,13 @@ def tempdir_with_cleanup(**kwargs):
AUTH_PASSWORD_VALIDATORS = ORIG_AUTH_PASSWORD_VALIDATORS
except NameError:
pass
+
+# Use InMemoryStorage for red bucket and r2-rfc storages
+STORAGES["red_bucket"] = {
+ "BACKEND": "django.core.files.storage.InMemoryStorage",
+ "OPTIONS": {"location": "red_bucket"},
+}
+STORAGES["r2-rfc"] = {
+ "BACKEND": "django.core.files.storage.InMemoryStorage",
+ "OPTIONS": {"location": "r2-rfc"},
+}
diff --git a/ietf/settings_testcrawl.py b/ietf/settings_testcrawl.py
index 40744a228d..edb978757a 100644
--- a/ietf/settings_testcrawl.py
+++ b/ietf/settings_testcrawl.py
@@ -27,6 +27,9 @@
'MAX_ENTRIES': 10000,
},
},
+ 'agenda': {
+ 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
+ },
'proceedings': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
diff --git a/ietf/static/css/datepicker.scss b/ietf/static/css/datepicker.scss
index 88f9e835fd..b193ccda3a 100644
--- a/ietf/static/css/datepicker.scss
+++ b/ietf/static/css/datepicker.scss
@@ -4,3 +4,29 @@
$dp-cell-focus-background-color: $dropdown-link-hover-bg !default;
@import "vanillajs-datepicker/sass/datepicker-bs5";
+
+[data-bs-theme="dark"] .datepicker-picker {
+ .datepicker-header,
+ .datepicker-controls .btn,
+ .datepicker-main,
+ .datepicker-footer {
+ background-color: $gray-800;
+ }
+
+ .datepicker-cell:hover {
+ background-color: $gray-700;
+ }
+
+ .datepicker-cell.day.focused {
+ background-color: $gray-600;
+ }
+
+ .datepicker-cell.day.selected.focused {
+ background-color: $blue;
+ }
+
+ .datepicker-controls .btn:hover {
+    background-color: $gray-700;
+ color: $gray-400;
+ }
+}
diff --git a/ietf/static/css/ietf.scss b/ietf/static/css/ietf.scss
index df973863d5..6695c57b13 100644
--- a/ietf/static/css/ietf.scss
+++ b/ietf/static/css/ietf.scss
@@ -1216,3 +1216,20 @@ iframe.status {
.overflow-shadows--bottom-only {
box-shadow: inset 0px -21px 18px -20px var(--bs-body-color);
}
+
+#navbar-doc-search-wrapper {
+ position: relative;
+}
+
+#navbar-doc-search-results {
+ max-height: 400px;
+ overflow-y: auto;
+ min-width: auto;
+ left: 0;
+ right: 0;
+
+ .dropdown-item {
+ white-space: normal;
+ overflow-wrap: break-word;
+ }
+}
diff --git a/ietf/static/js/attendees-chart.js b/ietf/static/js/attendees-chart.js
new file mode 100644
index 0000000000..fed3b1289c
--- /dev/null
+++ b/ietf/static/js/attendees-chart.js
@@ -0,0 +1,58 @@
+(function () {
+ var raw = document.getElementById('attendees-chart-data');
+ if (!raw) return;
+ var chartData = JSON.parse(raw.textContent);
+ var chart = null;
+ var currentBreakdown = 'type';
+
+ // Override the global transparent background set by highcharts.js so the
+ // export menu and fullscreen view use the page background color.
+ var container = document.getElementById('attendees-pie-chart');
+ var bodyBg = getComputedStyle(document.body).backgroundColor;
+ container.style.setProperty('--highcharts-background-color', bodyBg);
+
+ function renderChart(breakdown) {
+ var seriesData = chartData[breakdown].map(function (item) {
+ return { name: item[0], y: item[1] };
+ });
+ if (chart) chart.destroy();
+ chart = Highcharts.chart(container, {
+ chart: { type: 'pie', height: 400 },
+ title: { text: null },
+ tooltip: { pointFormat: '{point.name}: {point.y} ({point.percentage:.1f}%)' },
+ plotOptions: {
+ pie: {
+ dataLabels: {
+ enabled: true,
+              format: '{point.name}<br>{point.y} ({point.percentage:.1f}%)',
+ },
+ showInLegend: false,
+ }
+ },
+ series: [{ name: 'Attendees', data: seriesData }],
+ });
+ }
+
+ var modal = document.getElementById('attendees-chart-modal');
+
+ // Render (or re-render) the chart each time the modal becomes fully visible,
+ // so Highcharts can measure the container dimensions correctly.
+ modal.addEventListener('shown.bs.modal', function () {
+ renderChart(currentBreakdown);
+ });
+
+ // Release the chart when the modal closes to avoid stale renders.
+ modal.addEventListener('hidden.bs.modal', function () {
+ if (chart) {
+ chart.destroy();
+ chart = null;
+ }
+ });
+
+ document.querySelectorAll('[name="attendees-breakdown"]').forEach(function (radio) {
+ radio.addEventListener('change', function () {
+ currentBreakdown = this.value;
+ renderChart(currentBreakdown);
+ });
+ });
+})();
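
For reference, the shape this script assumes for the JSON embedded in
#attendees-chart-data (values are illustrative; the breakdown keys must match
the values of the attendees-breakdown radio buttons):

    chart_data = {
        "type": [["Onsite", 850], ["Remote", 420]],
        "country": [["US", 300], ["DE", 95], ["JP", 80]],
    }
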
diff --git a/ietf/static/js/document_html.js b/ietf/static/js/document_html.js
index 6e8861739a..3e609f3965 100644
--- a/ietf/static/js/document_html.js
+++ b/ietf/static/js/document_html.js
@@ -117,4 +117,83 @@ document.addEventListener("DOMContentLoaded", function (event) {
}
});
}
+
+ // Rewrite these CSS properties so that the values are available for restyling.
+ document.querySelectorAll("svg [style]").forEach(el => {
+ // Push these CSS properties into their own attributes
+ const SVG_PRESENTATION_ATTRS = new Set([
+ 'alignment-baseline', 'baseline-shift', 'clip', 'clip-path', 'clip-rule',
+ 'color', 'color-interpolation', 'color-interpolation-filters',
+ 'color-rendering', 'cursor', 'direction', 'display', 'dominant-baseline',
+ 'fill', 'fill-opacity', 'fill-rule', 'filter', 'flood-color',
+ 'flood-opacity', 'font-family', 'font-size', 'font-size-adjust',
+ 'font-stretch', 'font-style', 'font-variant', 'font-weight',
+ 'image-rendering', 'letter-spacing', 'lighting-color', 'marker-end',
+ 'marker-mid', 'marker-start', 'mask', 'opacity', 'overflow', 'paint-order',
+ 'pointer-events', 'shape-rendering', 'stop-color', 'stop-opacity',
+ 'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
+ 'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
+ 'text-anchor', 'text-decoration', 'text-rendering', 'unicode-bidi',
+ 'vector-effect', 'visibility', 'word-spacing', 'writing-mode',
+ ]);
+
+ // Simple CSS splitter: respects quoted strings and parens so semicolons
+ // inside url(...) or "..." don't get treated as declaration boundaries.
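+  //   e.g. 'fill:url("a;b");stroke:red' -> ['fill:url("a;b")', 'stroke:red']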
+ function parseDeclarations(styleText) {
+ const decls = [];
+ let buf = '';
+ let inStr = false;
+ let strChar = '';
+ let escaped = false;
+ let depth = 0;
+
+ for (const ch of styleText) {
+ if (inStr) {
+ if (escaped) {
+ escaped = false;
+ } else if (ch === '\\') {
+ escaped = true;
+ } else if (ch === strChar) {
+ inStr = false;
+ }
+ } else if (ch === '"' || ch === "'") {
+ inStr = true;
+ strChar = ch;
+ } else if (ch === '(') {
+ depth++;
+ } else if (ch === ')') {
+ depth--;
+ } else if (ch === ';' && depth === 0) {
+ const trimmed = buf.trim();
+ if (trimmed) {
+ decls.push(trimmed);
+ }
+ buf = '';
+ continue;
+ }
+ buf += ch;
+ }
+ const trimmed = buf.trim();
+ if (trimmed) {
+ decls.push(trimmed);
+ }
+ return decls;
+ }
+
+ const remainder = [];
+ for (const decl of parseDeclarations(el.getAttribute('style'))) {
+ const [prop, val] = decl.split(":", 2).map(v => v.trim());
+ if (val && !/!important$/.test(val) && SVG_PRESENTATION_ATTRS.has(prop)) {
+ el.setAttribute(prop, val);
+ } else {
+ remainder.push(decl);
+ }
+ }
+
+ if (remainder.length > 0) {
+ el.setAttribute('style', remainder.join('; '));
+ } else {
+ el.removeAttribute('style');
+ }
+ });
});
diff --git a/ietf/static/js/navbar-doc-search.js b/ietf/static/js/navbar-doc-search.js
new file mode 100644
index 0000000000..c36c032310
--- /dev/null
+++ b/ietf/static/js/navbar-doc-search.js
@@ -0,0 +1,113 @@
+$(function () {
+ var $input = $('#navbar-doc-search');
+ var $results = $('#navbar-doc-search-results');
+ var ajaxUrl = $input.data('ajax-url');
+ var debounceTimer = null;
+ var highlightedIndex = -1;
+ var keyboardHighlight = false;
+ var currentItems = [];
+
+ function showDropdown() {
+ $results.addClass('show');
+ }
+
+ function hideDropdown() {
+ $results.removeClass('show');
+ highlightedIndex = -1;
+ keyboardHighlight = false;
+ updateHighlight();
+ }
+
+ function updateHighlight() {
+ $results.find('.dropdown-item').removeClass('active');
+ if (highlightedIndex >= 0 && highlightedIndex < currentItems.length) {
+ $results.find('.dropdown-item').eq(highlightedIndex).addClass('active');
+ }
+ }
+
+ function doSearch(query) {
+ if (query.length < 2) {
+ hideDropdown();
+ return;
+ }
+ $.ajax({
+ url: ajaxUrl,
+ dataType: 'json',
+ data: { q: query },
+ success: function (data) {
+ currentItems = data;
+ highlightedIndex = -1;
+ $results.empty();
+ if (data.length === 0) {
+            $results.append('<li><span class="dropdown-item-text text-muted">No results found</span></li>');
+ } else {
+ data.forEach(function (item) {
+              var $li = $('<li>');
+              var $a = $('<a class="dropdown-item" href="' + item.url + '">' + item.text + '</a>');
+ $li.append($a);
+ $results.append($li);
+ });
+ }
+ showDropdown();
+ }
+ });
+ }
+
+ $input.on('input', function () {
+ clearTimeout(debounceTimer);
+ var query = $(this).val().trim();
+ debounceTimer = setTimeout(function () {
+ doSearch(query);
+ }, 250);
+ });
+
+ $input.on('keydown', function (e) {
+ if (e.key === 'ArrowDown') {
+ e.preventDefault();
+ if (highlightedIndex < currentItems.length - 1) {
+ highlightedIndex++;
+ keyboardHighlight = true;
+ updateHighlight();
+ }
+ } else if (e.key === 'ArrowUp') {
+ e.preventDefault();
+ if (highlightedIndex > 0) {
+ highlightedIndex--;
+ keyboardHighlight = true;
+ updateHighlight();
+ }
+ } else if (e.key === 'Enter') {
+ e.preventDefault();
+ if (keyboardHighlight && highlightedIndex >= 0 && highlightedIndex < currentItems.length) {
+ window.location.href = currentItems[highlightedIndex].url;
+ } else {
+ var query = $(this).val().trim();
+ if (query) {
+ window.location.href = '/doc/search/?name=' + encodeURIComponent(query) + '&rfcs=on&activedrafts=on&olddrafts=on';
+ }
+ }
+ } else if (e.key === 'Escape') {
+ hideDropdown();
+ $input.blur();
+ }
+ });
+
+  // Hover highlights (visual only; Enter still submits the typed text)
+ $results.on('mouseenter', '.dropdown-item', function () {
+ highlightedIndex = $results.find('.dropdown-item').index(this);
+ keyboardHighlight = false;
+ updateHighlight();
+ });
+
+ $results.on('mouseleave', '.dropdown-item', function () {
+ highlightedIndex = -1;
+ updateHighlight();
+ });
+
+ // Click outside closes dropdown
+ $(document).on('click', function (e) {
+ if (!$(e.target).closest('#navbar-doc-search-wrapper').length) {
+ hideDropdown();
+ }
+ });
+});
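
The script only assumes the endpoint named by data-ajax-url returns a JSON
array of {url, text} objects; a hedged server-side sketch (the view name,
queryset, and URL helper are assumptions):

    from django.http import JsonResponse

    from ietf.doc.models import Document

    def navbar_doc_search_ajax(request):
        q = request.GET.get("q", "").strip()
        docs = Document.objects.filter(name__icontains=q)[:10] if len(q) >= 2 else []
        return JsonResponse(
            [{"url": d.get_absolute_url(), "text": d.name} for d in docs],
            safe=False,  # top-level JSON array
        )
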
diff --git a/ietf/static/js/session_details.js b/ietf/static/js/session_details.js
new file mode 100644
index 0000000000..03d1b2d3d9
--- /dev/null
+++ b/ietf/static/js/session_details.js
@@ -0,0 +1,53 @@
+// Copyright The IETF Trust 2026, All Rights Reserved
+// Relies on other scripts being loaded, see usage in session_details.html
+document.addEventListener('DOMContentLoaded', () => {
+ // Init with best guess at local timezone.
+ ietf_timezone.set_tz_change_callback(timezone_changed) // cb is in upcoming.js
+ ietf_timezone.initialize('local')
+
+ // Set up sortable elements if the user can manage materials
+ if (document.getElementById('can-manage-materials-flag')) {
+ const sortables = []
+ const options = {
+ group: 'slides',
+ animation: 150,
+ handle: '.drag-handle',
+ onAdd: function (event) {onAdd(event)},
+ onRemove: function (event) {onRemove(event)},
+ onEnd: function (event) {onEnd(event)}
+ }
+
+ function onAdd (event) {
+ const old_session = event.from.getAttribute('data-session')
+ const new_session = event.to.getAttribute('data-session')
+ $.post(event.to.getAttribute('data-add-to-session'), {
+ 'order': event.newIndex + 1,
+ 'name': event.item.getAttribute('name')
+ })
+ $(event.item).find('td:eq(1)').find('a').each(function () {
+ $(this).attr('href', $(this).attr('href').replace(old_session, new_session))
+ })
+ }
+
+ function onRemove (event) {
+ const old_session = event.from.getAttribute('data-session')
+ $.post(event.from.getAttribute('data-remove-from-session'), {
+ 'oldIndex': event.oldIndex + 1,
+ 'name': event.item.getAttribute('name')
+ })
+ }
+
+ function onEnd (event) {
+ if (event.to == event.from) {
+ $.post(event.from.getAttribute('data-reorder-in-session'), {
+ 'oldIndex': event.oldIndex + 1,
+ 'newIndex': event.newIndex + 1
+ })
+ }
+ }
+
+ for (const elt of document.querySelectorAll('.slides tbody')) {
+ sortables.push(Sortable.create(elt, options))
+ }
+ }
+})
diff --git a/ietf/submit/tests.py b/ietf/submit/tests.py
index ede63d2752..ad361d31b2 100644
--- a/ietf/submit/tests.py
+++ b/ietf/submit/tests.py
@@ -1,4 +1,4 @@
-# Copyright The IETF Trust 2011-2023, All Rights Reserved
+# Copyright The IETF Trust 2011-2026, All Rights Reserved
# -*- coding: utf-8 -*-
@@ -207,20 +207,24 @@ def test_manualpost_view(self):
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
- self.assertIn(
- urlreverse(
- "ietf.submit.views.submission_status",
- kwargs=dict(submission_id=submission.pk)
- ),
- q("#manual.submissions td a").attr("href")
- )
- self.assertIn(
- submission.name,
- q("#manual.submissions td a").text()
+ # Validate that the basic submission status URL is on the manual post page
+ # _without_ an access token, even if logged in as various users.
+ expected_url = urlreverse(
+ "ietf.submit.views.submission_status",
+ kwargs=dict(submission_id=submission.pk)
)
+ selected_elts = q("#manual.submissions td a")
+ self.assertEqual(expected_url, selected_elts.attr("href"))
+ self.assertIn(submission.name, selected_elts.text())
+ for username in ["plain", "secretary"]:
+ self.client.login(username=username, password=username + "+password")
+ r = self.client.get(url)
+ self.assertEqual(r.status_code, 200)
+ q = PyQuery(r.content)
+ selected_elts = q("#manual.submissions td a")
+ self.assertEqual(expected_url, selected_elts.attr("href"))
+ self.assertIn(submission.name, selected_elts.text())
- def test_manualpost_cancel(self):
- pass
class SubmitTests(BaseSubmitTestCase):
def setUp(self):
@@ -595,7 +599,7 @@ def submit_existing(self, formats, change_authors=True, group_type='wg', stream_
TestBlobstoreManager().emptyTestBlobstores()
def _assert_authors_are_action_holders(draft, expect=True):
- for author in draft.authors():
+ for author in draft.author_persons():
if expect:
self.assertIn(author, draft.action_holders.all())
else:
@@ -2404,7 +2408,7 @@ def test_upload_draft(self):
response = r.json()
self.assertCountEqual(
response.keys(),
- ['id', 'name', 'rev', 'status_url'],
+ ['id', 'name', 'rev', 'status_url', 'submission_url'],
)
submission_id = int(response['id'])
self.assertEqual(response['name'], 'draft-somebody-test')
@@ -2416,6 +2420,13 @@ def test_upload_draft(self):
kwargs={'submission_id': submission_id},
),
)
+ self.assertEqual(
+ response['submission_url'],
+ 'https://datatracker.example.com' + urlreverse(
+ 'ietf.submit.views.submission_status',
+ kwargs={'submission_id': submission_id},
+ )
+ )
self.assertEqual(mock_task.delay.call_count, 1)
self.assertEqual(mock_task.delay.call_args.args, (submission_id,))
submission = Submission.objects.get(pk=submission_id)
diff --git a/ietf/submit/utils.py b/ietf/submit/utils.py
index a0c7dd8511..7e3106f723 100644
--- a/ietf/submit/utils.py
+++ b/ietf/submit/utils.py
@@ -395,10 +395,7 @@ def post_submission(request, submission, approved_doc_desc, approved_subm_desc):
log.log(f"{submission.name}: updated state and info")
- trouble = rebuild_reference_relations(draft, find_submission_filenames(draft))
- if trouble:
- log.log('Rebuild_reference_relations trouble: %s'%trouble)
- log.log(f"{submission.name}: rebuilt reference relations")
+ rebuild_reference_relations(draft, find_submission_filenames(draft))
if draft.stream_id == "ietf" and draft.group.type_id == "wg" and draft.rev == "00":
# automatically set state "WG Document"
@@ -1268,7 +1265,7 @@ def process_submission_text(filename, revision):
if title:
title = _normalize_title(title)
- # Translation taable drops \r, \n, <, >.
+ # Translation table drops \r, \n, <, >.
trans_table = str.maketrans("", "", "\r\n<>")
authors = [
{
diff --git a/ietf/submit/views.py b/ietf/submit/views.py
index 8329a312bb..2db3f51098 100644
--- a/ietf/submit/views.py
+++ b/ietf/submit/views.py
@@ -182,6 +182,10 @@ def err(code, error, messages=None):
settings.IDTRACKER_BASE_URL,
urlreverse(api_submission_status, kwargs={'submission_id': submission.pk}),
),
+ 'submission_url': urljoin(
+ settings.IDTRACKER_BASE_URL,
+ urlreverse("ietf.submit.views.submission_status", kwargs={'submission_id': submission.pk}),
+ ),
}
)
else:
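
Illustrative shape of the API response after this change (host and paths
depend on IDTRACKER_BASE_URL and the URL conf; the values here are made up):

    {
        "id": "123",
        "name": "draft-somebody-test",
        "rev": "00",
        "status_url": "https://datatracker.example.com/api/submission/123/status/",
        "submission_url": "https://datatracker.example.com/submit/status/123/",
    }
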
diff --git a/ietf/sync/errata.py b/ietf/sync/errata.py
new file mode 100644
index 0000000000..113d987291
--- /dev/null
+++ b/ietf/sync/errata.py
@@ -0,0 +1,184 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+import datetime
+import json
+from collections import defaultdict
+from typing import DefaultDict
+
+from django.conf import settings
+from django.core.files.storage import storages
+from django.db import transaction
+from django.db.models import Q
+
+from ietf.doc.models import Document, DocEvent
+from ietf.name.models import DocTagName
+from ietf.person.models import Person
+from ietf.utils.log import log
+from ietf.utils.models import DirtyBits
+
+
+DEFAULT_ERRATA_JSON_BLOB_NAME = "other/errata.json"
+
+type ErrataJsonEntry = dict[str, str]
+
+def get_errata_last_updated() -> datetime.datetime:
+ """Get timestamp of the last errata.json update
+
+ May raise FileNotFoundError or other storage/S3 exceptions. Be prepared.
+ """
+ red_bucket = storages["red_bucket"]
+ return red_bucket.get_modified_time(
+ getattr(settings, "ERRATA_JSON_BLOB_NAME", DEFAULT_ERRATA_JSON_BLOB_NAME)
+ )
+
+
+def get_errata_data() -> list[ErrataJsonEntry]:
+ red_bucket = storages["red_bucket"]
+ with red_bucket.open(
+ getattr(settings, "ERRATA_JSON_BLOB_NAME", DEFAULT_ERRATA_JSON_BLOB_NAME), "r"
+ ) as f:
+ errata_data = json.load(f)
+ return errata_data
+
+
+def errata_map_from_json(errata_data: list[ErrataJsonEntry]):
+ """Create a dict mapping RFC number to a list of applicable errata records"""
+ errata = defaultdict(list)
+ for item in errata_data:
+ doc_id = item["doc-id"]
+ if doc_id.upper().startswith("RFC"):
+ rfc_number = int(doc_id[3:])
+ errata[rfc_number].append(item)
+ return dict(errata)
+
+
+def update_errata_tags(errata_data: list[ErrataJsonEntry]):
+ tag_has_errata = DocTagName.objects.get(slug="errata")
+ tag_has_verified_errata = DocTagName.objects.get(slug="verified-errata")
+ system = Person.objects.get(name="(System)")
+
+ errata_map = errata_map_from_json(errata_data)
+ nums_with_errata = [
+ num
+ for num, errata in errata_map.items()
+ if any(er["errata_status_code"] != "Rejected" for er in errata)
+ ]
+ nums_with_verified_errata = [
+ num
+ for num, errata in errata_map.items()
+ if any(er["errata_status_code"] == "Verified" for er in errata)
+ ]
+
+ rfcs_gaining_errata_tag = Document.objects.filter(
+ type_id="rfc", rfc_number__in=nums_with_errata
+ ).exclude(tags=tag_has_errata)
+
+ rfcs_gaining_verified_errata_tag = Document.objects.filter(
+ type_id="rfc", rfc_number__in=nums_with_verified_errata
+ ).exclude(tags=tag_has_verified_errata)
+
+ rfcs_losing_errata_tag = Document.objects.filter(
+ type_id="rfc", tags=tag_has_errata
+ ).exclude(rfc_number__in=nums_with_errata)
+
+ rfcs_losing_verified_errata_tag = Document.objects.filter(
+ type_id="rfc", tags=tag_has_verified_errata
+ ).exclude(rfc_number__in=nums_with_verified_errata)
+
+ # map rfc_number to add/remove lists
+ changes: DefaultDict[Document, dict[str, list[DocTagName]]] = defaultdict(
+ lambda: {"add": [], "remove": []}
+ )
+ for rfc in rfcs_gaining_errata_tag:
+ changes[rfc]["add"].append(tag_has_errata)
+ for rfc in rfcs_gaining_verified_errata_tag:
+ changes[rfc]["add"].append(tag_has_verified_errata)
+ for rfc in rfcs_losing_errata_tag:
+ changes[rfc]["remove"].append(tag_has_errata)
+ for rfc in rfcs_losing_verified_errata_tag:
+ changes[rfc]["remove"].append(tag_has_verified_errata)
+
+ for rfc, changeset in changes.items():
+ # Update in a transaction per RFC to keep tags and DocEvents consistent.
+ # With this in place, an interrupted task will be cleanly completed on the
+ # next run.
+ with transaction.atomic():
+ change_descs = []
+ for tag in changeset["add"]:
+ rfc.tags.add(tag)
+ change_descs.append(f"added {tag.slug} tag")
+ for tag in changeset["remove"]:
+ rfc.tags.remove(tag)
+ change_descs.append(f"removed {tag.slug} tag")
+ summary = "Update from RFC Editor: " + ", ".join(change_descs)
+ if rfc.rfc_number in errata_map and all(
+ er["errata_status_code"] == "Rejected"
+ for er in errata_map[rfc.rfc_number]
+ ):
+ summary += " (all errata rejected)"
+ DocEvent.objects.create(
+ doc=rfc,
+ rev=rfc.rev, # expect no rev
+ by=system,
+ type="sync_from_rfc_editor",
+ desc=summary,
+ )
+
+
+def update_errata_from_rfceditor():
+ errata_data = get_errata_data()
+ update_errata_tags(errata_data)
+
+
+## DirtyBits management for the errata tags
+
+ERRATA_SLUG = DirtyBits.Slugs.ERRATA
+
+
+def update_errata_dirty_time() -> DirtyBits | None:
+ try:
+ last_update = get_errata_last_updated()
+ except Exception as err:
+ log(f"Error in get_errata_last_updated: {err}")
+ return None
+ else:
+ dirty_work, created = DirtyBits.objects.update_or_create(
+ slug=ERRATA_SLUG, defaults={"dirty_time": last_update}
+ )
+ if created:
+ log(f"Created DirtyBits(slug='{ERRATA_SLUG}')")
+ return dirty_work
+
+
+def mark_errata_as_processed(when: datetime.datetime):
+ n_updated = DirtyBits.objects.filter(
+ Q(processed_time__isnull=True) | Q(processed_time__lt=when),
+ slug=ERRATA_SLUG,
+ ).update(processed_time=when)
+ if n_updated > 0:
+ log(f"processed_time is now {when.isoformat()}")
+ else:
+ log("processed_time not updated, no matching record found")
+
+
+def errata_are_dirty():
+ """Does the rfc index need to be updated?"""
+ dirty_work = update_errata_dirty_time() # creates DirtyBits if needed
+ if dirty_work is None:
+ # A None indicates we could not check the timestamp of errata.json. In that
+ # case, we are not likely to be able to read the blob either, so don't try
+ # to process it. An error was already logged.
+ return False
+ display_processed_time = (
+ dirty_work.processed_time.isoformat()
+ if dirty_work.processed_time is not None
+ else "never"
+ )
+ log(
+ f"DirtyBits(slug='{ERRATA_SLUG}'): "
+ f"dirty_time={dirty_work.dirty_time.isoformat()} "
+ f"processed_time={display_processed_time}"
+ )
+ return (
+ dirty_work.processed_time is None
+ or dirty_work.dirty_time >= dirty_work.processed_time
+ )
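
A sketch of how these helpers compose into a periodic task (the task name is
an assumption; the timestamp is taken before processing so an errata.json
update that lands mid-run keeps the record dirty for the next pass):

    from django.utils import timezone

    def sync_errata_if_dirty():
        if errata_are_dirty():
            start = timezone.now()
            update_errata_from_rfceditor()
            mark_errata_as_processed(start)
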
diff --git a/ietf/sync/rfceditor.py b/ietf/sync/rfceditor.py
index b3234a87e2..aa0e643b20 100644
--- a/ietf/sync/rfceditor.py
+++ b/ietf/sync/rfceditor.py
@@ -468,14 +468,18 @@ def update_docs_from_rfc_index(
doc.set_state(rfc_published_state)
if draft:
doc.formal_languages.set(draft.formal_languages.all())
- for author in draft.documentauthor_set.all():
+ # Create authors based on the last draft in the datatracker. This
+ # path will go away when we publish via the modernized RPC workflow
+ # but until then, these are the only data we have for authors that
+ # are easily connected to Person records.
+ for documentauthor in draft.documentauthor_set.all():
# Copy the author but point at the new doc.
# See https://docs.djangoproject.com/en/4.2/topics/db/queries/#copying-model-instances
- author.pk = None
- author.id = None
- author._state.adding = True
- author.document = doc
- author.save()
+ documentauthor.pk = None
+ documentauthor.id = None
+ documentauthor._state.adding = True
+ documentauthor.document = doc
+ documentauthor.save()
if draft:
draft_events = []
@@ -632,43 +636,70 @@ def update_docs_from_rfc_index(
)
rfc_published = True
- def parse_relation_list(l):
- res = []
- for x in l:
- for a in Document.objects.filter(name=x.lower(), type_id="rfc"):
- if a not in res:
- res.append(a)
- return res
+ def parse_relation_list(rel_list: list[str]) -> list[Document]:
+ return list(
+ Document.objects.filter(
+ name__in=[name.strip().lower() for name in rel_list],
+ type_id="rfc"
+ )
+ )
- for x in parse_relation_list(obsoletes):
- if not RelatedDocument.objects.filter(
- source=doc, target=x, relationship=relationship_obsoletes
+ # Create missing obsoletes relations
+ docs_this_obsoletes = parse_relation_list(obsoletes)
+ for obs_doc in docs_this_obsoletes:
+ if not doc.relateddocument_set.filter(
+ target=obs_doc, relationship=relationship_obsoletes
):
- r = RelatedDocument.objects.create(
- source=doc, target=x, relationship=relationship_obsoletes
+ r = doc.relateddocument_set.create(
+ target=obs_doc, relationship=relationship_obsoletes
)
rfc_changes.append(
- "created {rel_name} relation between {src_name} and {tgt_name}".format(
+ "created {rel_name} relation between {src} and {tgt}".format(
rel_name=r.relationship.name.lower(),
- src_name=prettify_std_name(r.source.name),
- tgt_name=prettify_std_name(r.target.name),
+ src=prettify_std_name(r.source.name),
+ tgt=prettify_std_name(r.target.name),
)
)
+ # Remove stale obsoletes relations
+ for r in doc.relateddocument_set.filter(
+ relationship=relationship_obsoletes
+ ).exclude(target_id__in=[d.pk for d in docs_this_obsoletes]):
+ r.delete()
+ rfc_changes.append(
+ "removed {rel_name} relation between {src} and {tgt}".format(
+ rel_name=r.relationship.name.lower(),
+ src=prettify_std_name(r.source.name),
+ tgt=prettify_std_name(r.target.name),
+ )
+ )
- for x in parse_relation_list(updates):
+ docs_this_updates = parse_relation_list(updates)
+ for upd_doc in docs_this_updates:
if not RelatedDocument.objects.filter(
- source=doc, target=x, relationship=relationship_updates
+ source=doc, target=upd_doc, relationship=relationship_updates
):
- r = RelatedDocument.objects.create(
- source=doc, target=x, relationship=relationship_updates
+ r = doc.relateddocument_set.create(
+ target=upd_doc, relationship=relationship_updates
)
rfc_changes.append(
- "created {rel_name} relation between {src_name} and {tgt_name}".format(
+ "created {rel_name} relation between {src} and {tgt}".format(
rel_name=r.relationship.name.lower(),
- src_name=prettify_std_name(r.source.name),
- tgt_name=prettify_std_name(r.target.name),
+ src=prettify_std_name(r.source.name),
+ tgt=prettify_std_name(r.target.name),
)
)
+ # Remove stale updates relations
+ for r in doc.relateddocument_set.filter(
+ relationship=relationship_updates
+ ).exclude(target_id__in=[d.pk for d in docs_this_updates]):
+ r.delete()
+ rfc_changes.append(
+ "removed {rel_name} relation between {src} and {tgt}".format(
+ rel_name=r.relationship.name.lower(),
+ src=prettify_std_name(r.source.name),
+ tgt=prettify_std_name(r.target.name),
+ )
+ )
if also:
# recondition also to have proper subseries document names:
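
The obsoletes/updates handling above follows a reconcile pattern; a condensed
sketch of the same logic (the helper name is hypothetical):

    def reconcile_relations(doc, relationship, wanted_docs):
        # Create relations that are wanted but missing ...
        for target in wanted_docs:
            if not doc.relateddocument_set.filter(
                target=target, relationship=relationship
            ).exists():
                doc.relateddocument_set.create(target=target, relationship=relationship)
        # ... and drop relations that are no longer in the index.
        doc.relateddocument_set.filter(relationship=relationship).exclude(
            target__in=wanted_docs
        ).delete()
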
diff --git a/ietf/sync/rfcindex.py b/ietf/sync/rfcindex.py
new file mode 100644
index 0000000000..d1a0ed432f
--- /dev/null
+++ b/ietf/sync/rfcindex.py
@@ -0,0 +1,791 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+import datetime
+import json
+from collections import defaultdict
+from collections.abc import Container
+from dataclasses import dataclass
+from itertools import chain
+from operator import attrgetter, itemgetter
+from pathlib import Path
+from textwrap import fill
+from urllib.parse import urljoin
+
+from django.conf import settings
+from django.core.files.base import ContentFile
+from django.db.models import Q
+from lxml import etree
+
+from django.core.files.storage import storages
+from django.db import models
+from django.db.models.functions import Substr, Cast
+from django.template.loader import render_to_string
+from django.utils import timezone
+
+from ietf.doc.models import Document
+from ietf.name.models import StdLevelName
+from ietf.utils.log import log
+from ietf.utils.models import DirtyBits
+
+FORMATS_FOR_INDEX = ["txt", "html", "pdf", "xml", "ps"]
+SS_TXT_MARGIN = 3
+SS_TXT_CUE_COL_WIDTH = 14
+
+
+def format_rfc_number(n):
+ """Format an RFC number (or subseries doc number)
+
+ Set settings.RFCINDEX_MATCH_LEGACY_XML=True for the legacy (leading-zero) format.
+ That is for debugging only - tests will fail.
+ """
+ if getattr(settings, "RFCINDEX_MATCH_LEGACY_XML", False):
+ return format(n, "04")
+ else:
+ return format(n)
+
+
+def errata_url(rfc: Document):
+    # Tolerate a base URL with or without a trailing slash (avoid "errata//rfcNNNN").
+    base = settings.RFC_EDITOR_ERRATA_BASE_URL.rstrip("/") + "/"
+    return urljoin(base, f"rfc{rfc.rfc_number}")
+
+
+def save_to_red_bucket(filename: str, content: str | bytes):
+ red_bucket = storages["red_bucket"]
+ bucket_path = str(Path(getattr(settings, "RFCINDEX_OUTPUT_PATH", "")) / filename)
+ if getattr(settings, "RFCINDEX_DELETE_THEN_WRITE", True):
+ # Django 4.2's FileSystemStorage does not support allow_overwrite.
+ red_bucket.delete(bucket_path)
+ red_bucket.save(
+ bucket_path,
+ ContentFile(content if isinstance(content, bytes) else content.encode("utf-8")),
+ )
+ log(f"Saved {bucket_path} in red_bucket storage")
+
+
+@dataclass
+class UnusableRfcNumber:
+ rfc_number: int
+ comment: str
+
+
+def get_unusable_rfc_numbers() -> list[UnusableRfcNumber]:
+ FILENAME = "unusable-rfc-numbers.json"
+ bucket_path = str(Path(getattr(settings, "RFCINDEX_INPUT_PATH", "")) / FILENAME)
+ try:
+ with storages["red_bucket"].open(bucket_path) as urn_file:
+ records = json.load(urn_file)
+ except FileNotFoundError:
+ if settings.SERVER_MODE == "development":
+ log(
+ f"Unable to open {bucket_path} in red_bucket storage. This is okay in dev "
+ "but generated rfc-index will not agree with RFC Editor values."
+ ) # pragma: no cover
+ return [] # pragma: no cover
+ log(f"Error: unable to open {bucket_path} in red_bucket storage")
+ raise
+ except json.JSONDecodeError:
+ log(f"Error: unable to parse {bucket_path} in red_bucket storage")
+ if settings.SERVER_MODE == "development":
+ return [] # pragma: no cover
+ raise
+ assert all(isinstance(record["number"], int) for record in records)
+ assert all(isinstance(record["comment"], str) for record in records)
+ return [
+ UnusableRfcNumber(rfc_number=record["number"], comment=record["comment"])
+ for record in sorted(records, key=itemgetter("number"))
+ ]
+
+
+def get_april1_rfc_numbers() -> Container[int]:
+ FILENAME = "april-first-rfc-numbers.json"
+ bucket_path = str(Path(getattr(settings, "RFCINDEX_INPUT_PATH", "")) / FILENAME)
+ try:
+ with storages["red_bucket"].open(bucket_path) as urn_file:
+ records = json.load(urn_file)
+ except FileNotFoundError:
+ if settings.SERVER_MODE == "development":
+ log(
+ f"Unable to open {bucket_path} in red_bucket storage. This is okay in dev "
+ "but generated rfc-index will not agree with RFC Editor values."
+ ) # pragma: no cover
+ return [] # pragma: no cover
+ log(f"Error: unable to open {bucket_path} in red_bucket storage")
+ raise
+ except json.JSONDecodeError:
+ log(f"Error: unable to parse {bucket_path} in red_bucket storage")
+ if settings.SERVER_MODE == "development":
+ return [] # pragma: no cover
+ raise
+ assert all(isinstance(record, int) for record in records)
+ return records
+
+
+def get_publication_std_levels() -> dict[int, StdLevelName]:
+ FILENAME = "publication-std-levels.json"
+ bucket_path = str(Path(getattr(settings, "RFCINDEX_INPUT_PATH", "")) / FILENAME)
+ values: dict[int, StdLevelName] = {}
+ try:
+ with storages["red_bucket"].open(bucket_path) as urn_file:
+ records = json.load(urn_file)
+ except FileNotFoundError:
+ if settings.SERVER_MODE == "development":
+ log(
+ f"Unable to open {bucket_path} in red_bucket storage. This is okay in dev "
+ "but generated rfc-index will not agree with RFC Editor values."
+ ) # pragma: no cover
+ # intentionally fall through instead of return here
+ else:
+ log(f"Error: unable to open {bucket_path} in red_bucket storage")
+ raise
+ except json.JSONDecodeError:
+ log(f"Error: unable to parse {bucket_path} in red_bucket storage")
+ if settings.SERVER_MODE != "development":
+ raise
+ else:
+ assert all(isinstance(record["number"], int) for record in records)
+ values = {
+ record["number"]: StdLevelName.objects.get(
+ slug=record["publication_std_level"]
+ )
+ for record in records
+ }
+ # defaultdict to return "unknown" for any missing values
+ unknown_std_level = StdLevelName.objects.get(slug="unkn")
+ return defaultdict(lambda: unknown_std_level, values)
+
+
+def format_ordering(rfc_number):
+ if rfc_number < settings.FIRST_V3_RFC:
+ ordering = ["txt", "ps", "pdf", "html", "xml"]
+ else:
+ ordering = ["html", "txt", "ps", "pdf", "xml"]
+ return ordering.index # return the method
+
+
+def get_rfc_text_index_entries():
+ """Returns RFC entries for rfc-index.txt"""
+ entries = []
+ april1_rfc_numbers = get_april1_rfc_numbers()
+ published_rfcs = Document.objects.filter(type_id="rfc").order_by("rfc_number")
+ rfcs = sorted(
+ chain(published_rfcs, get_unusable_rfc_numbers()), key=attrgetter("rfc_number")
+ )
+ for rfc in rfcs:
+ if isinstance(rfc, UnusableRfcNumber):
+ entries.append(f"{format_rfc_number(rfc.rfc_number)} Not Issued.")
+ else:
+ assert isinstance(rfc, Document)
+ authors = ", ".join(
+ author.format_for_titlepage() for author in rfc.rfcauthor_set.all()
+ )
+ published_at = rfc.pub_date()
+ date = (
+ published_at.strftime("1 %B %Y")
+ if rfc.rfc_number in april1_rfc_numbers
+ else published_at.strftime("%B %Y")
+ )
+
+ # formats
+ formats = ", ".join(
+ sorted(
+ [
+ format["fmt"]
+ for format in rfc.formats()
+ if format["fmt"] in FORMATS_FOR_INDEX
+ ],
+ key=format_ordering(rfc.rfc_number),
+ )
+ ).upper()
+
+ # obsoletes
+ obsoletes = ""
+ obsoletes_documents = sorted(
+ rfc.related_that_doc("obs"),
+ key=attrgetter("rfc_number"),
+ )
+ if len(obsoletes_documents) > 0:
+ obsoletes_names = ", ".join(
+ f"RFC{format_rfc_number(doc.rfc_number)}"
+ for doc in obsoletes_documents
+ )
+ obsoletes = f" (Obsoletes {obsoletes_names})"
+
+ # obsoleted by
+ obsoleted_by = ""
+ obsoleted_by_documents = sorted(
+ rfc.related_that("obs"),
+ key=attrgetter("rfc_number"),
+ )
+ if len(obsoleted_by_documents) > 0:
+ obsoleted_by_names = ", ".join(
+ f"RFC{format_rfc_number(doc.rfc_number)}"
+ for doc in obsoleted_by_documents
+ )
+ obsoleted_by = f" (Obsoleted by {obsoleted_by_names})"
+
+ # updates
+ updates = ""
+ updates_documents = sorted(
+ rfc.related_that_doc("updates"),
+ key=attrgetter("rfc_number"),
+ )
+ if len(updates_documents) > 0:
+ updates_names = ", ".join(
+ f"RFC{format_rfc_number(doc.rfc_number)}"
+ for doc in updates_documents
+ )
+ updates = f" (Updates {updates_names})"
+
+ # updated by
+ updated_by = ""
+ updated_by_documents = sorted(
+ rfc.related_that("updates"),
+ key=attrgetter("rfc_number"),
+ )
+ if len(updated_by_documents) > 0:
+ updated_by_names = ", ".join(
+ f"RFC{format_rfc_number(doc.rfc_number)}"
+ for doc in updated_by_documents
+ )
+ updated_by = f" (Updated by {updated_by_names})"
+
+ doc_relations = f"{obsoletes}{obsoleted_by}{updates}{updated_by} "
+
+ # subseries
+ subseries = ",".join(
+ f"{container.type.slug}{format_rfc_number(int(container.name[3:]))}"
+ for container in rfc.part_of()
+ ).upper()
+ if subseries:
+ subseries = f"(Also {subseries}) "
+
+ entry = fill(
+ (
+ f"{format_rfc_number(rfc.rfc_number)} {rfc.title}. {authors}. {date}. "
+ f"(Format: {formats}){doc_relations}{subseries}"
+ f"(Status: {str(rfc.std_level).upper()}) "
+ f"(DOI: {rfc.doi})"
+ ),
+ width=73,
+ subsequent_indent=" " * 5,
+ )
+ entries.append(entry)
+
+ return entries
+
+
+def subseries_text_line(line, first=False):
+ """Return subseries text entry line"""
+ indent = " " * SS_TXT_CUE_COL_WIDTH
+ if first:
+ initial_indent = " " * SS_TXT_MARGIN
+ else:
+ initial_indent = indent
+ return fill(
+ line,
+ initial_indent=initial_indent,
+ subsequent_indent=indent,
+ width=80,
+ break_on_hyphens=False,
+ )
+
+
+def get_bcp_text_index_entries():
+ """Returns BCP entries for bcp-index.txt"""
+ entries = []
+
+ highest_bcp_number = (
+ Document.objects.filter(type_id="bcp")
+ .annotate(
+ number=Cast(
+ Substr("name", 4, None),
+ output_field=models.IntegerField(),
+ )
+ )
+ .order_by("-number")
+ .first()
+ .number
+ )
+
+ for bcp_number in range(1, highest_bcp_number + 1):
+ bcp_name = f"BCP{bcp_number}"
+ bcp = Document.objects.filter(type_id="bcp", name=f"{bcp_name.lower()}").first()
+
+ if bcp:
+ entry = subseries_text_line(
+ (
+ f"[{bcp_name}]"
+ f"{' ' * (SS_TXT_CUE_COL_WIDTH - len(bcp_name) - 2 - SS_TXT_MARGIN)}"
+ f"Best Current Practice {bcp_number},"
+ ),
+ first=True,
+ )
+ entry += "\n"
+ entry += subseries_text_line(
+ f"<{settings.RFC_EDITOR_INFO_BASE_URL}{bcp_name.lower()}>."
+ )
+ entry += "\n"
+ entry += subseries_text_line(
+ "At the time of writing, this BCP comprises the following:"
+ )
+ entry += "\n\n"
+ rfcs = sorted(bcp.contains(), key=lambda x: x.rfc_number)
+ for rfc in rfcs:
+ authors = ", ".join(
+ author.format_for_titlepage() for author in rfc.rfcauthor_set.all()
+ )
+ entry += subseries_text_line(
+ (
+ f'{authors}, "{rfc.title}", BCP¶{bcp_number}, RFC¶{rfc.rfc_number}, '
+ f"DOI¶{rfc.doi}, {rfc.pub_date().strftime('%B %Y')}, "
+ f"<{settings.RFC_EDITOR_INFO_BASE_URL}rfc{rfc.rfc_number}>."
+ )
+ ).replace("¶", " ")
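+                # "¶" keeps label/number pairs unsplit while fill() wraps;
+                # it becomes a regular space afterwards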
+ entry += "\n\n"
+ else:
+ entry = subseries_text_line(
+ (
+ f"[{bcp_name}]"
+ f"{' ' * (SS_TXT_CUE_COL_WIDTH - len(bcp_name) - 2 - SS_TXT_MARGIN)}"
+ f"Best Current Practice {bcp_number} currently contains no RFCs"
+ ),
+ first=True,
+ )
+ entries.append(entry)
+ return entries
+
+
+def get_std_text_index_entries():
+ """Returns STD entries for std-index.txt"""
+ entries = []
+
+ highest_std_number = (
+ Document.objects.filter(type_id="std")
+ .annotate(
+ number=Cast(
+ Substr("name", 4, None),
+ output_field=models.IntegerField(),
+ )
+ )
+ .order_by("-number")
+ .first()
+ .number
+ )
+
+ for std_number in range(1, highest_std_number + 1):
+ std_name = f"STD{std_number}"
+ std = Document.objects.filter(type_id="std", name=f"{std_name.lower()}").first()
+
+ if std and std.contains():
+ entry = subseries_text_line(
+ (
+ f"[{std_name}]"
+ f"{' ' * (SS_TXT_CUE_COL_WIDTH - len(std_name) - 2 - SS_TXT_MARGIN)}"
+ f"Internet Standard {std_number},"
+ ),
+ first=True,
+ )
+ entry += "\n"
+ entry += subseries_text_line(
+ f"<{settings.RFC_EDITOR_INFO_BASE_URL}{std_name.lower()}>."
+ )
+ entry += "\n"
+ entry += subseries_text_line(
+ "At the time of writing, this STD comprises the following:"
+ )
+ entry += "\n\n"
+ rfcs = sorted(std.contains(), key=lambda x: x.rfc_number)
+ for rfc in rfcs:
+ authors = ", ".join(
+ author.format_for_titlepage() for author in rfc.rfcauthor_set.all()
+ )
+ entry += subseries_text_line(
+ (
+ f'{authors}, "{rfc.title}", STD¶{std_number}, RFC¶{rfc.rfc_number}, '
+ f"DOI¶{rfc.doi}, {rfc.pub_date().strftime('%B %Y')}, "
+ f"<{settings.RFC_EDITOR_INFO_BASE_URL}rfc{rfc.rfc_number}>."
+ )
+ ).replace("¶", " ")
+ entry += "\n\n"
+ else:
+ entry = subseries_text_line(
+ (
+ f"[{std_name}]"
+ f"{' ' * (SS_TXT_CUE_COL_WIDTH - len(std_name) - 2 - SS_TXT_MARGIN)}"
+ f"Internet Standard {std_number} currently contains no RFCs"
+ ),
+ first=True,
+ )
+ entries.append(entry)
+ return entries
+
+
+def get_fyi_text_index_entries():
+ """Returns FYI entries for fyi-index.txt"""
+ entries = []
+
+ highest_fyi_number = (
+ Document.objects.filter(type_id="fyi")
+ .annotate(
+ number=Cast(
+ Substr("name", 4, None),
+ output_field=models.IntegerField(),
+ )
+ )
+ .order_by("-number")
+ .first()
+ .number
+ )
+
+ for fyi_number in range(1, highest_fyi_number + 1):
+ fyi_name = f"FYI{fyi_number}"
+ fyi = Document.objects.filter(type_id="fyi", name=f"{fyi_name.lower()}").first()
+
+ if fyi and fyi.contains():
+ entry = subseries_text_line(
+ (
+ f"[{fyi_name}]"
+ f"{' ' * (SS_TXT_CUE_COL_WIDTH - len(fyi_name) - 2 - SS_TXT_MARGIN)}"
+ f"For Your Information {fyi_number},"
+ ),
+ first=True,
+ )
+ entry += "\n"
+ entry += subseries_text_line(
+ f"<{settings.RFC_EDITOR_INFO_BASE_URL}{fyi_name.lower()}>."
+ )
+ entry += "\n"
+ entry += subseries_text_line(
+ "At the time of writing, this FYI comprises the following:"
+ )
+ entry += "\n\n"
+ rfcs = sorted(fyi.contains(), key=lambda x: x.rfc_number)
+ for rfc in rfcs:
+ authors = ", ".join(
+ author.format_for_titlepage() for author in rfc.rfcauthor_set.all()
+ )
+ entry += subseries_text_line(
+ (
+ f'{authors}, "{rfc.title}", FYI¶{fyi_number}, RFC¶{rfc.rfc_number}, '
+ f"DOI¶{rfc.doi}, {rfc.pub_date().strftime('%B %Y')}, "
+ f"<{settings.RFC_EDITOR_INFO_BASE_URL}rfc{rfc.rfc_number}>."
+ )
+ ).replace("¶", " ")
+ entry += "\n\n"
+ else:
+ entry = subseries_text_line(
+ (
+ f"[{fyi_name}]"
+ f"{' ' * (SS_TXT_CUE_COL_WIDTH - len(fyi_name) - 2 - SS_TXT_MARGIN)}"
+ f"For Your Information {fyi_number} currently contains no RFCs"
+ ),
+ first=True,
+ )
+ entries.append(entry)
+ return entries
+
+
+def add_subseries_xml_index_entries(rfc_index, ss_type, include_all=False):
+ """Add subseries entries for rfc-index.xml"""
+ # subseries docs annotated with numeric number
+ ss_docs = list(
+ Document.objects.filter(type_id=ss_type)
+ .annotate(
+ number=Cast(
+ Substr("name", 4, None),
+ output_field=models.IntegerField(),
+ )
+ )
+ .order_by("-number")
+ )
+ if len(ss_docs) == 0:
+ return # very much not expected
+ highest_number = ss_docs[0].number
+ for ss_number in range(1, highest_number + 1):
+ if ss_docs[-1].number == ss_number:
+ this_ss_doc = ss_docs.pop()
+ contained_rfcs = this_ss_doc.contains()
+ else:
+ contained_rfcs = []
+ if len(contained_rfcs) == 0 and not include_all:
+ continue
+ entry = etree.SubElement(rfc_index, f"{ss_type}-entry")
+ etree.SubElement(
+ entry, "doc-id"
+ ).text = f"{ss_type.upper()}{format_rfc_number(ss_number)}"
+ if len(contained_rfcs) > 0:
+ is_also = etree.SubElement(entry, "is-also")
+ for rfc in sorted(contained_rfcs, key=attrgetter("rfc_number")):
+ etree.SubElement(
+ is_also, "doc-id"
+ ).text = f"RFC{format_rfc_number(rfc.rfc_number)}"
+
+
+def add_related_xml_index_entries(root: etree._Element, rfc: Document, tag: str):
+ relation_getter = {
+ "obsoletes": lambda doc: doc.related_that_doc("obs"),
+ "obsoleted-by": lambda doc: doc.related_that("obs"),
+ "updates": lambda doc: doc.related_that_doc("updates"),
+ "updated-by": lambda doc: doc.related_that("updates"),
+ }
+ related_docs = sorted(
+ relation_getter[tag](rfc),
+ key=attrgetter("rfc_number"),
+ )
+ if len(related_docs) > 0:
+ element = etree.SubElement(root, tag)
+ for doc in related_docs:
+ etree.SubElement(
+ element, "doc-id"
+ ).text = f"RFC{format_rfc_number(doc.rfc_number)}"
+
+
+def add_rfc_xml_index_entries(rfc_index):
+ """Add RFC entries for rfc-index.xml"""
+ entries = []
+ april1_rfc_numbers = get_april1_rfc_numbers()
+ publication_statuses = get_publication_std_levels()
+
+ published_rfcs = Document.objects.filter(type_id="rfc").order_by("rfc_number")
+
+ # Iterators for unpublished and published, both sorted by number
+ unpublished_iter = iter(get_unusable_rfc_numbers())
+ published_iter = iter(published_rfcs)
+
+ # Prime the next_* values
+ next_unpublished = next(unpublished_iter, None)
+ next_published = next(published_iter, None)
+
+ while next_published is not None or next_unpublished is not None:
+ if next_unpublished is not None and (
+ next_published is None
+ or next_unpublished.rfc_number < next_published.rfc_number
+ ):
+ entry = etree.SubElement(rfc_index, "rfc-not-issued-entry")
+ etree.SubElement(
+ entry, "doc-id"
+ ).text = f"RFC{format_rfc_number(next_unpublished.rfc_number)}"
+ entries.append(entry)
+ next_unpublished = next(unpublished_iter, None)
+ continue
+
+ rfc = next_published # hang on to this
+ next_published = next(published_iter, None) # prep for next iteration
+ entry = etree.SubElement(rfc_index, "rfc-entry")
+
+ etree.SubElement(
+ entry, "doc-id"
+ ).text = f"RFC{format_rfc_number(rfc.rfc_number)}"
+ etree.SubElement(entry, "title").text = rfc.title
+
+ for author in rfc.rfcauthor_set.all():
+ author_element = etree.SubElement(entry, "author")
+ etree.SubElement(author_element, "name").text = author.titlepage_name
+ if author.is_editor:
+ etree.SubElement(author_element, "title").text = "Editor"
+
+ date = etree.SubElement(entry, "date")
+ published_at = rfc.pub_date()
+ etree.SubElement(date, "month").text = published_at.strftime("%B")
+ if rfc.rfc_number in april1_rfc_numbers:
+ etree.SubElement(date, "day").text = str(published_at.day)
+ etree.SubElement(date, "year").text = str(published_at.year)
+
+ format_ = etree.SubElement(entry, "format")
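+        # The legacy index reported plain-text files as "ASCII"; optionally match it.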
+        fmts = [ff["fmt"] for ff in rfc.formats() if ff["fmt"] in FORMATS_FOR_INDEX]
+        match_legacy = getattr(settings, "RFCINDEX_MATCH_LEGACY_XML", False)
+        for fmt in sorted(fmts, key=format_ordering(rfc.rfc_number)):
+            etree.SubElement(format_, "file-format").text = (
+                "ASCII" if match_legacy and fmt == "txt" else fmt.upper()
+            )
+
+ etree.SubElement(entry, "page-count").text = str(rfc.pages)
+
+ if len(rfc.keywords) > 0:
+ keywords = etree.SubElement(entry, "keywords")
+ for keyword in rfc.keywords:
+ etree.SubElement(keywords, "kw").text = keyword.strip()
+
+ if rfc.abstract:
+ abstract = etree.SubElement(entry, "abstract")
+ for paragraph in rfc.abstract.split("\n\n"):
+ etree.SubElement(abstract, "p").text = paragraph.strip()
+
+ draft = rfc.came_from_draft()
+ if draft is not None:
+ etree.SubElement(entry, "draft").text = f"{draft.name}-{draft.rev}"
+
+ part_of_documents = rfc.part_of()
+ if len(part_of_documents) > 0:
+ is_also = etree.SubElement(entry, "is-also")
+ for doc in part_of_documents:
+ etree.SubElement(is_also, "doc-id").text = doc.name.upper()
+
+ add_related_xml_index_entries(entry, rfc, "obsoletes")
+ add_related_xml_index_entries(entry, rfc, "obsoleted-by")
+ add_related_xml_index_entries(entry, rfc, "updates")
+ add_related_xml_index_entries(entry, rfc, "updated-by")
+
+ etree.SubElement(entry, "current-status").text = rfc.std_level.name.upper()
+ etree.SubElement(entry, "publication-status").text = publication_statuses[
+ rfc.rfc_number
+ ].name.upper()
+ etree.SubElement(entry, "stream").text = (
+ "INDEPENDENT" if rfc.stream_id == "ise" else rfc.stream.name
+ )
+
+ # Add area / wg_acronym
+ if rfc.stream_id == "ietf":
+ if rfc.group.type_id in ["individ", "area"]:
+ etree.SubElement(entry, "wg_acronym").text = "NON WORKING GROUP"
+ else:
+ if rfc.area is not None:
+ etree.SubElement(entry, "area").text = rfc.area.acronym
+ if rfc.group:
+ etree.SubElement(entry, "wg_acronym").text = rfc.group.acronym
+
+ if rfc.tags.filter(slug="errata").exists():
+ etree.SubElement(entry, "errata-url").text = errata_url(rfc)
+ etree.SubElement(entry, "doi").text = rfc.doi
+
+
+def create_rfc_txt_index():
+ """Create text index of published documents"""
+ DATE_FMT = "%m/%d/%Y"
+ created_on = timezone.now().strftime(DATE_FMT)
+ log("Creating rfc-index.txt")
+ index = render_to_string(
+ "sync/rfc-index.txt",
+ {
+ "created_on": created_on,
+ "rfcs": get_rfc_text_index_entries(),
+ },
+ )
+ save_to_red_bucket("rfc-index.txt", index)
+
+
+def create_rfc_xml_index():
+ """Create XML index of published documents"""
+ XSI_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
+ XSI = "{" + XSI_NAMESPACE + "}"
+
+ log("Creating rfc-index.xml")
+ rfc_index = etree.Element(
+ "rfc-index",
+ nsmap={
+ None: "https://www.rfc-editor.org/rfc-index",
+ "xsi": XSI_NAMESPACE,
+ },
+ attrib={
+ XSI + "schemaLocation": (
+ "https://www.rfc-editor.org/rfc-index "
+ "https://www.rfc-editor.org/rfc-index.xsd"
+ ),
+ },
+ )
+
+    # add data; BCP entries are included even when empty, FYI and STD only when non-empty
+ add_subseries_xml_index_entries(rfc_index, "bcp", include_all=True)
+ add_subseries_xml_index_entries(rfc_index, "fyi")
+ add_rfc_xml_index_entries(rfc_index)
+ add_subseries_xml_index_entries(rfc_index, "std")
+
+ # make it pretty
+ pretty_index = etree.tostring(
+ rfc_index,
+ encoding="utf-8",
+ xml_declaration=True,
+        pretty_print=True,
+ )
+ save_to_red_bucket("rfc-index.xml", pretty_index)
+
+
+def create_bcp_txt_index():
+ """Create text index of BCPs"""
+ DATE_FMT = "%m/%d/%Y"
+ created_on = timezone.now().strftime(DATE_FMT)
+ log("Creating bcp-index.txt")
+ index = render_to_string(
+ "sync/bcp-index.txt",
+ {
+ "created_on": created_on,
+ "bcps": get_bcp_text_index_entries(),
+ },
+ )
+ save_to_red_bucket("bcp-index.txt", index)
+
+
+def create_std_txt_index():
+ """Create text index of STDs"""
+ DATE_FMT = "%m/%d/%Y"
+ created_on = timezone.now().strftime(DATE_FMT)
+ log("Creating std-index.txt")
+ index = render_to_string(
+ "sync/std-index.txt",
+ {
+ "created_on": created_on,
+ "stds": get_std_text_index_entries(),
+ },
+ )
+ save_to_red_bucket("std-index.txt", index)
+
+
+def create_fyi_txt_index():
+ """Create text index of FYIs"""
+ DATE_FMT = "%m/%d/%Y"
+ created_on = timezone.now().strftime(DATE_FMT)
+ log("Creating fyi-index.txt")
+ index = render_to_string(
+ "sync/fyi-index.txt",
+ {
+ "created_on": created_on,
+ "fyis": get_fyi_text_index_entries(),
+ },
+ )
+ save_to_red_bucket("fyi-index.txt", index)
+
+
+## DirtyBits management for the RFC index
+
+RFCINDEX_SLUG = DirtyBits.Slugs.RFCINDEX
+
+
+def mark_rfcindex_as_dirty():
+ _, created = DirtyBits.objects.update_or_create(
+ slug=RFCINDEX_SLUG, defaults={"dirty_time": timezone.now()}
+ )
+ if created:
+ log(f"Created DirtyBits(slug='{RFCINDEX_SLUG}')")
+
+
+def mark_rfcindex_as_processed(when: datetime.datetime):
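+    # Only advance processed_time; a "when" older than the stored value is a no-op.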
+ n_updated = DirtyBits.objects.filter(
+ Q(processed_time__isnull=True) | Q(processed_time__lt=when),
+ slug=RFCINDEX_SLUG,
+ ).update(processed_time=when)
+ if n_updated > 0:
+ log(f"processed_time is now {when.isoformat()}")
+ else:
+ log("processed_time not updated, no matching record found")
+
+
+def rfcindex_is_dirty():
+ """Does the rfc index need to be updated?"""
+ dirty_work, created = DirtyBits.objects.get_or_create(
+ slug=RFCINDEX_SLUG, defaults={"dirty_time": timezone.now()}
+ )
+ if created:
+ log(f"Created DirtyBits(slug='{RFCINDEX_SLUG}')")
+ display_processed_time = (
+ dirty_work.processed_time.isoformat()
+ if dirty_work.processed_time is not None
+ else "never"
+ )
+ log(
+ f"DirtyBits(slug='{RFCINDEX_SLUG}'): "
+ f"dirty_time={dirty_work.dirty_time.isoformat()} "
+ f"processed_time={display_processed_time}"
+ )
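+    # Use >= so a change stamped at exactly processed_time is still treated as dirty.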
+ return (
+ dirty_work.processed_time is None
+ or dirty_work.dirty_time >= dirty_work.processed_time
+ )
diff --git a/ietf/sync/tasks.py b/ietf/sync/tasks.py
index e4174d3729..34b2efeb5c 100644
--- a/ietf/sync/tasks.py
+++ b/ietf/sync/tasks.py
@@ -1,9 +1,11 @@
-# Copyright The IETF Trust 2024, All Rights Reserved
+# Copyright The IETF Trust 2024-2026, All Rights Reserved
#
# Celery task definitions
#
import datetime
import io
+from pathlib import Path
+from tempfile import NamedTemporaryFile
import requests
from celery import shared_task
@@ -12,9 +14,24 @@
from django.utils import timezone
from ietf.doc.models import DocEvent, RelatedDocument
+from ietf.doc.tasks import rebuild_reference_relations_task
from ietf.sync import iana
from ietf.sync import rfceditor
+from ietf.sync.errata import (
+ errata_are_dirty,
+ mark_errata_as_processed,
+ update_errata_from_rfceditor,
+)
from ietf.sync.rfceditor import MIN_QUEUE_RESULTS, parse_queue, update_drafts_from_queue
+from ietf.sync.rfcindex import (
+ create_bcp_txt_index,
+ create_fyi_txt_index,
+ create_rfc_txt_index,
+ create_rfc_xml_index,
+ create_std_txt_index,
+    mark_rfcindex_as_dirty,
+    mark_rfcindex_as_processed,
+    rfcindex_is_dirty,
+)
+from ietf.sync.utils import build_from_file_content, load_rfcs_into_blobdb, rsync_helper
from ietf.utils import log
from ietf.utils.timezone import date_today
@@ -22,13 +39,13 @@
@shared_task
def rfc_editor_index_update_task(full_index=False):
"""Update metadata from the RFC index
-
+
Default is to examine only changes in the past 365 days. Call with full_index=True to update
the full RFC index.
-
+
According to comments on the original script, a year's worth took about 20s on production as of
August 2022
-
+
The original rfc-editor-index-update script had a long-disabled provision for running the
rebuild_reference_relations scripts after the update. That has not been brought over
at all because it should be implemented as its own task if it is needed.
@@ -46,7 +63,7 @@ def rfc_editor_index_update_task(full_index=False):
timeout=30, # seconds
)
except requests.Timeout as exc:
- log.log(f'GET request timed out retrieving RFC editor index: {exc}')
+ log.log(f"GET request timed out retrieving RFC editor index: {exc}")
return # failed
rfc_index_xml = response.text
index_data = rfceditor.parse_index(io.StringIO(rfc_index_xml))
@@ -56,20 +73,25 @@ def rfc_editor_index_update_task(full_index=False):
timeout=30, # seconds
)
except requests.Timeout as exc:
- log.log(f'GET request timed out retrieving RFC editor errata: {exc}')
+ log.log(f"GET request timed out retrieving RFC editor errata: {exc}")
return # failed
- errata_data = response.json()
+ errata_data = response.json()
if len(index_data) < rfceditor.MIN_INDEX_RESULTS:
log.log("Not enough index entries, only %s" % len(index_data))
return # failed
if len(errata_data) < rfceditor.MIN_ERRATA_RESULTS:
log.log("Not enough errata entries, only %s" % len(errata_data))
return # failed
+ newly_published = set()
for rfc_number, changes, doc, rfc_published in rfceditor.update_docs_from_rfc_index(
index_data, errata_data, skip_older_than_date=skip_date
):
for c in changes:
log.log("RFC%s, %s: %s" % (rfc_number, doc.name, c))
+ if rfc_published:
+ newly_published.add(rfc_number)
+ if len(newly_published) > 0:
+ rsync_rfcs_from_rfceditor_task.delay(list(newly_published))
@shared_task
@@ -86,15 +108,15 @@ def rfc_editor_queue_updates_task():
drafts, warnings = parse_queue(io.StringIO(response.text))
for w in warnings:
log.log(f"Warning: {w}")
-
+
if len(drafts) < MIN_QUEUE_RESULTS:
log.log("Not enough results, only %s" % len(drafts))
return # failed
-
+
changed, warnings = update_drafts_from_queue(drafts)
for w in warnings:
log.log(f"Warning: {w}")
-
+
for c in changed:
log.log(f"Updated {c}")
@@ -110,9 +132,11 @@ def iana_changes_update_task():
MAX_INTERVAL_ACCEPTED_BY_IANA = datetime.timedelta(hours=23)
start = (
- timezone.now()
- - datetime.timedelta(hours=23)
- + datetime.timedelta(seconds=CLOCK_SKEW_COMPENSATION,)
+ timezone.now()
+ - datetime.timedelta(hours=23)
+ + datetime.timedelta(
+ seconds=CLOCK_SKEW_COMPENSATION,
+ )
)
end = start + datetime.timedelta(hours=23)
@@ -123,7 +147,9 @@ def iana_changes_update_task():
# requests if necessary
text = iana.fetch_changes_json(
- settings.IANA_SYNC_CHANGES_URL, t, min(end, t + MAX_INTERVAL_ACCEPTED_BY_IANA)
+ settings.IANA_SYNC_CHANGES_URL,
+ t,
+ min(end, t + MAX_INTERVAL_ACCEPTED_BY_IANA),
)
log.log(f"Retrieved the JSON: {text}")
@@ -149,9 +175,9 @@ def iana_protocols_update_task():
# "this needs to be the date where this tool is first deployed" in the original
# iana-protocols-updates script)"
rfc_must_published_later_than = datetime.datetime(
- 2012,
- 11,
- 26,
+ 2012,
+ 11,
+ 26,
tzinfo=datetime.UTC,
)
@@ -161,17 +187,17 @@ def iana_protocols_update_task():
timeout=30,
)
except requests.Timeout as exc:
- log.log(f'GET request timed out retrieving IANA protocols page: {exc}')
+ log.log(f"GET request timed out retrieving IANA protocols page: {exc}")
return
rfc_numbers = iana.parse_protocol_page(response.text)
def batched(l, n):
"""Split list l up in batches of max size n.
-
+
For Python 3.12 or later, replace this with itertools.batched()
"""
- return (l[i:i + n] for i in range(0, len(l), n))
+ return (l[i : i + n] for i in range(0, len(l), n))
for batch in batched(rfc_numbers, 100):
updated = iana.update_rfc_log_from_protocol_page(
@@ -182,6 +208,7 @@ def batched(l, n):
for d in updated:
log.log("Added history entry for %s" % d.display_name())
+
@shared_task
def fix_subseries_docevents_task():
"""Repairs DocEvents related to bugs around removing docs from subseries
@@ -222,3 +249,96 @@ def fix_subseries_docevents_task():
DocEvent.objects.filter(type="sync_from_rfc_editor", desc=desc).update(
time=obsoleting_time
)
+
+
+@shared_task
+def rsync_rfcs_from_rfceditor_task(rfc_numbers: list[int]):
+ log.log(f"Rsyncing rfcs from rfc-editor: {rfc_numbers}")
+ from_file = None
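+    # delete_on_close=False (Python 3.12+) keeps the file on disk after close() so
+    # rsync can read it; the file is still removed when the with block exits.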
+ with NamedTemporaryFile(mode="w", delete_on_close=False) as fp:
+ fp.write(build_from_file_content(rfc_numbers))
+ fp.close()
+ from_file = Path(fp.name)
+ rsync_helper(
+ [
+ "-a",
+ "--ignore-existing",
+ f"--include-from={from_file}",
+ "--exclude=*",
+ "rsync.rfc-editor.org::rfcs/",
+ f"{settings.RFC_PATH}",
+ ]
+ )
+ load_rfcs_into_blobdb(rfc_numbers)
+
+ rebuild_reference_relations_task.delay([f"rfc{num}" for num in rfc_numbers])
+
+
+@shared_task
+def load_rfcs_into_blobdb_task(start: int, end: int):
+ """Move file content for rfcs from rfc{start} to rfc{end} inclusive
+
+ As this is expected to be removed once the blobdb is populated, it
+ will truncate its work to a coded max end.
+ This will not overwrite any existing blob content, and will only
+ log a small complaint if asked to load a non-exsiting RFC.
+ """
+ # Protect us from ourselves
+ if end < start:
+ return
+ if start < 1:
+ start = 1
+ if end > 11000: # Arbitrarily chosen
+ end = 11000
+ load_rfcs_into_blobdb(list(range(start, end + 1)))
+
+
+@shared_task
+def update_errata_from_rfceditor_task():
+ if errata_are_dirty():
+ # new_processed_time is the *start* of processing so that any changes after
+ # this point will trigger another refresh
+ new_processed_time = timezone.now()
+ update_errata_from_rfceditor()
+ mark_errata_as_processed(new_processed_time)
+ mark_rfcindex_as_dirty() # ensure any changes are reflected in the indexes
+
+
+@shared_task
+def refresh_rfc_index_task():
+ if rfcindex_is_dirty():
+ # new_processed_time is the *start* of processing so that any changes after
+ # this point will trigger another refresh
+ new_processed_time = timezone.now()
+
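+        # Build each index independently so a failure in one does not block the rest.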
+ try:
+ create_rfc_txt_index()
+ except Exception as e:
+ log.log(f"Error: failure in creating rfc-index.txt. {e}")
+
+ try:
+ create_rfc_xml_index()
+ except Exception as e:
+ log.log(f"Error: failure in creating rfc-index.xml. {e}")
+
+ try:
+ create_bcp_txt_index()
+ except Exception as e:
+ log.log(f"Error: failure in creating bcp-index.txt. {e}")
+
+ try:
+ create_std_txt_index()
+ except Exception as e:
+ log.log(f"Error: failure in creating std-index.txt. {e}")
+
+ try:
+ create_fyi_txt_index()
+ except Exception as e:
+ log.log(f"Error: failure in creating fyi-index.txt. {e}")
+
+ mark_rfcindex_as_processed(new_processed_time)
diff --git a/ietf/sync/tests.py b/ietf/sync/tests.py
index 3432f6214a..e83b6a5e0a 100644
--- a/ietf/sync/tests.py
+++ b/ietf/sync/tests.py
@@ -1,5 +1,4 @@
-# Copyright The IETF Trust 2012-2020, All Rights Reserved
-# -*- coding: utf-8 -*-
+# Copyright The IETF Trust 2012-2026, All Rights Reserved
import os
@@ -13,6 +12,8 @@
from dataclasses import dataclass
from django.conf import settings
+from django.core.files.base import ContentFile
+from django.core.files.storage import storages
from django.urls import reverse as urlreverse
from django.utils import timezone
from django.test.utils import override_settings
@@ -25,15 +26,34 @@
RfcFactory,
DocumentAuthorFactory,
DocEventFactory,
- BcpFactory,
+    BcpFactory,
+    WgRfcFactory,
+)
+from ietf.doc.models import (
+ Document,
+ DocEvent,
+ DeletedEvent,
+ DocTagName,
+ RelatedDocument,
+ State,
+ StateDocEvent,
)
-from ietf.doc.models import Document, DocEvent, DeletedEvent, DocTagName, RelatedDocument, State, StateDocEvent
from ietf.doc.utils import add_state_change_event
from ietf.group.factories import GroupFactory
from ietf.person.factories import PersonFactory
from ietf.person.models import Person
from ietf.sync import iana, rfceditor, tasks
+from ietf.sync.errata import (
+ update_errata_from_rfceditor,
+ get_errata_last_updated,
+ get_errata_data,
+ errata_map_from_json,
+ update_errata_dirty_time,
+ mark_errata_as_processed,
+ update_errata_tags,
+)
+from ietf.sync.tasks import update_errata_from_rfceditor_task
from ietf.utils.mail import outbox, empty_outbox
+from ietf.utils.models import DirtyBits
from ietf.utils.test_utils import login_testing_unauthorized
from ietf.utils.test_utils import TestCase
from ietf.utils.timezone import date_today, RPC_TZINFO
@@ -301,6 +321,7 @@ def test_rfc_index(self):
ad=Person.objects.get(user__username='ad'),
external_url="http://my-external-url.example.com",
note="this is a note",
+ pages=54, # make sure this is not 42
)
DocumentAuthorFactory.create_batch(2, document=draft_doc)
draft_doc.action_holders.add(draft_doc.ad) # not normally set, but add to be sure it's cleared
@@ -446,7 +467,7 @@ def test_rfc_index(self):
rfc_doc = Document.objects.filter(rfc_number=1234, type_id="rfc").first()
self.assertIsNotNone(rfc_doc, "RFC document should have been created")
- self.assertEqual(rfc_doc.authors(), draft_doc.authors())
+ self.assertEqual(rfc_doc.author_persons_or_names(), draft_doc.author_persons_or_names())
rfc_events = rfc_doc.docevent_set.all()
self.assertEqual(len(rfc_events), 8)
expected_events = [
@@ -881,6 +902,191 @@ def test_rfceditor_undo(self):
self.assertTrue(StateDocEvent.objects.filter(desc="First", doc=draft))
+class ErrataTests(TestCase):
+ @override_settings(ERRATA_JSON_BLOB_NAME="myblob.json")
+ def test_get_errata_last_update(self):
+ red_bucket = storages["red_bucket"] # InMemoryStorage in test
+ red_bucket.save("myblob.json", ContentFile("file"))
+ self.assertEqual(
+ get_errata_last_updated(), red_bucket.get_modified_time("myblob.json")
+ )
+
+ @override_settings(ERRATA_JSON_BLOB_NAME="myblob.json")
+ def test_get_errata_data(self):
+ red_bucket = storages["red_bucket"] # InMemoryStorage in test
+ red_bucket.save("myblob.json", ContentFile('[{"value": 3}]'))
+ self.assertEqual(
+ get_errata_data(),
+ [{"value": 3}],
+ )
+
+ def test_errata_map_from_json(self):
+ input_data = [
+ {
+ "doc-id": "not-an-rfc",
+ "errata_status_code": "Verified",
+ },
+ {
+ "doc-id": "rfc01234",
+ "errata_status_code": "Reported",
+ },
+ {
+ "doc-id": "RFC1001",
+ "errata_status_code": "Verified"
+ },
+ {
+ "doc-id": "RfC1234",
+ "errata_status_code": "Verified",
+ },
+ ]
+ expected_output = {1001: [input_data[2]], 1234: [input_data[1], input_data[3]]}
+ self.assertDictEqual(errata_map_from_json(input_data), expected_output)
+
+ @mock.patch("ietf.sync.errata.update_errata_tags")
+ @mock.patch("ietf.sync.errata.get_errata_data")
+ def test_update_errata_from_rfceditor(self, mock_get_data, mock_update):
+ fake_data = object()
+ mock_get_data.return_value = fake_data
+ update_errata_from_rfceditor()
+ self.assertTrue(mock_get_data.called)
+ self.assertTrue(mock_update.called)
+ self.assertEqual(mock_update.call_args, mock.call(fake_data))
+
+ def test_update_errata_tags(self):
+ tag_has_errata = DocTagName.objects.get(slug="errata")
+ tag_has_verified_errata = DocTagName.objects.get(slug="verified-errata")
+
+ rfcs = WgRfcFactory.create_batch(10)
+ rfcs[0].tags.set([tag_has_errata])
+ rfcs[1].tags.set([tag_has_errata, tag_has_verified_errata])
+ rfcs[2].tags.set([tag_has_errata])
+ rfcs[3].tags.set([tag_has_errata, tag_has_verified_errata])
+ rfcs[4].tags.set([tag_has_errata])
+ rfcs[5].tags.set([tag_has_errata, tag_has_verified_errata])
+
+ # Only contains the fields we care about, not the full JSON
+ errata_data = [
+ # rfcs[0] had errata and should keep it
+ {"doc-id": rfcs[0].name, "errata_status_code": "Held for Document Update"},
+ {"doc-id": rfcs[0].name, "errata_status_code": "Rejected"},
+ # rfcs[1] had errata+verified-errata and should keep both
+ {"doc-id": rfcs[1].name, "errata_status_code": "Verified"},
+ # rfcs[2] had errata and should gain verified-errata
+ {"doc-id": rfcs[2].name, "errata_status_code": "Verified"},
+ # rfcs[3] had errata+verified errata and should lose both
+ {"doc-id": rfcs[3].name, "errata_status_code": "Rejected"},
+ # rfcs[4] had errata and should gain verified-errata
+ {"doc-id": rfcs[4].name, "errata_status_code": "Verified"},
+ {"doc-id": rfcs[4].name, "errata_status_code": "Reported"},
+ # rfcs[5] had errata+verified-errata and should lose verified-errata
+ {"doc-id": rfcs[5].name, "errata_status_code": "Reported"},
+ # rfcs[6] had none and should gain errata
+ {"doc-id": rfcs[6].name, "errata_status_code": "Reported"},
+ # rfcs[7] had none and should gain errata+verified-errata
+ {"doc-id": rfcs[7].name, "errata_status_code": "Verified"},
+ # rfcs[8] had none and it should stay that way
+ {"doc-id": rfcs[8].name, "errata_status_code": "Rejected"},
+ # rfcs[9] had none and it should stay that way (no entry at all)
+ ]
+ update_errata_tags(errata_data)
+
+ self.assertCountEqual(rfcs[0].tags.all(), [tag_has_errata])
+ self.assertIsNone(rfcs[0].docevent_set.first()) # no change
+
+ self.assertCountEqual(
+ rfcs[1].tags.all(), [tag_has_errata, tag_has_verified_errata]
+ )
+ self.assertIsNone(rfcs[1].docevent_set.first()) # no change
+
+ self.assertCountEqual(
+ rfcs[2].tags.all(), [tag_has_errata, tag_has_verified_errata]
+ )
+ self.assertEqual(rfcs[2].docevent_set.count(), 1)
+ self.assertIn(": added verified-errata tag", rfcs[2].docevent_set.first().desc)
+
+ self.assertCountEqual(rfcs[3].tags.all(), [])
+ self.assertEqual(rfcs[3].docevent_set.count(), 1)
+ self.assertIn(
+ ": removed errata tag, removed verified-errata tag (all errata rejected)",
+ rfcs[3].docevent_set.first().desc,
+ )
+
+ self.assertCountEqual(
+ rfcs[4].tags.all(), [tag_has_errata, tag_has_verified_errata]
+ )
+ self.assertEqual(rfcs[4].docevent_set.count(), 1)
+ self.assertIn(": added verified-errata tag", rfcs[4].docevent_set.first().desc)
+
+ self.assertCountEqual(rfcs[5].tags.all(), [tag_has_errata])
+ self.assertEqual(rfcs[5].docevent_set.count(), 1)
+ self.assertIn(
+ ": removed verified-errata tag", rfcs[5].docevent_set.first().desc
+ )
+
+ self.assertCountEqual(rfcs[6].tags.all(), [tag_has_errata])
+ self.assertEqual(rfcs[6].docevent_set.count(), 1)
+ self.assertIn(": added errata tag", rfcs[6].docevent_set.first().desc)
+
+ self.assertCountEqual(
+ rfcs[7].tags.all(), [tag_has_errata, tag_has_verified_errata]
+ )
+ self.assertEqual(rfcs[7].docevent_set.count(), 1)
+ self.assertIn(
+ ": added errata tag, added verified-errata tag",
+ rfcs[7].docevent_set.first().desc,
+ )
+
+ self.assertCountEqual(rfcs[8].tags.all(), [])
+ self.assertIsNone(rfcs[8].docevent_set.first()) # no change
+
+ self.assertCountEqual(rfcs[9].tags.all(), [])
+ self.assertIsNone(rfcs[9].docevent_set.first()) # no change
+
+ @override_settings(ERRATA_JSON_BLOB_NAME="myblob.json")
+ @mock.patch("ietf.sync.errata.get_errata_last_updated")
+ def test_update_errata_dirty_time(self, mock_last_updated):
+ ERRATA_SLUG = DirtyBits.Slugs.ERRATA
+
+ # No time available
+ mock_last_updated.side_effect = FileNotFoundError
+ self.assertIsNone(DirtyBits.objects.filter(slug=ERRATA_SLUG).first())
+ self.assertIsNone(update_errata_dirty_time()) # no blob yet
+ self.assertIsNone(DirtyBits.objects.filter(slug=ERRATA_SLUG).first())
+
+ # Now set a time
+ first_timestamp = timezone.now() - datetime.timedelta(hours=3)
+ mock_last_updated.return_value = first_timestamp
+ mock_last_updated.side_effect = None
+ result = update_errata_dirty_time()
+ self.assertTrue(isinstance(result, DirtyBits))
+ result.refresh_from_db()
+ self.assertEqual(result.slug, ERRATA_SLUG)
+ self.assertEqual(result.processed_time, None)
+ self.assertEqual(result.dirty_time, first_timestamp)
+
+ # Update the time
+ second_timestamp = timezone.now()
+ mock_last_updated.return_value = second_timestamp
+ second_result = update_errata_dirty_time()
+ self.assertEqual(result.pk, second_result.pk) # should be the same record
+ result.refresh_from_db()
+ self.assertEqual(result.slug, ERRATA_SLUG)
+ self.assertEqual(result.processed_time, None)
+ self.assertEqual(result.dirty_time, second_timestamp)
+
+ def test_mark_errata_as_processed(self):
+ ERRATA_SLUG = DirtyBits.Slugs.ERRATA
+ first_timestamp = timezone.now()
+ mark_errata_as_processed(first_timestamp) # no DirtyBits is not an error
+ self.assertIsNone(DirtyBits.objects.filter(slug=ERRATA_SLUG).first())
+ dbits = DirtyBits.objects.create(slug=ERRATA_SLUG, dirty_time=first_timestamp)
+ second_timestamp = timezone.now()
+ mark_errata_as_processed(second_timestamp)
+ dbits.refresh_from_db()
+ self.assertEqual(dbits.dirty_time, first_timestamp)
+ self.assertEqual(dbits.processed_time, second_timestamp)
+
+
class TaskTests(TestCase):
@override_settings(
RFC_EDITOR_INDEX_URL="https://rfc-editor.example.com/index/",
@@ -889,8 +1095,9 @@ class TaskTests(TestCase):
@mock.patch("ietf.sync.tasks.rfceditor.update_docs_from_rfc_index")
@mock.patch("ietf.sync.tasks.rfceditor.parse_index")
@mock.patch("ietf.sync.tasks.requests.get")
+ @mock.patch("ietf.sync.tasks.rsync_rfcs_from_rfceditor_task.delay")
def test_rfc_editor_index_update_task(
- self, requests_get_mock, parse_index_mock, update_docs_mock
+ self, rsync_task_mock, requests_get_mock, parse_index_mock, update_docs_mock
) -> None: # the annotation here prevents mypy from complaining about annotation-unchecked
"""rfc_editor_index_update_task calls helpers correctly
@@ -922,6 +1129,7 @@ def json(self):
rfc = RfcFactory()
# Test with full_index = False
+ rsync_task_mock.return_value = None
requests_get_mock.side_effect = (index_response, errata_response) # will step through these
parse_index_mock.return_value = MockIndexData(length=rfceditor.MIN_INDEX_RESULTS)
update_docs_mock.return_value = (
@@ -947,10 +1155,13 @@ def json(self):
)
self.assertIsNotNone(update_docs_kwargs["skip_older_than_date"])
+ self.assertFalse(rsync_task_mock.called)
+
# Test again with full_index = True
requests_get_mock.reset_mock()
parse_index_mock.reset_mock()
update_docs_mock.reset_mock()
+ rsync_task_mock.reset_mock()
requests_get_mock.side_effect = (index_response, errata_response) # will step through these
tasks.rfc_editor_index_update_task(full_index=True)
@@ -971,40 +1182,67 @@ def json(self):
)
self.assertIsNone(update_docs_kwargs["skip_older_than_date"])
+ self.assertFalse(rsync_task_mock.called)
+
+    # Test again where the index would cause a new RFC to come into existence
+ requests_get_mock.reset_mock()
+ parse_index_mock.reset_mock()
+ update_docs_mock.reset_mock()
+ rsync_task_mock.reset_mock()
+ requests_get_mock.side_effect = (
+ index_response,
+ errata_response,
+ ) # will step through these
+ update_docs_mock.return_value = (
+ (rfc.rfc_number, ("something changed",), rfc, True),
+ )
+ tasks.rfc_editor_index_update_task(full_index=True)
+ self.assertTrue(rsync_task_mock.called)
+ rsync_task_args, rsync_task_kwargs = rsync_task_mock.call_args
+ self.assertEqual((([rfc.rfc_number],), {}), (rsync_task_args, rsync_task_kwargs))
+
# Test error handling
requests_get_mock.reset_mock()
parse_index_mock.reset_mock()
update_docs_mock.reset_mock()
+ rsync_task_mock.reset_mock()
requests_get_mock.side_effect = requests.Timeout # timeout on every get()
tasks.rfc_editor_index_update_task(full_index=False)
self.assertFalse(parse_index_mock.called)
self.assertFalse(update_docs_mock.called)
+ self.assertFalse(rsync_task_mock.called)
requests_get_mock.reset_mock()
parse_index_mock.reset_mock()
update_docs_mock.reset_mock()
+ rsync_task_mock.reset_mock()
requests_get_mock.side_effect = [index_response, requests.Timeout] # timeout second get()
tasks.rfc_editor_index_update_task(full_index=False)
self.assertFalse(update_docs_mock.called)
+ self.assertFalse(rsync_task_mock.called)
requests_get_mock.reset_mock()
parse_index_mock.reset_mock()
update_docs_mock.reset_mock()
+ rsync_task_mock.reset_mock()
requests_get_mock.side_effect = [index_response, errata_response]
# feed in an index that is too short
parse_index_mock.return_value = MockIndexData(length=rfceditor.MIN_INDEX_RESULTS - 1)
tasks.rfc_editor_index_update_task(full_index=False)
self.assertTrue(parse_index_mock.called)
self.assertFalse(update_docs_mock.called)
+ self.assertFalse(rsync_task_mock.called)
requests_get_mock.reset_mock()
parse_index_mock.reset_mock()
update_docs_mock.reset_mock()
+ rsync_task_mock.reset_mock()
requests_get_mock.side_effect = [index_response, errata_response]
errata_response.json_length = rfceditor.MIN_ERRATA_RESULTS - 1 # too short
parse_index_mock.return_value = MockIndexData(length=rfceditor.MIN_INDEX_RESULTS)
tasks.rfc_editor_index_update_task(full_index=False)
self.assertFalse(update_docs_mock.called)
+ self.assertFalse(rsync_task_mock.called)
@override_settings(RFC_EDITOR_QUEUE_URL="https://rfc-editor.example.com/queue/")
@mock.patch("ietf.sync.tasks.update_drafts_from_queue")
@@ -1134,3 +1372,76 @@ def test_iana_protocols_update_task(
self.assertTrue(requests_get_mock.called)
self.assertFalse(parse_protocols_mock.called)
self.assertFalse(update_rfc_log_mock.called)
+
+ @mock.patch("ietf.sync.tasks.rsync_helper")
+ @mock.patch("ietf.sync.tasks.load_rfcs_into_blobdb")
+ @mock.patch("ietf.sync.tasks.rebuild_reference_relations_task.delay")
+ def test_rsync_rfcs_from_rfceditor_task(
+ self,
+ rebuild_relations_mock,
+ load_blobs_mock,
+ rsync_helper_mock,
+ ):
+ tasks.rsync_rfcs_from_rfceditor_task([12345, 54321])
+ self.assertTrue(rsync_helper_mock.called)
+ self.assertTrue(load_blobs_mock.called)
+ load_blobs_args, load_blobs_kwargs = load_blobs_mock.call_args
+ self.assertEqual(load_blobs_args, ([12345, 54321],))
+ self.assertEqual(load_blobs_kwargs, {})
+ self.assertTrue(rebuild_relations_mock.called)
+ rebuild_args, rebuild_kwargs = rebuild_relations_mock.call_args
+ self.assertEqual(rebuild_args, (["rfc12345", "rfc54321"],))
+ self.assertEqual(rebuild_kwargs, {})
+
+ @mock.patch("ietf.sync.tasks.load_rfcs_into_blobdb")
+ def test_load_rfcs_into_blobdb_task(
+ self,
+ load_blobs_mock,
+ ):
+ tasks.load_rfcs_into_blobdb_task(5, 3)
+ self.assertFalse(load_blobs_mock.called)
+ load_blobs_mock.reset_mock()
+ tasks.load_rfcs_into_blobdb_task(-1, 1)
+ self.assertTrue(load_blobs_mock.called)
+ mock_args, mock_kwargs = load_blobs_mock.call_args
+ self.assertEqual(mock_args, ([1],))
+ self.assertEqual(mock_kwargs, {})
+ load_blobs_mock.reset_mock()
+ tasks.load_rfcs_into_blobdb_task(10999, 50000)
+ self.assertTrue(load_blobs_mock.called)
+ mock_args, mock_kwargs = load_blobs_mock.call_args
+ self.assertEqual(mock_args, ([10999, 11000],))
+ self.assertEqual(mock_kwargs, {})
+ load_blobs_mock.reset_mock()
+ tasks.load_rfcs_into_blobdb_task(3261, 3263)
+ self.assertTrue(load_blobs_mock.called)
+ mock_args, mock_kwargs = load_blobs_mock.call_args
+ self.assertEqual(mock_args, ([3261, 3262, 3263],))
+ self.assertEqual(mock_kwargs, {})
+
+ @mock.patch("ietf.sync.tasks.update_errata_from_rfceditor")
+ @mock.patch("ietf.sync.tasks.mark_rfcindex_as_dirty")
+ @mock.patch("ietf.sync.tasks.mark_errata_as_processed")
+ @mock.patch("ietf.sync.tasks.errata_are_dirty")
+ def test_update_errata_from_rfceditor_task(
+ self,
+ mock_errata_are_dirty,
+ mock_mark_errata_processed,
+ mock_mark_rfcindex_dirty,
+ mock_update,
+ ):
+ mock_errata_are_dirty.return_value = False
+ update_errata_from_rfceditor_task()
+ self.assertTrue(mock_errata_are_dirty.called)
+ self.assertFalse(mock_mark_errata_processed.called)
+ self.assertFalse(mock_mark_rfcindex_dirty.called)
+ self.assertFalse(mock_update.called)
+
+ mock_errata_are_dirty.reset_mock()
+ mock_errata_are_dirty.return_value = True
+ update_errata_from_rfceditor_task()
+ self.assertTrue(mock_errata_are_dirty.called)
+ self.assertTrue(mock_mark_errata_processed.called)
+ self.assertTrue(mock_mark_rfcindex_dirty.called)
+ self.assertTrue(mock_update.called)
diff --git a/ietf/sync/tests_rfcindex.py b/ietf/sync/tests_rfcindex.py
new file mode 100644
index 0000000000..541ffbb228
--- /dev/null
+++ b/ietf/sync/tests_rfcindex.py
@@ -0,0 +1,413 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+import json
+from unittest import mock
+
+from django.core.files.base import ContentFile
+from django.core.files.storage import storages
+from django.test.utils import override_settings
+from lxml import etree
+
+from ietf.doc.factories import (
+ BcpFactory,
+ FyiFactory,
+ StdFactory,
+ IndividualRfcFactory,
+ PublishedRfcDocEventFactory,
+)
+from ietf.name.models import DocTagName
+from ietf.sync.rfcindex import (
+ create_bcp_txt_index,
+ create_fyi_txt_index,
+ create_rfc_txt_index,
+ create_rfc_xml_index,
+ create_std_txt_index,
+ format_rfc_number,
+ get_april1_rfc_numbers,
+ get_publication_std_levels,
+ get_unusable_rfc_numbers,
+ save_to_red_bucket,
+ subseries_text_line,
+)
+from ietf.utils.test_utils import TestCase
+
+
+class RfcIndexTests(TestCase):
+ """Tests of rfc-index generation
+
+ Tests are limited and should cover more cases. Needs:
+ * test of subseries docs
+ * test of related docs (obsoletes/updates + reverse directions)
+ * more thorough validation of index contents
+
+ Be careful when calling create_rfc_txt_index() or create_rfc_xml_index(). These
+    will save to storage by default, which can introduce cross-talk between tests.
+    Best to patch save_to_red_bucket() with a mock, as these tests do.
+ """
+
+ def setUp(self):
+ super().setUp()
+ red_bucket = storages["red_bucket"]
+
+ # Create an unused RFC number
+ red_bucket.save(
+ "input/unusable-rfc-numbers.json",
+ ContentFile(json.dumps([{"number": 123, "comment": ""}])),
+ )
+
+ # actual April 1 RFC
+ self.april_fools_rfc = PublishedRfcDocEventFactory(
+ time="2020-04-01T12:00:00Z",
+ doc=IndividualRfcFactory(
+ name="rfc4560",
+ rfc_number=4560,
+ stream_id="ise",
+ std_level_id="inf",
+ ),
+ ).doc
+ # Set up a JSON file to flag the April 1 RFC
+ red_bucket.save(
+ "input/april-first-rfc-numbers.json",
+ ContentFile(json.dumps([self.april_fools_rfc.rfc_number])),
+ )
+
+ # non-April Fools RFC that happens to have been published on April 1
+ self.rfc = PublishedRfcDocEventFactory(
+ time="2021-04-01T12:00:00Z",
+ doc__name="rfc10000",
+ doc__rfc_number=10000,
+ doc__std_level_id="std",
+ ).doc
+ self.rfc.tags.add(DocTagName.objects.get(slug="errata"))
+
+ # Create a BCP with non-April Fools RFC
+ self.bcp = BcpFactory(contains=[self.rfc], name="bcp11")
+
+ # Create a STD with non-April Fools RFC
+ self.std = StdFactory(contains=[self.rfc], name="std11")
+
+ # Create a FYI with non-April Fools RFC
+ self.fyi = FyiFactory(contains=[self.rfc], name="fyi11")
+
+ # Set up a publication-std-levels.json file to indicate the publication
+ # standard of self.rfc as different from its current value
+ red_bucket.save(
+ "input/publication-std-levels.json",
+ ContentFile(
+ json.dumps(
+ [{"number": self.rfc.rfc_number, "publication_std_level": "ps"}]
+ )
+ ),
+ )
+
+ def tearDown(self):
+ red_bucket = storages["red_bucket"]
+ red_bucket.delete("input/unusable-rfc-numbers.json")
+ red_bucket.delete("input/april-first-rfc-numbers.json")
+ red_bucket.delete("input/publication-std-levels.json")
+ super().tearDown()
+
+ @override_settings(RFCINDEX_INPUT_PATH="input/")
+ @mock.patch("ietf.sync.rfcindex.save_to_red_bucket")
+ def test_create_rfc_txt_index(self, mock_save):
+ create_rfc_txt_index()
+ self.assertEqual(mock_save.call_count, 1)
+ self.assertEqual(mock_save.call_args[0][0], "rfc-index.txt")
+ contents = mock_save.call_args[0][1]
+ self.assertTrue(isinstance(contents, str))
+ self.assertIn(
+ "123 Not Issued.",
+ contents,
+ )
+ # No zero prefix!
+ self.assertNotIn(
+ "0123 Not Issued.",
+ contents,
+ )
+ self.assertIn(
+ f"{self.april_fools_rfc.rfc_number} {self.april_fools_rfc.title}",
+ contents,
+ )
+ self.assertIn("1 April 2020", contents) # from the April 1 RFC
+ self.assertIn(
+ f"{self.rfc.rfc_number} {self.rfc.title}",
+ contents,
+ )
+ self.assertIn("April 2021", contents) # from the non-April 1 RFC
+ self.assertNotIn("1 April 2021", contents)
+
+ @override_settings(RFCINDEX_INPUT_PATH="input/")
+ @mock.patch("ietf.sync.rfcindex.save_to_red_bucket")
+ def test_create_rfc_xml_index(self, mock_save):
+ create_rfc_xml_index()
+ self.assertEqual(mock_save.call_count, 1)
+ self.assertEqual(mock_save.call_args[0][0], "rfc-index.xml")
+ contents = mock_save.call_args[0][1]
+ self.assertTrue(isinstance(contents, bytes))
+ ns = "{https://www.rfc-editor.org/rfc-index}" # NOT an f-string
+ index = etree.fromstring(contents)
+
+ # We can aspire to validating the schema - currently does not conform because
+ # XSD expects 4-digit RFC numbers (etc).
+ #
+ # xmlschema = etree.XMLSchema(etree.fromstring(
+ # Path(__file__).with_name("rfc-index.xsd").read_bytes())
+ # )
+ # xmlschema.assertValid(index)
+
+ children = list(index) # elements as list
+ # Should be one rfc-not-issued-entry
+ self.assertEqual(len(children), 16)
+ self.assertEqual(
+ [
+ c.find(f"{ns}doc-id").text
+ for c in children
+ if c.tag == f"{ns}rfc-not-issued-entry"
+ ],
+ ["RFC123"],
+ )
+ # Should be two rfc-entries
+ rfc_entries = {
+ c.find(f"{ns}doc-id").text: c for c in children if c.tag == f"{ns}rfc-entry"
+ }
+
+ # Check the April Fool's entry
+ april_fools_entry = rfc_entries[self.april_fools_rfc.name.upper()]
+ self.assertEqual(
+ april_fools_entry.find(f"{ns}title").text,
+ self.april_fools_rfc.title,
+ )
+ self.assertEqual(
+ [(c.tag, c.text) for c in april_fools_entry.find(f"{ns}date")],
+ [(f"{ns}month", "April"), (f"{ns}day", "1"), (f"{ns}year", "2020")],
+ )
+ self.assertEqual(
+ april_fools_entry.find(f"{ns}current-status").text,
+ "INFORMATIONAL",
+ )
+ self.assertEqual(
+ april_fools_entry.find(f"{ns}publication-status").text,
+ "UNKNOWN",
+ )
+
+ # Check the Regular entry
+ rfc_entry = rfc_entries[self.rfc.name.upper()]
+ self.assertEqual(rfc_entry.find(f"{ns}title").text, self.rfc.title)
+ self.assertEqual(
+ rfc_entry.find(f"{ns}current-status").text, "INTERNET STANDARD"
+ )
+ self.assertEqual(
+ rfc_entry.find(f"{ns}publication-status").text, "PROPOSED STANDARD"
+ )
+ self.assertEqual(
+ [(c.tag, c.text) for c in rfc_entry.find(f"{ns}date")],
+ [(f"{ns}month", "April"), (f"{ns}year", "2021")],
+ )
+
+ @override_settings(RFCINDEX_INPUT_PATH="input/")
+ @mock.patch("ietf.sync.rfcindex.save_to_red_bucket")
+ def test_create_bcp_txt_index(self, mock_save):
+ create_bcp_txt_index()
+ self.assertEqual(mock_save.call_count, 1)
+ self.assertEqual(mock_save.call_args[0][0], "bcp-index.txt")
+ contents = mock_save.call_args[0][1]
+ self.assertTrue(isinstance(contents, str))
+ # starts from 1
+ self.assertIn(
+ "[BCP1]",
+ contents,
+ )
+ # fill up to 11
+ self.assertIn(
+ "[BCP10]",
+ contents,
+ )
+ # but not to 12
+ self.assertNotIn(
+ "[BCP12]",
+ contents,
+ )
+ # Test empty BCPs
+ self.assertIn(
+ "Best Current Practice 9 currently contains no RFCs",
+ contents,
+ )
+ # No zero prefix!
+ self.assertNotIn(
+ "[BCP0001]",
+ contents,
+ )
+ # Has BCP11 with a RFC
+ self.assertIn(
+ "Best Current Practice 11,",
+ contents,
+ )
+ self.assertIn(
+ f'"{self.rfc.title}"',
+ contents,
+ )
+ self.assertIn(
+ "BCP 11,",
+ contents,
+ )
+ self.assertIn(
+ f"RFC {self.rfc.rfc_number},",
+ contents,
+ )
+
+ @override_settings(RFCINDEX_INPUT_PATH="input/")
+ @mock.patch("ietf.sync.rfcindex.save_to_red_bucket")
+ def test_create_std_txt_index(self, mock_save):
+ create_std_txt_index()
+ self.assertEqual(mock_save.call_count, 1)
+ self.assertEqual(mock_save.call_args[0][0], "std-index.txt")
+ contents = mock_save.call_args[0][1]
+ self.assertTrue(isinstance(contents, str))
+ # starts from 1
+ self.assertIn(
+ "[STD1]",
+ contents,
+ )
+ # fill up to 11
+ self.assertIn(
+ "[STD10]",
+ contents,
+ )
+ # but not to 12
+ self.assertNotIn(
+ "[STD12]",
+ contents,
+ )
+ # Test empty STDs
+ self.assertIn(
+ "Internet Standard 9 currently contains no RFCs",
+ contents,
+ )
+ # No zero prefix!
+ self.assertNotIn(
+ "[STD0001]",
+ contents,
+ )
+ # Has STD11 with a RFC
+ self.assertIn(
+ "Internet Standard 11,",
+ contents,
+ )
+ self.assertIn(
+ f'"{self.rfc.title}"',
+ contents,
+ )
+ self.assertIn(
+ "STD 11,",
+ contents,
+ )
+ self.assertIn(
+ f"RFC {self.rfc.rfc_number},",
+ contents,
+ )
+
+ @override_settings(RFCINDEX_INPUT_PATH="input/")
+ @mock.patch("ietf.sync.rfcindex.save_to_red_bucket")
+ def test_create_fyi_txt_index(self, mock_save):
+ create_fyi_txt_index()
+ self.assertEqual(mock_save.call_count, 1)
+ self.assertEqual(mock_save.call_args[0][0], "fyi-index.txt")
+ contents = mock_save.call_args[0][1]
+ self.assertTrue(isinstance(contents, str))
+ # starts from 1
+ self.assertIn(
+ "[FYI1]",
+ contents,
+ )
+ # fill up to 11
+ self.assertIn(
+ "[FYI10]",
+ contents,
+ )
+ # but not to 12
+ self.assertNotIn(
+ "[FYI12]",
+ contents,
+ )
+ # Test empty FYIs
+ self.assertIn(
+ "For Your Information 9 currently contains no RFCs",
+ contents,
+ )
+ # No zero prefix!
+ self.assertNotIn(
+ "[FYI0001]",
+ contents,
+ )
+ # Has FYI11 with a RFC
+ self.assertIn(
+ "For Your Information 11,",
+ contents,
+ )
+ self.assertIn(
+ f'"{self.rfc.title}"',
+ contents,
+ )
+ self.assertIn(
+ "FYI 11,",
+ contents,
+ )
+ self.assertIn(
+ f"RFC {self.rfc.rfc_number},",
+ contents,
+ )
+
+
+class HelperTests(TestCase):
+ def test_format_rfc_number(self):
+ self.assertEqual(format_rfc_number(10), "10")
+ with override_settings(RFCINDEX_MATCH_LEGACY_XML=True):
+ self.assertEqual(format_rfc_number(10), "0010")
+
+ def test_save_to_red_bucket(self):
+ red_bucket = storages["red_bucket"]
+ with override_settings(RFCINDEX_DELETE_THEN_WRITE=False):
+ save_to_red_bucket("test", "contents \U0001f600")
+ # Read as binary and explicitly decode to confirm encoding
+ with red_bucket.open("test", "rb") as f:
+ self.assertEqual(f.read().decode("utf-8"), "contents \U0001f600")
+ with override_settings(RFCINDEX_DELETE_THEN_WRITE=True):
+ save_to_red_bucket("test", "new contents \U0001fae0".encode("utf-8"))
+ # Read as binary and explicitly decode to confirm encoding
+ with red_bucket.open("test", "rb") as f:
+ self.assertEqual(f.read().decode("utf-8"), "new contents \U0001fae0")
+ red_bucket.delete("test") # clean up like a good child
+
+ def test_get_unusable_rfc_numbers_raises(self):
+ """get_unusable_rfc_numbers should bail on errors"""
+ with self.assertRaises(FileNotFoundError):
+ get_unusable_rfc_numbers()
+ red_bucket = storages["red_bucket"]
+ red_bucket.save("unusable-rfc-numbers.json", ContentFile("not json"))
+ with self.assertRaises(json.JSONDecodeError):
+ get_unusable_rfc_numbers()
+ red_bucket.delete("unusable-rfc-numbers.json")
+
+ def test_get_april1_rfc_numbers_raises(self):
+ """get_april1_rfc_numbers should bail on errors"""
+ with self.assertRaises(FileNotFoundError):
+ get_april1_rfc_numbers()
+ red_bucket = storages["red_bucket"]
+ red_bucket.save("april-first-rfc-numbers.json", ContentFile("not json"))
+ with self.assertRaises(json.JSONDecodeError):
+ get_april1_rfc_numbers()
+ red_bucket.delete("april-first-rfc-numbers.json")
+
+ def test_get_publication_std_levels_raises(self):
+ """get_publication_std_levels should bail on errors"""
+ with self.assertRaises(FileNotFoundError):
+ get_publication_std_levels()
+ red_bucket = storages["red_bucket"]
+ red_bucket.save("publication-std-levels.json", ContentFile("not json"))
+ with self.assertRaises(json.JSONDecodeError):
+ get_publication_std_levels()
+ red_bucket.delete("publication-std-levels.json")
+
+ def test_subseries_text_line(self):
+ text = "foobar"
+ self.assertEqual(subseries_text_line(line=text, first=True), f" {text}")
+ self.assertEqual(subseries_text_line(line=text), f" {text}")
diff --git a/ietf/sync/tests_utils.py b/ietf/sync/tests_utils.py
new file mode 100644
index 0000000000..bb4a859e30
--- /dev/null
+++ b/ietf/sync/tests_utils.py
@@ -0,0 +1,84 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+from pathlib import Path
+from tempfile import TemporaryDirectory
+
+from django.test import override_settings
+from ietf import settings
+from ietf.doc.factories import RfcFactory
+from ietf.doc.storage_utils import exists_in_storage, retrieve_str
+from ietf.sync.utils import build_from_file_content, load_rfcs_into_blobdb, rsync_helper
+from ietf.utils.test_utils import TestCase
+
+
+class RsyncHelperTests(TestCase):
+ def test_rsync_helper(self):
+ with (
+ TemporaryDirectory() as source_dir,
+ TemporaryDirectory() as dest_dir,
+ ):
+ with (Path(source_dir) / "canary.txt").open("w") as canary_source_file:
+ canary_source_file.write("chirp")
+ rsync_helper(
+ [
+ "-a",
+ f"{source_dir}/",
+ f"{dest_dir}/",
+ ]
+ )
+ with (Path(dest_dir) / "canary.txt").open("r") as canary_dest_file:
+ chirp = canary_dest_file.read()
+ self.assertEqual(chirp, "chirp")
+
+ def test_build_from_file_content(self):
+ content = build_from_file_content([12345, 54321])
+ self.assertEqual(
+ content,
+ """prerelease/
+rfc12345.txt
+rfc12345.html
+rfc12345.xml
+rfc12345.pdf
+rfc12345.ps
+rfc12345.json
+prerelease/rfc12345.notprepped.xml
+rfc54321.txt
+rfc54321.html
+rfc54321.xml
+rfc54321.pdf
+rfc54321.ps
+rfc54321.json
+prerelease/rfc54321.notprepped.xml
+""",
+ )
+
+
+class RfcBlobUploadTests(TestCase):
+ def test_load_rfcs_into_blobdb(self):
+ with TemporaryDirectory() as faux_rfc_path:
+ with override_settings(RFC_PATH=faux_rfc_path):
+ rfc_path = Path(faux_rfc_path)
+ (rfc_path / "prerelease").mkdir()
+ for num in [12345, 54321]:
+ RfcFactory(rfc_number=num)
+ for ext in settings.RFC_FILE_TYPES + ("json",):
+ with (rfc_path / f"rfc{num}.{ext}").open("w") as f:
+ f.write(ext)
+ with (rfc_path / "rfc{num}.bogon").open("w") as f:
+ f.write("bogon")
+ with (rfc_path / "prerelease" / f"rfc{num}.notprepped.xml").open(
+ "w"
+ ) as f:
+ f.write("notprepped")
+ load_rfcs_into_blobdb([12345, 54321])
+ for num in [12345, 54321]:
+ for ext in settings.RFC_FILE_TYPES + ("json",):
+ self.assertEqual(
+ retrieve_str("rfc", f"{ext}/rfc{num}.{ext}"),
+ ext,
+ )
+ self.assertFalse(exists_in_storage("rfc", f"bogon/rfc{num}.bogon"))
+ self.assertEqual(
+ retrieve_str("rfc", f"notprepped/rfc{num}.notprepped.xml"),
+ "notprepped",
+ )
diff --git a/ietf/sync/utils.py b/ietf/sync/utils.py
new file mode 100644
index 0000000000..b3bdd8d206
--- /dev/null
+++ b/ietf/sync/utils.py
@@ -0,0 +1,77 @@
+# Copyright The IETF Trust 2026, All Rights Reserved
+
+import datetime
+import subprocess
+
+from pathlib import Path
+
+from django.conf import settings
+from ietf.utils import log
+from ietf.doc.models import Document
+from ietf.doc.storage_utils import AlreadyExistsError, store_bytes
+
+
+def rsync_helper(subprocess_arg_array: list[str]):
+    subprocess.run(["/usr/bin/rsync"] + subprocess_arg_array)
+
+
+def build_from_file_content(rfc_numbers: list[int]) -> str:
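+    """Build rsync --include-from content for the given RFC numbers
+
+    Lists prerelease/ itself so the notprepped includes can match; the caller is
+    expected to pair this with --exclude=* so nothing else is transferred.
+    """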
+ types_to_sync = settings.RFC_FILE_TYPES + ("json",)
+ lines = []
+ lines.append("prerelease/")
+ for num in rfc_numbers:
+ for ext in types_to_sync:
+ lines.append(f"rfc{num}.{ext}")
+ lines.append(f"prerelease/rfc{num}.notprepped.xml")
+ return "\n".join(lines)+"\n"
+
+def load_rfcs_into_blobdb(numbers: list[int]):
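+    """Store the on-disk files for the given RFC numbers into the blobdb
+
+    Numbers without a matching rfc Document are skipped with a log message, and
+    existing blobs are never overwritten.
+    """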
+ types_to_load = settings.RFC_FILE_TYPES + ("json",)
+    rfc_docs = Document.objects.filter(
+        type_id="rfc", rfc_number__in=numbers
+    ).values_list("rfc_number", flat=True)
+ for num in numbers:
+ if num in rfc_docs:
+ for ext in types_to_load:
+ fs_path = Path(settings.RFC_PATH) / f"rfc{num}.{ext}"
+ if fs_path.is_file():
+ with fs_path.open("rb") as f:
+                        content = f.read()
+ mtime = fs_path.stat().st_mtime
+ try:
+ store_bytes(
+ kind="rfc",
+ name=f"{ext}/rfc{num}.{ext}",
+                            content=content,
+ allow_overwrite=False, # Intentionally not allowing overwrite.
+ doc_name=f"rfc{num}",
+ doc_rev=None,
+ # Not setting content_type
+ mtime=datetime.datetime.fromtimestamp(
+ mtime, tz=datetime.UTC
+ ),
+ )
+ except AlreadyExistsError as e:
+ log.log(str(e))
+
+ # store the not-prepped xml
+ name = f"rfc{num}.notprepped.xml"
+ source = Path(settings.RFC_PATH) / "prerelease" / name
+ if source.is_file():
+ with open(source, "rb") as f:
+ bytes = f.read()
+ mtime = source.stat().st_mtime
+ try:
+ store_bytes(
+ kind="rfc",
+ name=f"notprepped/{name}",
+                    content=content,
+ allow_overwrite=False, # Intentionally not allowing overwrite.
+ doc_name=f"rfc{num}",
+ doc_rev=None,
+ # Not setting content_type
+ mtime=datetime.datetime.fromtimestamp(mtime, tz=datetime.UTC),
+ )
+ except AlreadyExistsError as e:
+ log.log(str(e))
+ else:
+ log.log(
+ f"Skipping loading rfc{num} into blobdb as no matching Document exists"
+ )
diff --git a/ietf/templates/base.html b/ietf/templates/base.html
index 25ce50c467..b0df04f30a 100644
--- a/ietf/templates/base.html
+++ b/ietf/templates/base.html
@@ -67,13 +67,17 @@
{% endif %}
-
+
+
+
+