diff --git a/client/agenda/AgendaDetailsModal.vue b/client/agenda/AgendaDetailsModal.vue index 2582bf2159..69c8ef8b53 100644 --- a/client/agenda/AgendaDetailsModal.vue +++ b/client/agenda/AgendaDetailsModal.vue @@ -274,6 +274,7 @@ async function fetchSessionMaterials () { diff --git a/dev/build/Dockerfile b/dev/build/Dockerfile index ae59ba1440..41ff295eec 100644 --- a/dev/build/Dockerfile +++ b/dev/build/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/ietf-tools/datatracker-app-base:20251201T1548 +FROM ghcr.io/ietf-tools/datatracker-app-base:20260114T1756 LABEL maintainer="IETF Tools Team " ENV DEBIAN_FRONTEND=noninteractive diff --git a/dev/build/TARGET_BASE b/dev/build/TARGET_BASE index 726f080c67..3ad31c7e25 100644 --- a/dev/build/TARGET_BASE +++ b/dev/build/TARGET_BASE @@ -1 +1 @@ -20251201T1548 +20260114T1756 diff --git a/dev/build/migration-start.sh b/dev/build/migration-start.sh index 901026e53b..578daf5cef 100644 --- a/dev/build/migration-start.sh +++ b/dev/build/migration-start.sh @@ -3,7 +3,11 @@ echo "Running Datatracker migrations..." ./ietf/manage.py migrate --settings=settings_local -echo "Running Blobdb migrations ..." -./ietf/manage.py migrate --settings=settings_local --database=blobdb +# Check whether the blobdb database exists - inspectdb will return a false +# status if not. +if ./ietf/manage.py inspectdb --database blobdb > /dev/null 2>&1; then + echo "Running Blobdb migrations ..." + ./ietf/manage.py migrate --settings=settings_local --database=blobdb +fi echo "Done!" 
diff --git a/dev/deploy-to-container/settings_local.py b/dev/deploy-to-container/settings_local.py index aacf000093..055b48d0f5 100644 --- a/dev/deploy-to-container/settings_local.py +++ b/dev/deploy-to-container/settings_local.py @@ -71,11 +71,11 @@ DE_GFM_BINARY = '/usr/local/bin/de-gfm' -# No real secrets here, these are public testing values _only_ APP_API_TOKENS = { - "ietf.api.views.ingest_email_test": ["ingestion-test-token"] + "ietf.api.red_api" : ["devtoken", "redtoken"], # Not a real secret + "ietf.api.views.ingest_email_test": ["ingestion-test-token"], # Not a real secret + "ietf.api.views_rpc" : ["devtoken"], # Not a real secret } - # OIDC configuration SITE_URL = 'https://__HOSTNAME__' diff --git a/docker/configs/settings_local.py b/docker/configs/settings_local.py index 3ee7a4295d..e357ce3f73 100644 --- a/docker/configs/settings_local.py +++ b/docker/configs/settings_local.py @@ -100,3 +100,8 @@ bucket_name=f"{storagename}", ), } + +APP_API_TOKENS = { + "ietf.api.red_api" : ["devtoken", "redtoken"], # Not a real secret + "ietf.api.views_rpc" : ["devtoken"], # Not a real secret +} diff --git a/ietf/api/routers.py b/ietf/api/routers.py index 745ddaa811..99afdb242a 100644 --- a/ietf/api/routers.py +++ b/ietf/api/routers.py @@ -3,14 +3,29 @@ from django.core.exceptions import ImproperlyConfigured from rest_framework import routers -class PrefixedSimpleRouter(routers.SimpleRouter): - """SimpleRouter that adds a dot-separated prefix to its basename""" + +class PrefixedBasenameMixin: + """Mixin to add a prefix to the basename of a rest_framework BaseRouter""" def __init__(self, name_prefix="", *args, **kwargs): self.name_prefix = name_prefix if len(self.name_prefix) == 0 or self.name_prefix[-1] == ".": raise ImproperlyConfigured("Cannot use a name_prefix that is empty or ends with '.'") super().__init__(*args, **kwargs) - def get_default_basename(self, viewset): - basename = super().get_default_basename(viewset) - return f"{self.name_prefix}.{basename}" + 
def register(self, prefix, viewset, basename=None): + # Get the superclass "register" method from the class this is mixed-in with. + # This avoids typing issues with calling super().register() directly in a + # mixin class. + super_register = getattr(super(), "register") + if not super_register or not callable(super_register): + raise TypeError("Must mixin with superclass that has register() method") + super_register(prefix, viewset, basename=f"{self.name_prefix}.{basename}") + + +class PrefixedSimpleRouter(PrefixedBasenameMixin, routers.SimpleRouter): + """SimpleRouter that adds a dot-separated prefix to its basename""" + + +class PrefixedDefaultRouter(PrefixedBasenameMixin, routers.DefaultRouter): + """DefaultRouter that adds a dot-separated prefix to its basename""" + diff --git a/ietf/api/serializers_rpc.py b/ietf/api/serializers_rpc.py new file mode 100644 index 0000000000..2223f04aeb --- /dev/null +++ b/ietf/api/serializers_rpc.py @@ -0,0 +1,609 @@ +# Copyright The IETF Trust 2025, All Rights Reserved +import datetime +from pathlib import Path +from typing import Literal, Optional + +from django.db import transaction +from django.urls import reverse as urlreverse +from drf_spectacular.types import OpenApiTypes +from drf_spectacular.utils import extend_schema_field +from rest_framework import serializers + +from ietf.doc.expire import move_draft_files_to_archive +from ietf.doc.models import ( + DocumentAuthor, + Document, + RelatedDocument, + State, + DocEvent, + RfcAuthor, +) +from ietf.doc.serializers import RfcAuthorSerializer +from ietf.doc.utils import ( + default_consensus, + prettify_std_name, + update_action_holders, + update_rfcauthors, +) +from ietf.group.models import Group +from ietf.name.models import StreamName, StdLevelName, FormalLanguageName +from ietf.person.models import Person +from ietf.utils import log + + +class PersonSerializer(serializers.ModelSerializer): + email = serializers.EmailField(read_only=True) + picture = 
serializers.URLField(source="cdn_photo_url", read_only=True) + url = serializers.SerializerMethodField( + help_text="relative URL for datatracker person page" + ) + + class Meta: + model = Person + fields = ["id", "plain_name", "email", "picture", "url"] + read_only_fields = ["id", "plain_name", "email", "picture", "url"] + + @extend_schema_field(OpenApiTypes.URI) + def get_url(self, object: Person): + return urlreverse( + "ietf.person.views.profile", + kwargs={"email_or_name": object.email_address() or object.name}, + ) + + +class EmailPersonSerializer(serializers.Serializer): + email = serializers.EmailField(source="address") + person_pk = serializers.IntegerField(source="person.pk") + name = serializers.CharField(source="person.name") + last_name = serializers.CharField(source="person.last_name") + initials = serializers.CharField(source="person.initials") + + +class LowerCaseEmailField(serializers.EmailField): + def to_representation(self, value): + return super().to_representation(value).lower() + + +class AuthorPersonSerializer(serializers.ModelSerializer): + person_pk = serializers.IntegerField(source="pk", read_only=True) + last_name = serializers.CharField() + initials = serializers.CharField() + email_addresses = serializers.ListField( + source="email_set.all", child=LowerCaseEmailField() + ) + + class Meta: + model = Person + fields = ["person_pk", "name", "last_name", "initials", "email_addresses"] + + +class RfcWithAuthorsSerializer(serializers.ModelSerializer): + authors = AuthorPersonSerializer(many=True, source="author_persons") + + class Meta: + model = Document + fields = ["rfc_number", "authors"] + + +class DraftWithAuthorsSerializer(serializers.ModelSerializer): + draft_name = serializers.CharField(source="name") + authors = AuthorPersonSerializer(many=True, source="author_persons") + + class Meta: + model = Document + fields = ["draft_name", "authors"] + + +class DocumentAuthorSerializer(serializers.ModelSerializer): + """Serializer for a 
Person in a response""" + + plain_name = serializers.SerializerMethodField() + + class Meta: + model = DocumentAuthor + fields = ["person", "plain_name"] + + def get_plain_name(self, document_author: DocumentAuthor) -> str: + return document_author.person.plain_name() + + +class FullDraftSerializer(serializers.ModelSerializer): + # Redefine these fields so they don't pick up the regex validator patterns. + # There seem to be some non-compliant drafts in the system! If this serializer + # is used for a writeable view, the validation will need to be added back. + name = serializers.CharField(max_length=255) + title = serializers.CharField(max_length=255) + + # Other fields we need to add / adjust + source_format = serializers.SerializerMethodField() + authors = DocumentAuthorSerializer(many=True, source="documentauthor_set") + shepherd = serializers.SerializerMethodField() + consensus = serializers.SerializerMethodField() + + class Meta: + model = Document + fields = [ + "id", + "name", + "rev", + "stream", + "title", + "pages", + "source_format", + "authors", + "shepherd", + "intended_std_level", + "consensus", + ] + + def get_consensus(self, doc: Document) -> Optional[bool]: + return default_consensus(doc) + + def get_source_format( + self, doc: Document + ) -> Literal["unknown", "xml-v2", "xml-v3", "txt"]: + submission = doc.submission() + if submission is None: + return "unknown" + if ".xml" in submission.file_types: + if submission.xml_version == "3": + return "xml-v3" + else: + return "xml-v2" + elif ".txt" in submission.file_types: + return "txt" + return "unknown" + + @extend_schema_field(OpenApiTypes.EMAIL) + def get_shepherd(self, doc: Document) -> str: + if doc.shepherd: + return doc.shepherd.formatted_ascii_email() + return "" + + +class DraftSerializer(FullDraftSerializer): + class Meta: + model = Document + fields = [ + "id", + "name", + "rev", + "stream", + "title", + "pages", + "source_format", + "authors", + ] + + +class 
SubmittedToQueueSerializer(FullDraftSerializer): + submitted = serializers.SerializerMethodField() + consensus = serializers.SerializerMethodField() + + class Meta: + model = Document + fields = [ + "id", + "name", + "stream", + "submitted", + "consensus", + ] + + def get_submitted(self, doc) -> Optional[datetime.datetime]: + event = doc.sent_to_rfc_editor_event() + return None if event is None else event.time + + def get_consensus(self, doc) -> Optional[bool]: + return default_consensus(doc) + + +class OriginalStreamSerializer(serializers.ModelSerializer): + stream = serializers.CharField(read_only=True, source="orig_stream_id") + + class Meta: + model = Document + fields = ["rfc_number", "stream"] + + +class ReferenceSerializer(serializers.ModelSerializer): + class Meta: + model = Document + fields = ["id", "name"] + read_only_fields = ["id", "name"] + + +class EditableRfcSerializer(serializers.ModelSerializer): + # Would be nice to reconcile this with ietf.doc.serializers.RfcSerializer. + # The purposes of that serializer (representing data for Red) and this one + # (accepting updates from Purple) are different enough that separate formats + # may be needed, but if not it'd be nice to have a single RfcSerializer that + # can serve both. 
+ # + # For now, only handles authors + authors = RfcAuthorSerializer(many=True, min_length=1, source="rfcauthor_set") + + class Meta: + model = Document + fields = ["id", "authors"] + + def update(self, instance, validated_data): + assert isinstance(instance, Document) + authors_data = validated_data.pop("rfcauthor_set", None) + if authors_data is not None: + # Construct unsaved instances from validated author data + new_authors = [RfcAuthor(**ad) for ad in authors_data] + # Update the RFC with the new author set + with transaction.atomic(): + change_events = update_rfcauthors(instance, new_authors) + for event in change_events: + event.save() + return instance + + +class RfcPubSerializer(serializers.ModelSerializer): + """Write-only serializer for RFC publication""" + # publication-related fields + published = serializers.DateTimeField(default_timezone=datetime.timezone.utc) + draft_name = serializers.RegexField( + required=False, regex=r"^draft-[a-zA-Z0-9-]+$" + ) + draft_rev = serializers.RegexField( + required=False, regex=r"^[0-9][0-9]$" + ) + + # fields on the RFC Document that need tweaking from ModelSerializer defaults + rfc_number = serializers.IntegerField(min_value=1, required=True) + group = serializers.SlugRelatedField( + slug_field="acronym", queryset=Group.objects.all(), required=False + ) + stream = serializers.PrimaryKeyRelatedField( + queryset=StreamName.objects.filter(used=True) + ) + formal_languages = serializers.PrimaryKeyRelatedField( + many=True, + required=False, + queryset=FormalLanguageName.objects.filter(used=True), + help_text=( + "formal languages used in RFC (defaults to those from draft, send empty" + "list to override)" + ) + ) + std_level = serializers.PrimaryKeyRelatedField( + queryset=StdLevelName.objects.filter(used=True), + ) + ad = serializers.PrimaryKeyRelatedField( + queryset=Person.objects.all(), + allow_null=True, + required=False, + ) + obsoletes = serializers.SlugRelatedField( + many=True, + required=False, + 
slug_field="rfc_number", + queryset=Document.objects.filter(type_id="rfc"), + ) + updates = serializers.SlugRelatedField( + many=True, + required=False, + slug_field="rfc_number", + queryset=Document.objects.filter(type_id="rfc"), + ) + subseries = serializers.ListField( + child=serializers.RegexField( + required=False, + # pattern: no leading 0, finite length (arbitrarily set to 5 digits) + regex=r"^(bcp|std|fyi)[1-9][0-9]{0,4}$", + ) + ) + # N.b., authors is _not_ a field on Document! + authors = RfcAuthorSerializer(many=True) + + class Meta: + model = Document + fields = [ + "published", + "draft_name", + "draft_rev", + "rfc_number", + "title", + "authors", + "group", + "stream", + "abstract", + "pages", + "words", + "formal_languages", + "std_level", + "ad", + "note", + "obsoletes", + "updates", + "subseries", + ] + + def validate(self, data): + if "draft_name" in data or "draft_rev" in data: + if "draft_name" not in data: + raise serializers.ValidationError( + {"draft_name": "Missing draft_name"}, + code="invalid-draft-spec", + ) + if "draft_rev" not in data: + raise serializers.ValidationError( + {"draft_rev": "Missing draft_rev"}, + code="invalid-draft-spec", + ) + return data + + def create(self, validated_data): + """Publish an RFC""" + published = validated_data.pop("published") + draft_name = validated_data.pop("draft_name", None) + draft_rev = validated_data.pop("draft_rev", None) + obsoletes = validated_data.pop("obsoletes", []) + updates = validated_data.pop("updates", []) + subseries = validated_data.pop("subseries", []) + + system_person = Person.objects.get(name="(System)") + + # If specified, retrieve draft and extract RFC default values from it + if draft_name is None: + draft = None + defaults_from_draft = { + "group": Group.objects.get(acronym="none", type_id="individ"), + } + else: + # validation enforces that draft_name and draft_rev are both present + draft = Document.objects.filter( + type_id="draft", + name=draft_name, + rev=draft_rev, + 
).first() + if draft is None: + raise serializers.ValidationError( + { + "draft_name": "No such draft", + "draft_rev": "No such draft", + }, + code="invalid-draft" + ) + elif draft.get_state_slug() == "rfc": + raise serializers.ValidationError( + { + "draft_name": "Draft already published as RFC", + }, + code="already-published-draft", + ) + defaults_from_draft = { + "ad": draft.ad, + "formal_languages": draft.formal_languages.all(), + "group": draft.group, + "note": draft.note, + } + + # Transaction to clean up if something fails + with transaction.atomic(): + # create rfc, letting validated request data override draft defaults + rfc = self._create_rfc(defaults_from_draft | validated_data) + DocEvent.objects.create( + doc=rfc, + rev=rfc.rev, + type="published_rfc", + time=published, + by=system_person, + desc="RFC published", + ) + rfc.set_state(State.objects.get(used=True, type_id="rfc", slug="published")) + + # create updates / obsoletes relations + for obsoleted_rfc_pk in obsoletes: + RelatedDocument.objects.get_or_create( + source=rfc, target=obsoleted_rfc_pk, relationship_id="obs" + ) + for updated_rfc_pk in updates: + RelatedDocument.objects.get_or_create( + source=rfc, target=updated_rfc_pk, relationship_id="updates" + ) + + # create subseries relations + for subseries_doc_name in subseries: + ss_slug = subseries_doc_name[:3] + subseries_doc, ss_doc_created = Document.objects.get_or_create( + type_id=ss_slug, name=subseries_doc_name + ) + if ss_doc_created: + subseries_doc.docevent_set.create( + type=f"{ss_slug}_doc_created", + by=system_person, + desc=f"Created {subseries_doc_name} via publication of {rfc.name}", + ) + _, ss_rel_created = subseries_doc.relateddocument_set.get_or_create( + relationship_id="contains", target=rfc + ) + if ss_rel_created: + subseries_doc.docevent_set.create( + type="sync_from_rfc_editor", + by=system_person, + desc=f"Added {rfc.name} to {subseries_doc.name}", + ) + rfc.docevent_set.create( + type="sync_from_rfc_editor", + 
by=system_person, + desc=f"Added {rfc.name} to {subseries_doc.name}", + ) + + + # create relation with draft and update draft state + if draft is not None: + draft_changes = [] + draft_events = [] + if draft.get_state_slug() != "rfc": + draft.set_state( + State.objects.get(used=True, type="draft", slug="rfc") + ) + move_draft_files_to_archive(draft, draft.rev) + draft_changes.append(f"changed state to {draft.get_state()}") + + r, created_relateddoc = RelatedDocument.objects.get_or_create( + source=draft, target=rfc, relationship_id="became_rfc", + ) + if created_relateddoc: + change = "created {rel_name} relationship between {pretty_draft_name} and {pretty_rfc_name}".format( + rel_name=r.relationship.name.lower(), + pretty_draft_name=prettify_std_name(draft_name), + pretty_rfc_name=prettify_std_name(rfc.name), + ) + draft_changes.append(change) + + # Always set the "draft-iesg" state. This state should be set for all drafts, so + # log a warning if it is not set. What should happen here is that ietf stream + # RFCs come in as "rfcqueue" and are set to "pub" when they appear in the RFC index. + # Other stream documents should normally be "idexists" and be left that way. The + # code here *actually* leaves "draft-iesg" state alone if it is "idexists" or "pub", + # and changes any other state to "pub". If unset, it changes it to "idexists". + # This reflects historical behavior and should probably be updated, but a migration + # of existing drafts (and validation of the change) is needed before we change the + # handling. 
+ prev_iesg_state = draft.get_state("draft-iesg") + if prev_iesg_state is None: + log.log(f'Warning while processing {rfc.name}: {draft.name} has no "draft-iesg" state') + new_iesg_state = State.objects.get(type_id="draft-iesg", slug="idexists") + elif prev_iesg_state.slug not in ("pub", "idexists"): + if prev_iesg_state.slug != "rfcqueue": + log.log( + 'Warning while processing {}: {} is in "draft-iesg" state {} (expected "rfcqueue")'.format( + rfc.name, draft.name, prev_iesg_state.slug + ) + ) + new_iesg_state = State.objects.get(type_id="draft-iesg", slug="pub") + else: + new_iesg_state = prev_iesg_state + + if new_iesg_state != prev_iesg_state: + draft.set_state(new_iesg_state) + draft_changes.append(f"changed {new_iesg_state.type.label} to {new_iesg_state}") + e = update_action_holders(draft, prev_iesg_state, new_iesg_state) + if e: + draft_events.append(e) + + # If the draft and RFC streams agree, move draft to "pub" stream state. If not, complain. + if draft.stream != rfc.stream: + log.log("Warning while processing {}: draft {} stream is {} but RFC stream is {}".format( + rfc.name, draft.name, draft.stream, rfc.stream + )) + elif draft.stream.slug in ["iab", "irtf", "ise", "editorial"]: + stream_slug = f"draft-stream-{draft.stream.slug}" + prev_state = draft.get_state(stream_slug) + if prev_state is not None and prev_state.slug != "pub": + new_state = State.objects.select_related("type").get(used=True, type__slug=stream_slug, slug="pub") + draft.set_state(new_state) + draft_changes.append( + f"changed {new_state.type.label} to {new_state}" + ) + e = update_action_holders(draft, prev_state, new_state) + if e: + draft_events.append(e) + if draft_changes: + draft_events.append( + DocEvent.objects.create( + doc=draft, + rev=draft.rev, + by=system_person, + type="sync_from_rfc_editor", + desc=f"Updated while publishing {rfc.name} ({', '.join(draft_changes)})", + ) + ) + draft.save_with_history(draft_events) + + return rfc + + def _create_rfc(self, 
validated_data): + authors_data = validated_data.pop("authors") + formal_languages = validated_data.pop("formal_languages", []) + # todo ad field + rfc = Document.objects.create( + type_id="rfc", + name=f"rfc{validated_data['rfc_number']}", + **validated_data, + ) + rfc.formal_languages.set(formal_languages) # list of PKs is ok + for order, author_data in enumerate(authors_data): + rfc.rfcauthor_set.create( + order=order, + **author_data, + ) + return rfc + + +class RfcFileSerializer(serializers.Serializer): + # The structure of this serializer is constrained by what openapi-generator-cli's + # python generator can correctly serialize as multipart/form-data. It does not + # handle nested serializers well (or perhaps at all). ListFields with child + # ChoiceField or RegexField do not serialize correctly. DictFields don't seem + # to work. + # + # It does seem to correctly send filenames along with FileFields, even as a child + # in a ListField, so we use that to convey the file format of each item. There + # are other options we could consider (e.g., a structured CharField) but this + # works. + allowed_extensions = ( + ".html", + ".json", + ".notprepped.xml", + ".pdf", + ".txt", + ".xml", + ) + + rfc = serializers.SlugRelatedField( + slug_field="rfc_number", + queryset=Document.objects.filter(type_id="rfc"), + help_text="RFC number to which the contents belong", + ) + contents = serializers.ListField( + child=serializers.FileField( + allow_empty_file=False, + use_url=False, + ), + help_text=( + "List of content files. Filename extensions are used to identify " + "file types, but filenames are otherwise ignored." + ), + ) + replace = serializers.BooleanField( + required=False, + default=False, + help_text=( + "Replace existing files for this RFC. Defaults to false. When false, " + "if _any_ files already exist for the specified RFC the upload will be " + "rejected regardless of which files are being uploaded. 
When true, " +            "existing files will be removed and new ones will be put in place. BE " +            "VERY CAREFUL WITH THIS OPTION IN PRODUCTION." +        ), +    ) + +    def validate_contents(self, data): +        found_extensions = [] +        for uploaded_file in data: +            if not hasattr(uploaded_file, "name"): +                raise serializers.ValidationError( +                    "filename not specified for uploaded file", +                    code="missing-filename", +                ) +            ext = "".join(Path(uploaded_file.name).suffixes) +            if ext not in self.allowed_extensions: +                raise serializers.ValidationError( +                    f"File uploaded with invalid extension '{ext}'", +                    code="invalid-filename-ext", +                ) +            if ext in found_extensions: +                raise serializers.ValidationError( +                    f"More than one file uploaded with extension '{ext}'", code="duplicate-filename-ext" +                ) +            found_extensions.append(ext) +        return data + + +class NotificationAckSerializer(serializers.Serializer): +    message = serializers.CharField(default="ack") diff --git a/ietf/api/tests_views_rpc.py b/ietf/api/tests_views_rpc.py new file mode 100644 index 0000000000..032b4b9495 --- /dev/null +++ b/ietf/api/tests_views_rpc.py @@ -0,0 +1,299 @@ +# Copyright The IETF Trust 2025, All Rights Reserved +from io import StringIO +from pathlib import Path +from tempfile import TemporaryDirectory + +from django.conf import settings +from django.core.files.base import ContentFile +from django.db.models import Max +from django.test.utils import override_settings +from django.urls import reverse as urlreverse + +from ietf.doc.factories import IndividualDraftFactory, WgDraftFactory, WgRfcFactory +from ietf.doc.models import RelatedDocument, Document +from ietf.group.factories import RoleFactory, GroupFactory +from ietf.person.factories import PersonFactory +from ietf.utils.test_utils import APITestCase, reload_db_objects + + +class RpcApiTests(APITestCase): +    @override_settings(APP_API_TOKENS={"ietf.api.views_rpc": ["valid-token"]}) +    def test_draftviewset_references(self): +        viewname = "ietf.api.purple_api.draft-references" + +        # non-existent draft +        bad_id = 
Document.objects.aggregate(unused_id=Max("id") + 100)["unused_id"] + url = urlreverse(viewname, kwargs={"doc_id": bad_id}) + # Without credentials + r = self.client.get(url) + self.assertEqual(r.status_code, 403) + # Add credentials + r = self.client.get(url, headers={"X-Api-Key": "valid-token"}) + self.assertEqual(r.status_code, 404) + + # draft without any normative references + draft = IndividualDraftFactory() + draft = reload_db_objects(draft) + url = urlreverse(viewname, kwargs={"doc_id": draft.id}) + r = self.client.get(url) + self.assertEqual(r.status_code, 403) + r = self.client.get(url, headers={"X-Api-Key": "valid-token"}) + self.assertEqual(r.status_code, 200) + refs = r.json() + self.assertEqual(refs, []) + + # draft without any normative references but with an informative reference + draft_foo = IndividualDraftFactory() + draft_foo = reload_db_objects(draft_foo) + RelatedDocument.objects.create( + source=draft, target=draft_foo, relationship_id="refinfo" + ) + url = urlreverse(viewname, kwargs={"doc_id": draft.id}) + r = self.client.get(url) + self.assertEqual(r.status_code, 403) + r = self.client.get(url, headers={"X-Api-Key": "valid-token"}) + self.assertEqual(r.status_code, 200) + refs = r.json() + self.assertEqual(refs, []) + + # draft with a normative reference + draft_bar = IndividualDraftFactory() + draft_bar = reload_db_objects(draft_bar) + RelatedDocument.objects.create( + source=draft, target=draft_bar, relationship_id="refnorm" + ) + url = urlreverse(viewname, kwargs={"doc_id": draft.id}) + r = self.client.get(url) + self.assertEqual(r.status_code, 403) + r = self.client.get(url, headers={"X-Api-Key": "valid-token"}) + self.assertEqual(r.status_code, 200) + refs = r.json() + self.assertEqual(len(refs), 1) + self.assertEqual(refs[0]["id"], draft_bar.id) + self.assertEqual(refs[0]["name"], draft_bar.name) + + @override_settings(APP_API_TOKENS={"ietf.api.views_rpc": ["valid-token"]}) + def test_notify_rfc_published(self): + url = 
urlreverse("ietf.api.purple_api.notify_rfc_published") + area = GroupFactory(type_id="area") + draft_ad = RoleFactory(group=area, name_id="ad").person + authors = PersonFactory.create_batch(2) + draft = WgDraftFactory(group__parent=area, authors=authors) + assert isinstance(draft, Document), "WgDraftFactory should generate a Document" + unused_rfc_number = ( + Document.objects.filter(rfc_number__isnull=False).aggregate( + unused_rfc_number=Max("rfc_number") + 1 + )["unused_rfc_number"] + or 10000 + ) + + post_data = { + "published": "2025-12-17T20:29:00Z", + "draft_name": draft.name, + "draft_rev": draft.rev, + "rfc_number": unused_rfc_number, + "title": draft.title, + "authors": [ + { + "titlepage_name": f"titlepage {author.name}", + "is_editor": False, + "person": author.pk, + "email": author.email_address(), + "affiliation": "Some Affiliation", + "country": "CA", + } + for author in authors + ], + "group": draft.group.acronym, + "stream": draft.stream_id, + "abstract": draft.abstract, + "pages": draft.pages, + "words": draft.pages * 250, + "formal_languages": [], + "std_level": "ps", + "ad": draft_ad.pk, + "note": "noted", + "obsoletes": [], + "updates": [], + "subseries": [], + } + r = self.client.post(url, data=post_data, format="json") + self.assertEqual(r.status_code, 403) + + r = self.client.post( + url, data=post_data, format="json", headers={"X-Api-Key": "valid-token"} + ) + self.assertEqual(r.status_code, 200) + rfc = Document.objects.filter(rfc_number=unused_rfc_number).first() + self.assertIsNotNone(rfc) + self.assertEqual(rfc.came_from_draft(), draft) + self.assertEqual( + rfc.docevent_set.filter( + type="published_rfc", time="2025-12-17T20:29:00Z" + ).count(), + 1, + ) + self.assertEqual(rfc.title, draft.title) + self.assertEqual(rfc.documentauthor_set.count(), 0) + self.assertEqual( + list( + rfc.rfcauthor_set.values( + "titlepage_name", + "is_editor", + "person", + "email", + "affiliation", + "country", + ) + ), + [ + { + "titlepage_name": 
f"titlepage {author.name}", + "is_editor": False, + "person": author.pk, + "email": author.email_address(), + "affiliation": "Some Affiliation", + "country": "CA", + } + for author in authors + ], + ) + self.assertEqual(rfc.group, draft.group) + self.assertEqual(rfc.stream, draft.stream) + self.assertEqual(rfc.abstract, draft.abstract) + self.assertEqual(rfc.pages, draft.pages) + self.assertEqual(rfc.words, draft.pages * 250) + self.assertEqual(rfc.formal_languages.count(), 0) + self.assertEqual(rfc.std_level_id, "ps") + self.assertEqual(rfc.ad, draft_ad) + self.assertEqual(rfc.note, "noted") + self.assertEqual(rfc.related_that_doc("obs"), []) + self.assertEqual(rfc.related_that_doc("updates"), []) + self.assertEqual(rfc.part_of(), []) + self.assertEqual(draft.get_state().slug, "rfc") + # todo test non-empty relationships + # todo test references (when updating that is part of the handling) + + @override_settings(APP_API_TOKENS={"ietf.api.views_rpc": ["valid-token"]}) + def test_upload_rfc_files(self): + def _valid_post_data(): + """Generate a valid post data dict + + Each API call needs a fresh set of files, so don't reuse the return + value from this for multiple calls! 
+ """ + return { + "rfc": rfc.rfc_number, + "contents": [ + ContentFile(b"This is .xml", "myfile.xml"), + ContentFile(b"This is .txt", "myfile.txt"), + ContentFile(b"This is .html", "myfile.html"), + ContentFile(b"This is .pdf", "myfile.pdf"), + ContentFile(b"This is .json", "myfile.json"), + ContentFile(b"This is .notprepped.xml", "myfile.notprepped.xml"), + ], + "replace": False, + } + + url = urlreverse("ietf.api.purple_api.upload_rfc_files") + unused_rfc_number = ( + Document.objects.filter(rfc_number__isnull=False).aggregate( + unused_rfc_number=Max("rfc_number") + 1 + )["unused_rfc_number"] + or 10000 + ) + + rfc = WgRfcFactory(rfc_number=unused_rfc_number) + assert isinstance(rfc, Document), "WgRfcFactory should generate a Document" + with TemporaryDirectory() as rfc_dir: + settings.RFC_PATH = rfc_dir # affects overridden settings + rfc_path = Path(rfc_dir) + (rfc_path / "prerelease").mkdir() + content = StringIO("XML content\n") + content.name = "myrfc.xml" + + # no api key + r = self.client.post(url, _valid_post_data(), format="multipart") + self.assertEqual(r.status_code, 403) + + # invalid RFC + r = self.client.post( + url, + _valid_post_data() | {"rfc": unused_rfc_number + 1}, + format="multipart", + headers={"X-Api-Key": "valid-token"}, + ) + self.assertEqual(r.status_code, 400) + + # empty files + r = self.client.post( + url, + _valid_post_data() | { + "contents": [ + ContentFile(b"", "myfile.xml"), + ContentFile(b"", "myfile.txt"), + ContentFile(b"", "myfile.html"), + ContentFile(b"", "myfile.pdf"), + ContentFile(b"", "myfile.json"), + ContentFile(b"", "myfile.notprepped.xml"), + ] + }, + format="multipart", + headers={"X-Api-Key": "valid-token"}, + ) + self.assertEqual(r.status_code, 400) + + # bad file type + r = self.client.post( + url, + _valid_post_data() | { + "contents": [ + ContentFile(b"Some content", "myfile.jpg"), + ] + }, + format="multipart", + headers={"X-Api-Key": "valid-token"}, + ) + self.assertEqual(r.status_code, 400) + + # valid 
post + r = self.client.post( + url, + _valid_post_data(), + format="multipart", + headers={"X-Api-Key": "valid-token"}, + ) + self.assertEqual(r.status_code, 200) + for suffix in [".xml", ".txt", ".html", ".pdf", ".json"]: + self.assertEqual( + (rfc_path / f"rfc{unused_rfc_number}") + .with_suffix(suffix) + .read_text(), + f"This is {suffix}", + f"{suffix} file should contain the expected content", + ) + self.assertEqual( + ( + rfc_path / "prerelease" / f"rfc{unused_rfc_number}.notprepped.xml" + ).read_text(), + "This is .notprepped.xml", + ".notprepped.xml file should contain the expected content", + ) + + # re-post with replace = False should now fail + r = self.client.post( + url, + _valid_post_data(), + format="multipart", + headers={"X-Api-Key": "valid-token"}, + ) + self.assertEqual(r.status_code, 409) # conflict + + # re-post with replace = True should succeed + r = self.client.post( + url, + _valid_post_data() | {"replace": True}, + format="multipart", + headers={"X-Api-Key": "valid-token"}, + ) + self.assertEqual(r.status_code, 200) # conflict diff --git a/ietf/api/urls.py b/ietf/api/urls.py index 04575b34cb..7a082567b8 100644 --- a/ietf/api/urls.py +++ b/ietf/api/urls.py @@ -1,26 +1,31 @@ # Copyright The IETF Trust 2017-2024, All Rights Reserved +from drf_spectacular.views import SpectacularAPIView + from django.conf import settings -from django.urls import include +from django.urls import include, path from django.views.generic import TemplateView from ietf import api -from ietf.doc import views_ballot +from ietf.doc import views_ballot, api as doc_api from ietf.meeting import views as meeting_views from ietf.submit import views as submit_views from ietf.utils.urls import url from . 
import views as api_views +from .routers import PrefixedSimpleRouter # DRF API routing - disabled until we plan to use it -# from drf_spectacular.views import SpectacularAPIView -# from django.urls import path # from ietf.person import api as person_api -# from .routers import PrefixedSimpleRouter # core_router = PrefixedSimpleRouter(name_prefix="ietf.api.core_api") # core api router # core_router.register("email", person_api.EmailViewSet) # core_router.register("person", person_api.PersonViewSet) +# todo more general name for this API? +red_router = PrefixedSimpleRouter(name_prefix="ietf.api.red_api") # red api router +red_router.register("doc", doc_api.RfcViewSet) +red_router.register("subseries", doc_api.SubseriesViewSet, basename="subseries") + api.autodiscover() urlpatterns = [ @@ -32,7 +37,9 @@ url(r'^v2/person/person', api_views.ApiV2PersonExportView.as_view()), # --- DRF API --- # path("core/", include(core_router.urls)), - # path("schema/", SpectacularAPIView.as_view()), + path("purple/", include("ietf.api.urls_rpc")), + path("red/", include(red_router.urls)), + path("schema/", SpectacularAPIView.as_view()), # # --- Custom API endpoints, sorted alphabetically --- # Email alias information for drafts diff --git a/ietf/api/urls_rpc.py b/ietf/api/urls_rpc.py new file mode 100644 index 0000000000..9d41ac137f --- /dev/null +++ b/ietf/api/urls_rpc.py @@ -0,0 +1,42 @@ +# Copyright The IETF Trust 2023-2026, All Rights Reserved +from django.urls import include, path + +from ietf.api import views_rpc +from ietf.api.routers import PrefixedDefaultRouter +from ietf.utils.urls import url + +router = PrefixedDefaultRouter(use_regex_path=False, name_prefix="ietf.api.purple_api") +router.include_format_suffixes = False +router.register(r"draft", views_rpc.DraftViewSet, basename="draft") +router.register(r"person", views_rpc.PersonViewSet) +router.register(r"rfc", views_rpc.RfcViewSet, basename="rfc") + +router.register( + r"rfc//authors", + views_rpc.RfcAuthorViewSet, + 
basename="rfc-authors", +) + +urlpatterns = [ + url(r"^doc/drafts_by_names/", views_rpc.DraftsByNamesView.as_view()), + url(r"^persons/search/", views_rpc.RpcPersonSearch.as_view()), + path( + r"rfc/publish/", + views_rpc.RfcPubNotificationView.as_view(), + name="ietf.api.purple_api.notify_rfc_published", + ), + path( + r"rfc/publish/files/", + views_rpc.RfcPubFilesView.as_view(), + name="ietf.api.purple_api.upload_rfc_files", + ), + path(r"subject//person/", views_rpc.SubjectPersonView.as_view()), +] + +# add routers at the end so individual routes can steal parts of their address +# space (e.g., ^rfc/publish/ superseding the ^rfc/ routes of RfcViewSet) +urlpatterns.extend( + [ + path("", include(router.urls)), + ] +) diff --git a/ietf/api/views.py b/ietf/api/views.py index 22523b2f17..420bc39693 100644 --- a/ietf/api/views.py +++ b/ietf/api/views.py @@ -97,7 +97,7 @@ class PersonalInformationExportView(DetailView, JsonExportMixin): def get(self, request): person = get_object_or_404(self.model, user=request.user) - expand = ['searchrule', 'documentauthor', 'ad_document_set', 'ad_dochistory_set', 'docevent', + expand = ['searchrule', 'documentauthor', 'rfcauthor', 'ad_document_set', 'ad_dochistory_set', 'docevent', 'ballotpositiondocevent', 'deletedevent', 'email_set', 'groupevent', 'role', 'rolehistory', 'iprdisclosurebase', 'iprevent', 'liaisonstatementevent', 'allowlisted', 'schedule', 'constraint', 'schedulingevent', 'message', 'sendqueue', 'nominee', 'topicfeedbacklastseen', 'alias', 'email', 'apikeys', 'personevent', diff --git a/ietf/api/views_rpc.py b/ietf/api/views_rpc.py new file mode 100644 index 0000000000..fce174ab72 --- /dev/null +++ b/ietf/api/views_rpc.py @@ -0,0 +1,434 @@ +# Copyright The IETF Trust 2023-2026, All Rights Reserved +import shutil +from pathlib import Path +from tempfile import TemporaryDirectory + +from django.conf import settings +from drf_spectacular.utils import OpenApiParameter +from rest_framework import mixins, parsers, 
serializers, viewsets, status +from rest_framework.decorators import action +from rest_framework.exceptions import APIException +from rest_framework.views import APIView +from rest_framework.response import Response + +from django.db.models import CharField as ModelCharField, OuterRef, Subquery, Q +from django.db.models.functions import Coalesce +from django.http import Http404 +from drf_spectacular.utils import extend_schema_view, extend_schema +from rest_framework import generics +from rest_framework.fields import CharField as DrfCharField +from rest_framework.filters import SearchFilter +from rest_framework.pagination import LimitOffsetPagination + +from ietf.api.serializers_rpc import ( + PersonSerializer, + FullDraftSerializer, + DraftSerializer, + SubmittedToQueueSerializer, + OriginalStreamSerializer, + ReferenceSerializer, + EmailPersonSerializer, + RfcWithAuthorsSerializer, + DraftWithAuthorsSerializer, + NotificationAckSerializer, RfcPubSerializer, RfcFileSerializer, + EditableRfcSerializer, +) +from ietf.doc.models import Document, DocHistory, RfcAuthor +from ietf.doc.serializers import RfcAuthorSerializer +from ietf.person.models import Email, Person + + +class Conflict(APIException): + status_code = status.HTTP_409_CONFLICT + default_detail = "Conflict." 
+ default_code = "conflict" + + +@extend_schema_view( + retrieve=extend_schema( + operation_id="get_person_by_id", + summary="Find person by ID", + description="Returns a single person", + parameters=[ + OpenApiParameter( + name="person_id", + type=int, + location="path", + description="Person ID identifying this person.", + ), + ], + ), +) +class PersonViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet): + queryset = Person.objects.all() + serializer_class = PersonSerializer + api_key_endpoint = "ietf.api.views_rpc" + lookup_url_kwarg = "person_id" + + @extend_schema( + operation_id="get_persons", + summary="Get a batch of persons", + description="Returns a list of persons matching requested ids. Omits any that are missing.", + request=list[int], + responses=PersonSerializer(many=True), + ) + @action(detail=False, methods=["post"]) + def batch(self, request): + """Get a batch of rpc person names""" + pks = request.data + return Response( + self.get_serializer(Person.objects.filter(pk__in=pks), many=True).data + ) + + @extend_schema( + operation_id="persons_by_email", + summary="Get a batch of persons by email addresses", + description=( + "Returns a list of persons matching requested ids. " + "Omits any that are missing." 
+ ), + request=list[str], + responses=EmailPersonSerializer(many=True), + ) + @action(detail=False, methods=["post"], serializer_class=EmailPersonSerializer) + def batch_by_email(self, request): + emails = Email.objects.filter(address__in=request.data, person__isnull=False) + serializer = self.get_serializer(emails, many=True) + return Response(serializer.data) + + +class SubjectPersonView(APIView): + api_key_endpoint = "ietf.api.views_rpc" + + @extend_schema( + operation_id="get_subject_person_by_id", + summary="Find person for OIDC subject by ID", + description="Returns a single person", + responses=PersonSerializer, + parameters=[ + OpenApiParameter( + name="subject_id", + type=str, + description="subject ID of person to return", + location="path", + ), + ], + ) + def get(self, request, subject_id: str): + try: + user_id = int(subject_id) + except ValueError: + raise serializers.ValidationError( + {"subject_id": "This field must be an integer value."} + ) + person = Person.objects.filter(user__pk=user_id).first() + if person: + return Response(PersonSerializer(person).data) + raise Http404 + + +class RpcLimitOffsetPagination(LimitOffsetPagination): + default_limit = 10 + max_limit = 100 + + +class SingleTermSearchFilter(SearchFilter): + """SearchFilter backend that does not split terms + + The default SearchFilter treats comma or whitespace-separated terms as individual + search terms. This backend instead searches for the exact term. + """ + + def get_search_terms(self, request): + value = request.query_params.get(self.search_param, "") + field = DrfCharField(trim_whitespace=False, allow_blank=True) + cleaned_value = field.run_validation(value) + return [cleaned_value] + + +@extend_schema_view( + get=extend_schema( + operation_id="search_person", + description="Get a list of persons, matching by partial name or email", + ), +) +class RpcPersonSearch(generics.ListAPIView): + # n.b. 
the OpenAPI schema for this can be generated by running + # ietf/manage.py spectacular --file spectacular.yaml + # and extracting / touching up the rpc_person_search_list operation + api_key_endpoint = "ietf.api.views_rpc" + queryset = Person.objects.all() + serializer_class = PersonSerializer + pagination_class = RpcLimitOffsetPagination + + # Searchable on all name-like fields or email addresses + filter_backends = [SingleTermSearchFilter] + search_fields = ["name", "plain", "email__address"] + + +@extend_schema_view( + retrieve=extend_schema( + operation_id="get_draft_by_id", + summary="Get a draft", + description="Returns the draft for the requested ID", + parameters=[ + OpenApiParameter( + name="doc_id", + type=int, + location="path", + description="Doc ID identifying this draft.", + ), + ], + ), + submitted_to_rpc=extend_schema( + operation_id="submitted_to_rpc", + summary="List documents ready to enter the RFC Editor Queue", + description="List documents ready to enter the RFC Editor Queue", + responses=SubmittedToQueueSerializer(many=True), + ), +) +class DraftViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet): + queryset = Document.objects.filter(type_id="draft") + serializer_class = FullDraftSerializer + api_key_endpoint = "ietf.api.views_rpc" + lookup_url_kwarg = "doc_id" + + @action(detail=False, serializer_class=SubmittedToQueueSerializer) + def submitted_to_rpc(self, request): + """Return documents in datatracker that have been submitted to the RPC but are not yet in the queue + + Those queries overreturn - there may be things, particularly not from the IETF stream that are already in the queue. 
+ """ + ietf_docs = Q(states__type_id="draft-iesg", states__slug__in=["ann"]) + irtf_iab_ise_docs = Q( + states__type_id__in=[ + "draft-stream-iab", + "draft-stream-irtf", + "draft-stream-ise", + ], + states__slug__in=["rfc-edit"], + ) + # TODO: Need a way to talk about editorial stream docs + docs = ( + self.get_queryset() + .filter(type_id="draft") + .filter(ietf_docs | irtf_iab_ise_docs) + ) + serializer = self.get_serializer(docs, many=True) + return Response(serializer.data) + + @extend_schema( + operation_id="get_draft_references", + summary="Get normative references to I-Ds", + description=( + "Returns the id and name of each normatively " + "referenced Internet-Draft for the given docId" + ), + parameters=[ + OpenApiParameter( + name="doc_id", + type=int, + location="path", + description="Doc ID identifying this draft.", + ), + ], + responses=ReferenceSerializer(many=True), + ) + @action(detail=True, serializer_class=ReferenceSerializer) + def references(self, request, doc_id=None): + doc = self.get_object() + serializer = self.get_serializer( + [ + reference + for reference in doc.related_that_doc("refnorm") + if reference.type_id == "draft" + ], + many=True, + ) + return Response(serializer.data) + + @extend_schema( + operation_id="get_draft_authors", + summary="Gather authors of the drafts with the given names", + description="returns a list mapping draft names to objects describing authors", + request=list[str], + responses=DraftWithAuthorsSerializer(many=True), + ) + @action(detail=False, methods=["post"], serializer_class=DraftWithAuthorsSerializer) + def bulk_authors(self, request): + drafts = self.get_queryset().filter(name__in=request.data) + serializer = self.get_serializer(drafts, many=True) + return Response(serializer.data) + + +@extend_schema_view( + rfc_original_stream=extend_schema( + operation_id="get_rfc_original_streams", + summary="Get the streams RFCs were originally published into", + description="returns a list of dicts associating an 
RFC with its originally published stream", + responses=OriginalStreamSerializer(many=True), + ) +) +class RfcViewSet(mixins.UpdateModelMixin, viewsets.GenericViewSet): + queryset = Document.objects.filter(type_id="rfc") + api_key_endpoint = "ietf.api.views_rpc" + lookup_field = "rfc_number" + serializer_class = EditableRfcSerializer + + @action(detail=False, serializer_class=OriginalStreamSerializer) + def rfc_original_stream(self, request): + rfcs = self.get_queryset().annotate( + orig_stream_id=Coalesce( + Subquery( + DocHistory.objects.filter(doc=OuterRef("pk")) + .exclude(stream__isnull=True) + .order_by("time") + .values_list("stream_id", flat=True)[:1] + ), + "stream_id", + output_field=ModelCharField(), + ), + ) + serializer = self.get_serializer(rfcs, many=True) + return Response(serializer.data) + + @extend_schema( + operation_id="get_rfc_authors", + summary="Gather authors of the RFCs with the given numbers", + description="returns a list mapping rfc numbers to objects describing authors", + request=list[int], + responses=RfcWithAuthorsSerializer(many=True), + ) + @action(detail=False, methods=["post"], serializer_class=RfcWithAuthorsSerializer) + def bulk_authors(self, request): + rfcs = self.get_queryset().filter(rfc_number__in=request.data) + serializer = self.get_serializer(rfcs, many=True) + return Response(serializer.data) + + +class DraftsByNamesView(APIView): + api_key_endpoint = "ietf.api.views_rpc" + + @extend_schema( + operation_id="get_drafts_by_names", + summary="Get a batch of drafts by draft names", + description="returns a list of drafts with matching names", + request=list[str], + responses=DraftSerializer(many=True), + ) + def post(self, request): + names = request.data + docs = Document.objects.filter(type_id="draft", name__in=names) + return Response(DraftSerializer(docs, many=True).data) + + +class RfcAuthorViewSet(viewsets.ReadOnlyModelViewSet): + """ViewSet for RfcAuthor model + + Router needs to provide rfc_number as a kwarg + """ 
+ api_key_endpoint = "ietf.api.views_rpc" + + queryset = RfcAuthor.objects.all() + serializer_class = RfcAuthorSerializer + lookup_url_kwarg = "author_id" + rfc_number_param = "rfc_number" + + def get_queryset(self): + return ( + super() + .get_queryset() + .filter( + document__type_id="rfc", + document__rfc_number=self.kwargs[self.rfc_number_param], + ) + ) + + +class RfcPubNotificationView(APIView): + api_key_endpoint = "ietf.api.views_rpc" + + @extend_schema( + operation_id="notify_rfc_published", + summary="Notify datatracker of RFC publication", + request=RfcPubSerializer, + responses=NotificationAckSerializer, + ) + def post(self, request): + serializer = RfcPubSerializer(data=request.data) + serializer.is_valid(raise_exception=True) + # Create RFC + serializer.save() + return Response(NotificationAckSerializer().data) + + +class RfcPubFilesView(APIView): + api_key_endpoint = "ietf.api.views_rpc" + parser_classes = [parsers.MultiPartParser] + + def _destination(self, filename: str | Path) -> Path: + """Destination for an uploaded RFC file + + Strips any path components in filename and returns an absolute Path. 
+ """ + rfc_path = Path(settings.RFC_PATH) + filename = Path(filename) # could potentially have directory components + extension = "".join(filename.suffixes) + if extension == ".notprepped.xml": + return rfc_path / "prerelease" / filename.name + return rfc_path / filename.name + + @extend_schema( + operation_id="upload_rfc_files", + summary="Upload files for a published RFC", + request=RfcFileSerializer, + responses=NotificationAckSerializer, + ) + def post(self, request): + serializer = RfcFileSerializer( + # many=True, + data=request.data, + ) + serializer.is_valid(raise_exception=True) + rfc = serializer.validated_data["rfc"] + uploaded_files = serializer.validated_data["contents"] # list[UploadedFile] + replace = serializer.validated_data["replace"] + dest_stem = f"rfc{rfc.rfc_number}" + + # List of files that might exist for an RFC + possible_rfc_files = [ + self._destination(dest_stem + ext) + for ext in serializer.allowed_extensions + ] + if not replace: + # this is the default: refuse to overwrite anything if not replacing + for possible_existing_file in possible_rfc_files: + if possible_existing_file.exists(): + raise Conflict( + "File(s) already exist for this RFC", + code="files-exist", + ) + + with TemporaryDirectory() as tempdir: + # Save files in a temporary directory. Use the uploaded filename + # extensions to identify files, but ignore the stems and generate our own. 
+ files_to_move = [] # list[Path] + tmpfile_stem = Path(tempdir) / dest_stem + for upfile in uploaded_files: + uploaded_filename = Path(upfile.name) # name supplied by request + uploaded_ext = "".join(uploaded_filename.suffixes) + tempfile_path = tmpfile_stem.with_suffix(uploaded_ext) + with tempfile_path.open("wb") as dest: + for chunk in upfile.chunks(): + dest.write(chunk) + files_to_move.append(tempfile_path) + # copy files to final location, removing any existing ones first if the + # remove flag was set + if replace: + for possible_existing_file in possible_rfc_files: + possible_existing_file.unlink(missing_ok=True) + for ftm in files_to_move: + shutil.move(ftm, self._destination(ftm)) + # todo store in blob storage as well (need a bucket for RFCs) + + return Response(NotificationAckSerializer().data) diff --git a/ietf/blobdb/replication.py b/ietf/blobdb/replication.py index b9d55c9498..d251d3b95c 100644 --- a/ietf/blobdb/replication.py +++ b/ietf/blobdb/replication.py @@ -146,11 +146,11 @@ def replicate_blob(bucket, name): blob = fetch_blob_via_sql(bucket, name) if blob is None: if verbose_logging_enabled(): - log.log("Deleting {bucket}:{name} from replica") + log.log(f"Deleting {bucket}:{name} from replica") try: destination_storage.delete(name) except Exception as e: - log.log("Failed to delete {bucket}:{name} from replica: {e}") + log.log(f"Failed to delete {bucket}:{name} from replica: {e}") raise ReplicationError from e else: # Add metadata expected by the MetadataS3Storage @@ -170,7 +170,7 @@ def replicate_blob(bucket, name): try: destination_storage.save(name, file_with_metadata) except Exception as e: - log.log("Failed to save {bucket}:{name} to replica: {e}") + log.log(f"Failed to save {bucket}:{name} to replica: {e}") raise ReplicationError from e diff --git a/ietf/community/utils.py b/ietf/community/utils.py index f23e8d26ab..b6137095ef 100644 --- a/ietf/community/utils.py +++ b/ietf/community/utils.py @@ -72,8 +72,10 @@ def 
docs_matching_community_list_rule(rule): return docs.filter(group=rule.group_id) elif rule.rule_type.startswith("state_"): return docs - elif rule.rule_type in ["author", "author_rfc"]: + elif rule.rule_type == "author": return docs.filter(documentauthor__person=rule.person) + elif rule.rule_type == "author_rfc": + return docs.filter(Q(rfcauthor__person=rule.person)|Q(rfcauthor__isnull=True,documentauthor__person=rule.person)) elif rule.rule_type == "ad": return docs.filter(ad=rule.person) elif rule.rule_type == "shepherd": @@ -122,9 +124,16 @@ def community_list_rules_matching_doc(doc): # author rules if doc.type_id == "rfc": + has_rfcauthors = doc.rfcauthor_set.exists() rules |= SearchRule.objects.filter( rule_type="author_rfc", - person__in=list(Person.objects.filter(documentauthor__document=doc)), + person__in=list( + Person.objects.filter( + Q(rfcauthor__document=doc) + if has_rfcauthors + else Q(documentauthor__document=doc) + ) + ), ) else: rules |= SearchRule.objects.filter( diff --git a/ietf/doc/admin.py b/ietf/doc/admin.py index 8f26b222e1..f082418935 100644 --- a/ietf/doc/admin.py +++ b/ietf/doc/admin.py @@ -13,7 +13,8 @@ TelechatDocEvent, BallotPositionDocEvent, ReviewRequestDocEvent, InitialReviewDocEvent, AddedMessageEvent, SubmissionDocEvent, DeletedEvent, EditedAuthorsDocEvent, DocumentURL, ReviewAssignmentDocEvent, IanaExpertDocEvent, IRSGBallotDocEvent, DocExtResource, DocumentActionHolder, - BofreqEditorDocEvent, BofreqResponsibleDocEvent, StoredObject ) + BofreqEditorDocEvent, BofreqResponsibleDocEvent, StoredObject, RfcAuthor, + EditedRfcAuthorsDocEvent) from ietf.utils.admin import SaferTabularInline from ietf.utils.validators import validate_external_resource_value @@ -174,6 +175,7 @@ def short_desc(self, obj): admin.site.register(TelechatDocEvent, DocEventAdmin) admin.site.register(InitialReviewDocEvent, DocEventAdmin) admin.site.register(EditedAuthorsDocEvent, DocEventAdmin) +admin.site.register(EditedRfcAuthorsDocEvent, DocEventAdmin) 
admin.site.register(IanaExpertDocEvent, DocEventAdmin) class BallotPositionDocEventAdmin(DocEventAdmin): @@ -237,3 +239,9 @@ def is_deleted(self, instance): admin.site.register(StoredObject, StoredObjectAdmin) + +class RfcAuthorAdmin(admin.ModelAdmin): + list_display = ['id', 'document', 'titlepage_name', 'person', 'email', 'affiliation', 'country', 'order'] + search_fields = ['document__name', 'titlepage_name', 'person__name', 'email__address', 'affiliation', 'country'] + raw_id_fields = ["document", "person", "email"] +admin.site.register(RfcAuthor, RfcAuthorAdmin) diff --git a/ietf/doc/api.py b/ietf/doc/api.py new file mode 100644 index 0000000000..47e7e6fffd --- /dev/null +++ b/ietf/doc/api.py @@ -0,0 +1,194 @@ +# Copyright The IETF Trust 2024-2026, All Rights Reserved +"""Doc API implementations""" + +from django.db.models import OuterRef, Subquery, Prefetch, Value, JSONField, QuerySet +from django.db.models.functions import TruncDate +from django_filters import rest_framework as filters +from rest_framework import filters as drf_filters +from rest_framework.mixins import ListModelMixin, RetrieveModelMixin +from rest_framework.pagination import LimitOffsetPagination +from rest_framework.viewsets import GenericViewSet + +from ietf.group.models import Group +from ietf.name.models import StreamName, DocTypeName +from ietf.utils.timezone import RPC_TZINFO +from .models import ( + Document, + DocEvent, + RelatedDocument, + DocumentAuthor, + SUBSERIES_DOC_TYPE_IDS, +) +from .serializers import ( + RfcMetadataSerializer, + RfcStatus, + RfcSerializer, + SubseriesDocSerializer, +) + + +class RfcLimitOffsetPagination(LimitOffsetPagination): + default_limit = 10 + max_limit = 500 + + +class RfcFilter(filters.FilterSet): + published = filters.DateFromToRangeFilter() + stream = filters.ModelMultipleChoiceFilter( + queryset=StreamName.objects.filter(used=True) + ) + group = filters.ModelMultipleChoiceFilter( + queryset=Group.objects.wgs(), + field_name="group__acronym", + 
to_field_name="acronym", + ) + area = filters.ModelMultipleChoiceFilter( + queryset=Group.objects.areas(), + field_name="group__parent__acronym", + to_field_name="acronym", + ) + status = filters.MultipleChoiceFilter( + choices=[(slug, slug) for slug in RfcStatus.status_slugs], + method=RfcStatus.filter, + ) + sort = filters.OrderingFilter( + fields=( + ("rfc_number", "number"), # ?sort=number / ?sort=-number + ("published", "published"), # ?sort=published / ?sort=-published + ), + ) + + +class PrefetchRelatedDocument(Prefetch): + """Prefetch via a RelatedDocument + + Prefetches following RelatedDocument relationships to other docs. By default, includes + those for which the current RFC is the `source`. If `reverse` is True, includes those + for which it is the `target` instead. Defaults to only "rfc" documents. + """ + + @staticmethod + def _get_queryset(relationship_id, reverse, doc_type_ids): + """Get queryset to use for the prefetch""" + if isinstance(doc_type_ids, str): + doc_type_ids = (doc_type_ids,) + + return RelatedDocument.objects.filter( + **{ + "relationship_id": relationship_id, + f"{'source' if reverse else 'target'}__type_id__in": doc_type_ids, + } + ).select_related("source" if reverse else "target") + + def __init__(self, to_attr, relationship_id, reverse=False, doc_type_ids="rfc"): + super().__init__( + lookup="targets_related" if reverse else "relateddocument_set", + queryset=self._get_queryset(relationship_id, reverse, doc_type_ids), + to_attr=to_attr, + ) + + +def augment_rfc_queryset(queryset: QuerySet[Document]): + return ( + queryset.select_related("std_level", "stream") + .prefetch_related( + Prefetch( + "group", + Group.objects.select_related("parent"), + ), + Prefetch( + "documentauthor_set", + DocumentAuthor.objects.select_related("email", "person"), + ), + PrefetchRelatedDocument( + to_attr="drafts", + relationship_id="became_rfc", + doc_type_ids="draft", + reverse=True, + ), + PrefetchRelatedDocument(to_attr="obsoletes", 
relationship_id="obs"), + PrefetchRelatedDocument( + to_attr="obsoleted_by", relationship_id="obs", reverse=True + ), + PrefetchRelatedDocument(to_attr="updates", relationship_id="updates"), + PrefetchRelatedDocument( + to_attr="updated_by", relationship_id="updates", reverse=True + ), + PrefetchRelatedDocument( + to_attr="subseries", + relationship_id="contains", + reverse=True, + doc_type_ids=SUBSERIES_DOC_TYPE_IDS, + ), + ) + .annotate( + published_datetime=Subquery( + DocEvent.objects.filter( + doc_id=OuterRef("pk"), + type="published_rfc", + ) + .order_by("-time") + .values("time")[:1] + ), + ) + .annotate(published=TruncDate("published_datetime", tzinfo=RPC_TZINFO)) + .annotate( + # TODO implement these fake fields for real + see_also=Value([], output_field=JSONField()), + formats=Value(["txt", "xml"], output_field=JSONField()), + keywords=Value(["keyword"], output_field=JSONField()), + errata=Value([], output_field=JSONField()), + ) + ) + + +class RfcViewSet(ListModelMixin, RetrieveModelMixin, GenericViewSet): + api_key_endpoint = "ietf.api.red_api" # matches prefix in ietf/api/urls.py + lookup_field = "rfc_number" + queryset = augment_rfc_queryset( + Document.objects.filter(type_id="rfc", rfc_number__isnull=False) + ).order_by("-rfc_number") + + pagination_class = RfcLimitOffsetPagination + filter_backends = [filters.DjangoFilterBackend, drf_filters.SearchFilter] + filterset_class = RfcFilter + search_fields = ["title", "abstract"] + + def get_serializer_class(self): + if self.action == "retrieve": + return RfcSerializer + return RfcMetadataSerializer + + +class PrefetchSubseriesContents(Prefetch): + def __init__(self, to_attr): + super().__init__( + lookup="relateddocument_set", + queryset=RelatedDocument.objects.filter( + relationship_id="contains", + target__type_id="rfc", + ).prefetch_related( + Prefetch( + "target", + queryset=augment_rfc_queryset(Document.objects.all()), + ) + ), + to_attr=to_attr, + ) + + +class SubseriesFilter(filters.FilterSet): + 
type = filters.ModelMultipleChoiceFilter( + queryset=DocTypeName.objects.filter(pk__in=SUBSERIES_DOC_TYPE_IDS) + ) + + +class SubseriesViewSet(ListModelMixin, RetrieveModelMixin, GenericViewSet): + api_key_endpoint = "ietf.api.red_api" # matches prefix in ietf/api/urls.py + lookup_field = "name" + serializer_class = SubseriesDocSerializer + queryset = Document.objects.subseries_docs().prefetch_related( + PrefetchSubseriesContents(to_attr="contents") + ) + filter_backends = [filters.DjangoFilterBackend] + filterset_class = SubseriesFilter diff --git a/ietf/doc/factories.py b/ietf/doc/factories.py index 19aa9ecc9c..aad01be04f 100644 --- a/ietf/doc/factories.py +++ b/ietf/doc/factories.py @@ -14,7 +14,7 @@ from ietf.doc.models import ( Document, DocEvent, NewRevisionDocEvent, State, DocumentAuthor, StateDocEvent, BallotPositionDocEvent, BallotDocEvent, BallotType, IRSGBallotDocEvent, TelechatDocEvent, - DocumentActionHolder, BofreqEditorDocEvent, BofreqResponsibleDocEvent, DocExtResource ) + DocumentActionHolder, BofreqEditorDocEvent, BofreqResponsibleDocEvent, DocExtResource, RfcAuthor ) from ietf.group.models import Group from ietf.person.factories import PersonFactory from ietf.group.factories import RoleFactory @@ -382,6 +382,19 @@ class Meta: country = factory.Faker('country') order = factory.LazyAttribute(lambda o: o.document.documentauthor_set.count() + 1) +class RfcAuthorFactory(factory.django.DjangoModelFactory): + class Meta: + model = RfcAuthor + + document = factory.SubFactory(DocumentFactory) + titlepage_name = factory.LazyAttribute( + lambda obj: " ".join([obj.person.initials(), obj.person.last_name()]) + ) + person = factory.SubFactory('ietf.person.factories.PersonFactory') + email = factory.LazyAttribute(lambda obj: obj.person.email()) + affiliation = factory.Faker('company') + order = factory.LazyAttribute(lambda o: o.document.rfcauthor_set.count() + 1) + class WgDocumentAuthorFactory(DocumentAuthorFactory): document = 
factory.SubFactory(WgDraftFactory) diff --git a/ietf/doc/management/commands/reset_rfc_authors.py b/ietf/doc/management/commands/reset_rfc_authors.py deleted file mode 100644 index e2ab5f1208..0000000000 --- a/ietf/doc/management/commands/reset_rfc_authors.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright The IETF Trust 2024, All Rights Reserved - -# Reset an RFC's authors to those of the draft it came from -from django.core.management.base import BaseCommand, CommandError - -from ietf.doc.models import Document, DocEvent -from ietf.person.models import Person - - -class Command(BaseCommand): - def add_arguments(self, parser): - parser.add_argument("rfcnum", type=int, help="RFC number to modify") - parser.add_argument( - "--force", - action="store_true", - help="reset even if RFC already has authors", - ) - - def handle(self, *args, **options): - try: - rfc = Document.objects.get(type="rfc", rfc_number=options["rfcnum"]) - except Document.DoesNotExist: - raise CommandError( - f"rfc{options['rfcnum']} does not exist in the Datatracker." - ) - - draft = rfc.came_from_draft() - if draft is None: - raise CommandError(f"{rfc.name} did not come from a draft. Can't reset.") - - orig_authors = rfc.documentauthor_set.all() - if orig_authors.exists(): - # Potentially dangerous, so refuse unless "--force" is specified - if not options["force"]: - raise CommandError( - f"{rfc.name} already has authors. Not resetting. Use '--force' to reset anyway." - ) - removed_auth_names = list(orig_authors.values_list("person__name", flat=True)) - rfc.documentauthor_set.all().delete() - DocEvent.objects.create( - doc=rfc, - by=Person.objects.get(name="(System)"), - type="edited_authors", - desc=f"Removed all authors: {', '.join(removed_auth_names)}", - ) - self.stdout.write( - self.style.SUCCESS( - f"Removed author(s): {', '.join(removed_auth_names)}" - ) - ) - - for author in draft.documentauthor_set.all(): - # Copy the author but point at the new doc. 
- # See https://docs.djangoproject.com/en/4.2/topics/db/queries/#copying-model-instances - author.pk = None - author.id = None - author._state.adding = True - author.document = rfc - author.save() - self.stdout.write( - self.style.SUCCESS(f"Added author {author.person.name} <{author.email}>") - ) - auth_names = draft.documentauthor_set.values_list("person__name", flat=True) - DocEvent.objects.create( - doc=rfc, - by=Person.objects.get(name="(System)"), - type="edited_authors", - desc=f"Set authors from rev {draft.rev} of {draft.name}: {', '.join(auth_names)}", - ) diff --git a/ietf/doc/management/commands/tests.py b/ietf/doc/management/commands/tests.py deleted file mode 100644 index 8244d87266..0000000000 --- a/ietf/doc/management/commands/tests.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright The IETF Trust 2024, All Rights Reserved -# -*- coding: utf-8 -*- - -from io import StringIO - -from django.core.management import call_command, CommandError - -from ietf.doc.factories import DocumentAuthorFactory, WgDraftFactory, WgRfcFactory -from ietf.doc.models import Document, DocumentAuthor -from ietf.utils.test_utils import TestCase - - -class CommandTests(TestCase): - @staticmethod - def _call_command(command_name, *args, **options): - """Call command, capturing (and suppressing) output""" - out = StringIO() - err = StringIO() - options["stdout"] = out - options["stderr"] = err - call_command(command_name, *args, **options) - return out.getvalue(), err.getvalue() - - def test_reset_rfc_authors(self): - command_name = "reset_rfc_authors" - - draft = WgDraftFactory() - DocumentAuthorFactory.create_batch(3, document=draft) - rfc = WgRfcFactory() # rfc does not yet have a draft - DocumentAuthorFactory.create_batch(3, document=rfc) - bad_rfc_num = ( - 1 - + Document.objects.filter(rfc_number__isnull=False) - .order_by("-rfc_number") - .first() - .rfc_number - ) - docauthor_fields = [ - field.name - for field in DocumentAuthor._meta.get_fields() - if field.name not in 
["document", "id"] - ] - - with self.assertRaises(CommandError, msg="Cannot reset a bad RFC number"): - self._call_command(command_name, bad_rfc_num) - - with self.assertRaises(CommandError, msg="Cannot reset an RFC with no draft"): - self._call_command(command_name, rfc.rfc_number) - - with self.assertRaises(CommandError, msg="Cannot force-reset an RFC with no draft"): - self._call_command(command_name, rfc.rfc_number, "--force") - - # Link the draft to the rfc - rfc.targets_related.create(relationship_id="became_rfc", source=draft) - - with self.assertRaises(CommandError, msg="Cannot reset an RFC with authors"): - self._call_command(command_name, rfc.rfc_number) - - # Calling with force should work - self._call_command(command_name, rfc.rfc_number, "--force") - self.assertCountEqual( - draft.documentauthor_set.values(*docauthor_fields), - rfc.documentauthor_set.values(*docauthor_fields), - ) - - # Calling on an RFC with no authors should also work - rfc.documentauthor_set.all().delete() - self._call_command(command_name, rfc.rfc_number) - self.assertCountEqual( - draft.documentauthor_set.values(*docauthor_fields), - rfc.documentauthor_set.values(*docauthor_fields), - ) diff --git a/ietf/doc/migrations/0027_alter_dochistory_title_alter_document_title.py b/ietf/doc/migrations/0027_alter_dochistory_title_alter_document_title.py new file mode 100644 index 0000000000..e0d8560e6f --- /dev/null +++ b/ietf/doc/migrations/0027_alter_dochistory_title_alter_document_title.py @@ -0,0 +1,41 @@ +# Copyright The IETF Trust 2025, All Rights Reserved + +import django.core.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("doc", "0026_change_wg_state_descriptions"), + ] + + operations = [ + migrations.AlterField( + model_name="dochistory", + name="title", + field=models.CharField( + max_length=255, + validators=[ + django.core.validators.ProhibitNullCharactersValidator, # type:ignore + 
django.core.validators.RegexValidator( + message="Please enter a string without control characters.", + regex="^[^\x01-\x1f]*$", + ), + ], + ), + ), + migrations.AlterField( + model_name="document", + name="title", + field=models.CharField( + max_length=255, + validators=[ + django.core.validators.ProhibitNullCharactersValidator, # type:ignore + django.core.validators.RegexValidator( + message="Please enter a string without control characters.", + regex="^[^\x01-\x1f]*$", + ), + ], + ), + ), + ] diff --git a/ietf/doc/migrations/0028_rfcauthor.py b/ietf/doc/migrations/0028_rfcauthor.py new file mode 100644 index 0000000000..776dc22eb1 --- /dev/null +++ b/ietf/doc/migrations/0028_rfcauthor.py @@ -0,0 +1,84 @@ +# Copyright The IETF Trust 2025, All Rights Reserved + +from django.db import migrations, models +import django.db.models.deletion +import ietf.utils.models + + +class Migration(migrations.Migration): + dependencies = [ + ("person", "0005_alter_historicalperson_pronouns_selectable_and_more"), + ("doc", "0027_alter_dochistory_title_alter_document_title"), + ] + + operations = [ + migrations.CreateModel( + name="RfcAuthor", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("titlepage_name", models.CharField(max_length=128)), + ("is_editor", models.BooleanField(default=False)), + ( + "affiliation", + models.CharField( + blank=True, + help_text="Organization/company used by author for submission", + max_length=100, + ), + ), + ( + "country", + models.CharField( + blank=True, + help_text="Country used by author for submission", + max_length=255, + ), + ), + ("order", models.IntegerField(default=1)), + ( + "document", + ietf.utils.models.ForeignKey( + limit_choices_to={"type_id": "rfc"}, + on_delete=django.db.models.deletion.CASCADE, + to="doc.document", + ), + ), + ( + "email", + ietf.utils.models.ForeignKey( + blank=True, + help_text="Email address used by author for submission", 
+ null=True, + on_delete=django.db.models.deletion.PROTECT, + to="person.email", + ), + ), + ( + "person", + ietf.utils.models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.PROTECT, + to="person.person", + ), + ), + ], + options={ + "ordering": ["document", "order"], + "indexes": [ + models.Index( + fields=["document", "order"], + name="doc_rfcauth_documen_6b5dc4_idx", + ) + ], + }, + ), + ] diff --git a/ietf/doc/migrations/0029_editedrfcauthorsdocevent.py b/ietf/doc/migrations/0029_editedrfcauthorsdocevent.py new file mode 100644 index 0000000000..60837c5cb2 --- /dev/null +++ b/ietf/doc/migrations/0029_editedrfcauthorsdocevent.py @@ -0,0 +1,30 @@ +# Copyright The IETF Trust 2025, All Rights Reserved + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + dependencies = [ + ("doc", "0028_rfcauthor"), + ] + + operations = [ + migrations.CreateModel( + name="EditedRfcAuthorsDocEvent", + fields=[ + ( + "docevent_ptr", + models.OneToOneField( + auto_created=True, + on_delete=django.db.models.deletion.CASCADE, + parent_link=True, + primary_key=True, + serialize=False, + to="doc.docevent", + ), + ), + ], + bases=("doc.docevent",), + ), + ] diff --git a/ietf/doc/migrations/0030_alter_dochistory_title_alter_document_title.py b/ietf/doc/migrations/0030_alter_dochistory_title_alter_document_title.py new file mode 100644 index 0000000000..9ee858b2e8 --- /dev/null +++ b/ietf/doc/migrations/0030_alter_dochistory_title_alter_document_title.py @@ -0,0 +1,41 @@ +# Copyright The IETF Trust 2026, All Rights Reserved + +import django.core.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("doc", "0029_editedrfcauthorsdocevent"), + ] + + operations = [ + migrations.AlterField( + model_name="dochistory", + name="title", + field=models.CharField( + max_length=255, + validators=[ + 
django.core.validators.ProhibitNullCharactersValidator(), + django.core.validators.RegexValidator( + message="Please enter a string without control characters.", + regex="^[^\x01-\x1f]*$", + ), + ], + ), + ), + migrations.AlterField( + model_name="document", + name="title", + field=models.CharField( + max_length=255, + validators=[ + django.core.validators.ProhibitNullCharactersValidator(), + django.core.validators.RegexValidator( + message="Please enter a string without control characters.", + regex="^[^\x01-\x1f]*$", + ), + ], + ), + ), + ] diff --git a/ietf/doc/models.py b/ietf/doc/models.py index 8bb79b64ed..cce9203d09 100644 --- a/ietf/doc/models.py +++ b/ietf/doc/models.py @@ -1,7 +1,8 @@ -# Copyright The IETF Trust 2010-2025, All Rights Reserved +# Copyright The IETF Trust 2010-2026, All Rights Reserved # -*- coding: utf-8 -*- +from collections import namedtuple import datetime import logging import os @@ -20,7 +21,11 @@ from django.core import checks from django.core.files.base import File from django.core.cache import caches -from django.core.validators import URLValidator, RegexValidator +from django.core.validators import ( + URLValidator, + RegexValidator, + ProhibitNullCharactersValidator, +) from django.urls import reverse as urlreverse from django.contrib.contenttypes.models import ContentType from django.conf import settings @@ -107,7 +112,13 @@ class DocumentInfo(models.Model): time = models.DateTimeField(default=timezone.now) # should probably have auto_now=True type = ForeignKey(DocTypeName, blank=True, null=True) # Draft, Agenda, Minutes, Charter, Discuss, Guideline, Email, Review, Issue, Wiki, External ... 
- title = models.CharField(max_length=255, validators=[validate_no_control_chars, ]) + title = models.CharField( + max_length=255, + validators=[ + ProhibitNullCharactersValidator(), + validate_no_control_chars, + ], + ) states = models.ManyToManyField(State, blank=True) # plain state (Active/Expired/...), IESG state, stream state tags = models.ManyToManyField(DocTagName, blank=True) # Revised ID Needed, ExternalParty, AD Followup, ... @@ -407,9 +418,55 @@ def friendly_state(self): else: return state.name + def author_names(self): + """Author names as a list of strings""" + names = [] + if self.type_id == "rfc" and self.rfcauthor_set.exists(): + for author in self.rfcauthor_set.select_related("person"): + if author.person: + names.append(author.person.name) + else: + # titlepage_name cannot be blank + names.append(author.titlepage_name) + else: + names = [ + author.person.name + for author in self.documentauthor_set.select_related("person") + ] + return names + + def author_persons_or_names(self): + """Authors as a list of named tuples with person and/or titlepage_name""" + Author = namedtuple("Author", "person titlepage_name") + persons_or_names = [] + if self.type_id=="rfc" and self.rfcauthor_set.exists(): + for author in self.rfcauthor_set.select_related("person"): + persons_or_names.append(Author(person=author.person, titlepage_name=author.titlepage_name)) + else: + for author in self.documentauthor_set.select_related("person"): + persons_or_names.append(Author(person=author.person, titlepage_name="")) + return persons_or_names + + def author_persons(self): + """Authors as a list of Persons + + Omits any RfcAuthors with a null person field. 
+ """ + if self.type_id == "rfc" and self.rfcauthor_set.exists(): + authors_qs = self.rfcauthor_set.filter(person__isnull=False) + else: + authors_qs = self.documentauthor_set.all() + return [a.person for a in authors_qs.select_related("person")] + def author_list(self): + """List of author emails""" + author_qs = ( + self.rfcauthor_set + if self.type_id == "rfc" and self.rfcauthor_set.exists() + else self.documentauthor_set + ).select_related("email").order_by("order") best_addresses = [] - for author in self.documentauthor_set.all(): + for author in author_qs: if author.email: if author.email.active or not author.email.person: best_addresses.append(author.email.address) @@ -417,9 +474,6 @@ def author_list(self): best_addresses.append(author.email.person.email_address()) return ", ".join(best_addresses) - def authors(self): - return [ a.person for a in self.documentauthor_set.all() ] - # This, and several other ballot related functions here, assume that there is only one active ballot for a document at any point in time. # If that assumption is violated, they will only expose the most recently created ballot def ballot_open(self, ballot_type_slug): @@ -721,7 +775,14 @@ def referenced_by_rfcs_as_rfc_or_draft(self): if self.type_id == "rfc" and self.came_from_draft(): refs_to |= self.came_from_draft().referenced_by_rfcs() return refs_to - + + def sent_to_rfc_editor_event(self): + if self.stream_id == "ietf": + return self.docevent_set.filter(type="iesg_approved").order_by("-time").first() + elif self.stream_id in ["editorial", "iab", "irtf", "ise"]: + return self.docevent_set.filter(type="requested_publication").order_by("-time").first() + else: + return None class Meta: abstract = True @@ -845,6 +906,45 @@ def is_approved_downref(self): return False +class RfcAuthor(models.Model): + """Captures the authors of an RFC as represented on the RFC title page. 
+ + This deviates from DocumentAuthor in that it does not get moved into the DocHistory + hierarchy as documents are saved. It will attempt to preserve email, country, and affiliation + from the DocumentAuthor objects associated with the draft leading to this RFC (which + may be wrong if the author moves or changes affiliation while the document is in the + queue). + + It does not, at this time, attempt to capture the authors from anything _but_ the title + page. The datatracker may know more about such authors based on information from the draft + leading to the RFC, and future work may take that into account. + + Once doc.rfcauthor_set.exists() for a doc of type `rfc`, doc.documentauthor_set should be + ignored. + """ + + document = ForeignKey( + "Document", + on_delete=models.CASCADE, + limit_choices_to={"type_id": "rfc"}, # only affects ModelForms (e.g., admin) + ) + titlepage_name = models.CharField(max_length=128, blank=False) + is_editor = models.BooleanField(default=False) + person = ForeignKey(Person, null=True, blank=True, on_delete=models.PROTECT) + email = ForeignKey(Email, help_text="Email address used by author for submission", blank=True, null=True, on_delete=models.PROTECT) + affiliation = models.CharField(max_length=100, blank=True, help_text="Organization/company used by author for submission") + country = models.CharField(max_length=255, blank=True, help_text="Country used by author for submission") + order = models.IntegerField(default=1) + + def __str__(self): + return u"%s %s (%s)" % (self.document.name, self.person, self.order) + + class Meta: + ordering=["document", "order"] + indexes=[ + models.Index(fields=["document", "order"]) + ] + class DocumentAuthorInfo(models.Model): person = ForeignKey(Person) # email should only be null for some historic documents @@ -894,7 +994,7 @@ class Meta: def role_for_doc(self): """Brief string description of this person's relationship to the doc""" roles = [] - if self.person in self.document.authors(): + 
if self.person in self.document.author_persons(): roles.append('Author') if self.person == self.document.ad: roles.append('Responsible AD') @@ -920,7 +1020,18 @@ def role_for_doc(self): 'invalid' ) + +SUBSERIES_DOC_TYPE_IDS = ("bcp", "fyi", "std") + + +class DocumentQuerySet(models.QuerySet): + def subseries_docs(self): + return self.filter(type_id__in=SUBSERIES_DOC_TYPE_IDS) + + class Document(StorableMixin, DocumentInfo): + objects = DocumentQuerySet.as_manager() + name = models.CharField(max_length=255, validators=[validate_docname,], unique=True) # immutable action_holders = models.ManyToManyField(Person, through=DocumentActionHolder, blank=True) @@ -1581,6 +1692,11 @@ class EditedAuthorsDocEvent(DocEvent): """ basis = models.CharField(help_text="What is the source or reasoning for the changes to the author list",max_length=255) + +class EditedRfcAuthorsDocEvent(DocEvent): + """Change to the RfcAuthor list for a document""" + + class BofreqEditorDocEvent(DocEvent): """ Capture the proponents of a BOF Request.""" editors = models.ManyToManyField('person.Person', blank=True) diff --git a/ietf/doc/resources.py b/ietf/doc/resources.py index 157a3ad556..556465a522 100644 --- a/ietf/doc/resources.py +++ b/ietf/doc/resources.py @@ -17,8 +17,9 @@ InitialReviewDocEvent, DocHistoryAuthor, BallotDocEvent, RelatedDocument, RelatedDocHistory, BallotPositionDocEvent, AddedMessageEvent, SubmissionDocEvent, ReviewRequestDocEvent, ReviewAssignmentDocEvent, EditedAuthorsDocEvent, DocumentURL, - IanaExpertDocEvent, IRSGBallotDocEvent, DocExtResource, DocumentActionHolder, - BofreqEditorDocEvent, BofreqResponsibleDocEvent, StoredObject) + IanaExpertDocEvent, IRSGBallotDocEvent, DocExtResource, DocumentActionHolder, + BofreqEditorDocEvent, BofreqResponsibleDocEvent, StoredObject, RfcAuthor, + EditedRfcAuthorsDocEvent) from ietf.name.resources import BallotPositionNameResource, DocTypeNameResource class BallotTypeResource(ModelResource): @@ -650,6 +651,31 @@ class Meta: 
api.doc.register(EditedAuthorsDocEventResource()) + +from ietf.person.resources import PersonResource +class EditedRfcAuthorsDocEventResource(ModelResource): + by = ToOneField(PersonResource, 'by') + doc = ToOneField(DocumentResource, 'doc') + docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr') + class Meta: + queryset = EditedRfcAuthorsDocEvent.objects.all() + serializer = api.Serializer() + cache = SimpleCache() + #resource_name = 'editedrfcauthorsdocevent' + ordering = ['id', ] + filtering = { + "id": ALL, + "time": ALL, + "type": ALL, + "rev": ALL, + "desc": ALL, + "by": ALL_WITH_RELATIONS, + "doc": ALL_WITH_RELATIONS, + "docevent_ptr": ALL_WITH_RELATIONS, + } +api.doc.register(EditedRfcAuthorsDocEventResource()) + + from ietf.name.resources import DocUrlTagNameResource class DocumentURLResource(ModelResource): doc = ToOneField(DocumentResource, 'doc') @@ -865,3 +891,28 @@ class Meta: "deleted": ALL, } api.doc.register(StoredObjectResource()) + + +from ietf.person.resources import EmailResource, PersonResource +class RfcAuthorResource(ModelResource): + document = ToOneField(DocumentResource, 'document') + person = ToOneField(PersonResource, 'person', null=True) + email = ToOneField(EmailResource, 'email', null=True) + class Meta: + queryset = RfcAuthor.objects.all() + serializer = api.Serializer() + cache = SimpleCache() + #resource_name = 'rfcauthor' + ordering = ['id', ] + filtering = { + "id": ALL, + "titlepage_name": ALL, + "is_editor": ALL, + "affiliation": ALL, + "country": ALL, + "order": ALL, + "document": ALL_WITH_RELATIONS, + "person": ALL_WITH_RELATIONS, + "email": ALL_WITH_RELATIONS, + } +api.doc.register(RfcAuthorResource()) diff --git a/ietf/doc/serializers.py b/ietf/doc/serializers.py new file mode 100644 index 0000000000..05647d9ce1 --- /dev/null +++ b/ietf/doc/serializers.py @@ -0,0 +1,316 @@ +# Copyright The IETF Trust 2024-2026, All Rights Reserved +"""django-rest-framework serializers""" + +from dataclasses import dataclass +from 
typing import Literal, ClassVar + +from django.db.models.manager import BaseManager +from django.db.models.query import QuerySet +from drf_spectacular.utils import extend_schema_field +from rest_framework import serializers + +from ietf.group.serializers import GroupSerializer +from ietf.name.serializers import StreamNameSerializer +from .models import Document, DocumentAuthor, RfcAuthor + + +class RfcAuthorSerializer(serializers.ModelSerializer): + """Serializer for an RfcAuthor / DocumentAuthor in a response""" + datatracker_person_path = serializers.URLField( + source="person.get_absolute_url", + required=False, + help_text="URL for person link (relative to datatracker base URL)", + ) + + class Meta: + model = RfcAuthor + fields = [ + "titlepage_name", + "is_editor", + "person", + "email", # relies on email.pk being email.address + "affiliation", + "country", + "datatracker_person_path", + ] + + def to_representation(self, instance): + """instance -> primitive data types + + Translates a DocumentAuthor into an equivalent RfcAuthor we can use the same + serializer for either type. + """ + if isinstance(instance, DocumentAuthor): + # create a non-persisted RfcAuthor as a shim - do not save it! 
+ document_author = instance + instance = RfcAuthor( + titlepage_name=document_author.person.plain_name(), + is_editor=False, + person=document_author.person, + email=document_author.email, + affiliation=document_author.affiliation, + country=document_author.country, + order=document_author.order, + ) + return super().to_representation(instance) + + def validate(self, data): + email = data.get("email") + if email is not None: + person = data.get("person") + if person is None: + raise serializers.ValidationError( + { + "email": "cannot have an email without a person", + }, + code="email-without-person", + ) + if email.person_id != person.pk: + raise serializers.ValidationError( + { + "email": "email must belong to person", + }, + code="email-person-mismatch", + ) + return data + + +@dataclass +class DocIdentifier: + type: Literal["doi", "issn"] + value: str + + +class DocIdentifierSerializer(serializers.Serializer): + type = serializers.ChoiceField(choices=["doi", "issn"]) + value = serializers.CharField() + + +type RfcStatusSlugT = Literal[ + "std", "ps", "ds", "bcp", "inf", "exp", "hist", "unkn", "not-issued", +] + + +@dataclass +class RfcStatus: + """Helper to extract the 'Status' from an RFC document for serialization""" + + slug: RfcStatusSlugT + + # Names that aren't just the slug itself. ClassVar annotation prevents dataclass from treating this as a field. 
+ fancy_names: ClassVar[dict[RfcStatusSlugT, str]] = { + "std": "internet standard", + "ps": "proposed standard", + "ds": "draft standard", + "bcp": "best current practice", + "inf": "informational", + "exp": "experimental", + "hist": "historic", + "unkn": "unknown", + } + + # ClassVar annotation prevents dataclass from treating this as a field + stdlevelname_slug_map: ClassVar[dict[str, RfcStatusSlugT]] = { + "bcp": "bcp", + "ds": "ds", + "exp": "exp", + "hist": "hist", + "inf": "inf", + "std": "std", + "ps": "ps", + "unkn": "unkn", + } + + # ClassVar annotation prevents dataclass from treating this as a field + status_slugs: ClassVar[list[RfcStatusSlugT]] = sorted( + # TODO implement "not-issued" RFCs + set(stdlevelname_slug_map.values()) | {"not-issued"} + ) + + @property + def name(self): + return RfcStatus.fancy_names.get(self.slug, self.slug) + + @classmethod + def from_document(cls, doc: Document): + """Decide the status that applies to a document""" + return cls( + slug=(cls.stdlevelname_slug_map.get(doc.std_level.slug, "unkn")), + ) + + @classmethod + def filter(cls, queryset, name, value: list[RfcStatusSlugT]): + """Filter a queryset by status + + This is basically the inverse of the from_document() method. Given a status name, filter + the queryset to those in that status. The queryset should be a Document queryset. 
+ """ + interesting_slugs = [ + stdlevelname_slug + for stdlevelname_slug, status_slug in cls.stdlevelname_slug_map.items() + if status_slug in value + ] + if len(interesting_slugs) == 0: + return queryset.none() + return queryset.filter(std_level__slug__in=interesting_slugs) + + +class RfcStatusSerializer(serializers.Serializer): + """Status serializer for a Document instance""" + + slug = serializers.ChoiceField(choices=RfcStatus.status_slugs) + name = serializers.CharField() + + def to_representation(self, instance: Document): + return super().to_representation(instance=RfcStatus.from_document(instance)) + + +class RelatedDraftSerializer(serializers.Serializer): + id = serializers.IntegerField(source="source.id") + name = serializers.CharField(source="source.name") + title = serializers.CharField(source="source.title") + + +class RelatedRfcSerializer(serializers.Serializer): + id = serializers.IntegerField(source="target.id") + number = serializers.IntegerField(source="target.rfc_number") + title = serializers.CharField(source="target.title") + + +class ReverseRelatedRfcSerializer(serializers.Serializer): + id = serializers.IntegerField(source="source.id") + number = serializers.IntegerField(source="source.rfc_number") + title = serializers.CharField(source="source.title") + + +class ContainingSubseriesSerializer(serializers.Serializer): + name = serializers.CharField(source="source.name") + type = serializers.CharField(source="source.type_id") + + +class RfcMetadataSerializer(serializers.ModelSerializer): + """Serialize metadata of an RFC""" + + RFC_FORMATS = ("xml", "txt", "html", "htmlized", "pdf", "ps") + + number = serializers.IntegerField(source="rfc_number") + published = serializers.DateField() + status = RfcStatusSerializer(source="*") + authors = serializers.SerializerMethodField() + group = GroupSerializer() + area = GroupSerializer(source="group.area", required=False) + stream = StreamNameSerializer() + identifiers = 
serializers.SerializerMethodField() + draft = serializers.SerializerMethodField() + obsoletes = RelatedRfcSerializer(many=True, read_only=True) + obsoleted_by = ReverseRelatedRfcSerializer(many=True, read_only=True) + updates = RelatedRfcSerializer(many=True, read_only=True) + updated_by = ReverseRelatedRfcSerializer(many=True, read_only=True) + subseries = ContainingSubseriesSerializer(many=True, read_only=True) + see_also = serializers.ListField(child=serializers.CharField(), read_only=True) + formats = serializers.MultipleChoiceField(choices=RFC_FORMATS) + keywords = serializers.ListField(child=serializers.CharField(), read_only=True) + errata = serializers.ListField(child=serializers.CharField(), read_only=True) + + class Meta: + model = Document + fields = [ + "number", + "title", + "published", + "status", + "pages", + "authors", + "group", + "area", + "stream", + "identifiers", + "obsoletes", + "obsoleted_by", + "updates", + "updated_by", + "subseries", + "see_also", + "draft", + "abstract", + "formats", + "keywords", + "errata", + ] + + + @extend_schema_field(RfcAuthorSerializer(many=True)) + def get_authors(self, doc: Document): + # If doc has any RfcAuthors, use those, otherwise fall back to DocumentAuthors + author_queryset: QuerySet[RfcAuthor] | QuerySet[DocumentAuthor] = ( + doc.rfcauthor_set.all() + if doc.rfcauthor_set.exists() + else doc.documentauthor_set.all() + ) + # RfcAuthorSerializer can deal with DocumentAuthor instances + return RfcAuthorSerializer( + instance=author_queryset, + many=True, + ).data + + @extend_schema_field(DocIdentifierSerializer(many=True)) + def get_identifiers(self, doc: Document): + identifiers = [] + if doc.rfc_number: + identifiers.append( + DocIdentifier(type="doi", value=f"10.17487/RFC{doc.rfc_number:04d}") + ) + return DocIdentifierSerializer(instance=identifiers, many=True).data + + @extend_schema_field(RelatedDraftSerializer) + def get_draft(self, object): + try: + related_doc = object.drafts[0] + except 
IndexError: + return None + return RelatedDraftSerializer(related_doc).data + + +class RfcSerializer(RfcMetadataSerializer): + """Serialize an RFC, including its metadata and text content if available""" + + text = serializers.CharField(allow_null=True) + + class Meta: + model = RfcMetadataSerializer.Meta.model + fields = RfcMetadataSerializer.Meta.fields + ["text"] + + +class SubseriesContentListSerializer(serializers.ListSerializer): + """ListSerializer that gets its object from item.target""" + + def to_representation(self, data): + """ + List of object instances -> List of dicts of primitive datatypes. + """ + # Dealing with nested relationships, data can be a Manager, + # so, first get a queryset from the Manager if needed + iterable = data.all() if isinstance(data, BaseManager) else data + # Serialize item.target instead of item itself + return [self.child.to_representation(item.target) for item in iterable] + + +class SubseriesContentSerializer(RfcMetadataSerializer): + """Serialize RFC contained in a subseries doc""" + + class Meta(RfcMetadataSerializer.Meta): + list_serializer_class = SubseriesContentListSerializer + + +class SubseriesDocSerializer(serializers.ModelSerializer): + """Serialize a subseries document (e.g., a BCP or STD)""" + + contents = SubseriesContentSerializer(many=True) + + class Meta: + model = Document + fields = [ + "name", + "type", + "contents", + ] diff --git a/ietf/doc/tests.py b/ietf/doc/tests.py index 16dcfb7754..f92c9648e6 100644 --- a/ietf/doc/tests.py +++ b/ietf/doc/tests.py @@ -39,11 +39,15 @@ from ietf.doc.models import ( Document, DocRelationshipName, RelatedDocument, State, DocEvent, BallotPositionDocEvent, LastCallDocEvent, WriteupDocEvent, NewRevisionDocEvent, BallotType, EditedAuthorsDocEvent, StateType) -from ietf.doc.factories import ( DocumentFactory, DocEventFactory, CharterFactory, - ConflictReviewFactory, WgDraftFactory, IndividualDraftFactory, WgRfcFactory, - IndividualRfcFactory, StateDocEventFactory, 
BallotPositionDocEventFactory, - BallotDocEventFactory, DocumentAuthorFactory, NewRevisionDocEventFactory, - StatusChangeFactory, DocExtResourceFactory, RgDraftFactory, BcpFactory) +from ietf.doc.factories import (DocumentFactory, DocEventFactory, CharterFactory, + ConflictReviewFactory, WgDraftFactory, + IndividualDraftFactory, WgRfcFactory, + IndividualRfcFactory, StateDocEventFactory, + BallotPositionDocEventFactory, + BallotDocEventFactory, DocumentAuthorFactory, + NewRevisionDocEventFactory, + StatusChangeFactory, DocExtResourceFactory, + RgDraftFactory, BcpFactory, RfcAuthorFactory) from ietf.doc.forms import NotifyForm from ietf.doc.fields import SearchableDocumentsField from ietf.doc.utils import ( @@ -979,7 +983,7 @@ def test_edit_authors_permissions(self): # Relevant users not authorized to edit authors unauthorized_usernames = [ 'plain', - *[author.user.username for author in draft.authors()], + *[author.user.username for author in draft.author_persons()], draft.group.get_chair().person.user.username, 'ad' ] @@ -994,7 +998,7 @@ def test_edit_authors_permissions(self): self.client.logout() # Try to add an author via POST - still only the secretary should be able to do this. 
- orig_authors = draft.authors() + orig_authors = draft.author_persons() post_data = self.make_edit_authors_post_data( basis='permission test', authors=draft.documentauthor_set.all(), @@ -1012,12 +1016,12 @@ def test_edit_authors_permissions(self): for username in unauthorized_usernames: login_testing_unauthorized(self, username, url, method='post', request_kwargs=dict(data=post_data)) draft = Document.objects.get(pk=draft.pk) - self.assertEqual(draft.authors(), orig_authors) # ensure draft author list was not modified + self.assertEqual(draft.author_persons(), orig_authors) # ensure draft author list was not modified login_testing_unauthorized(self, 'secretary', url, method='post', request_kwargs=dict(data=post_data)) r = self.client.post(url, post_data) self.assertEqual(r.status_code, 302) draft = Document.objects.get(pk=draft.pk) - self.assertEqual(draft.authors(), orig_authors + [new_auth_person]) + self.assertEqual(draft.author_persons(), orig_authors + [new_auth_person]) def make_edit_authors_post_data(self, basis, authors): """Helper to generate edit_authors POST data for a set of authors""" @@ -1365,8 +1369,8 @@ def test_edit_authors_edit_fields(self): basis=change_reason ) - old_address = draft.authors()[0].email() - new_email = EmailFactory(person=draft.authors()[0], address=f'changed-{old_address}') + old_address = draft.author_persons()[0].email() + new_email = EmailFactory(person=draft.author_persons()[0], address=f'changed-{old_address}') post_data['author-0-email'] = new_email.address post_data['author-1-affiliation'] = 'University of Nowhere' post_data['author-2-country'] = 'Chile' @@ -1399,17 +1403,17 @@ def test_edit_authors_edit_fields(self): country_event = change_events.filter(desc__icontains='changed country').first() self.assertIsNotNone(email_event) - self.assertIn(draft.authors()[0].name, email_event.desc) + self.assertIn(draft.author_persons()[0].name, email_event.desc) self.assertIn(before[0]['email'], email_event.desc) 
self.assertIn(after[0]['email'], email_event.desc) self.assertIsNotNone(affiliation_event) - self.assertIn(draft.authors()[1].name, affiliation_event.desc) + self.assertIn(draft.author_persons()[1].name, affiliation_event.desc) self.assertIn(before[1]['affiliation'], affiliation_event.desc) self.assertIn(after[1]['affiliation'], affiliation_event.desc) self.assertIsNotNone(country_event) - self.assertIn(draft.authors()[2].name, country_event.desc) + self.assertIn(draft.author_persons()[2].name, country_event.desc) self.assertIn(before[2]['country'], country_event.desc) self.assertIn(after[2]['country'], country_event.desc) @@ -1863,13 +1867,63 @@ def test_document_ballot_needed_positions(self): def test_document_json(self): doc = IndividualDraftFactory() - + author = DocumentAuthorFactory(document=doc) + r = self.client.get(urlreverse("ietf.doc.views_doc.document_json", kwargs=dict(name=doc.name))) self.assertEqual(r.status_code, 200) data = r.json() - self.assertEqual(doc.name, data['name']) - self.assertEqual(doc.pages,data['pages']) + self.assertEqual(data["name"], doc.name) + self.assertEqual(data["pages"], doc.pages) + self.assertEqual( + data["authors"], + [ + { + "name": author.person.name, + "email": author.email.address, + "affiliation": author.affiliation, + } + ] + ) + def test_document_json_rfc(self): + doc = IndividualRfcFactory() + old_style_author = DocumentAuthorFactory(document=doc) + url = urlreverse("ietf.doc.views_doc.document_json", kwargs=dict(name=doc.name)) + + r = self.client.get(url) + self.assertEqual(r.status_code, 200) + data = r.json() + self.assertEqual(data["name"], doc.name) + self.assertEqual(data["pages"], doc.pages) + self.assertEqual( + data["authors"], + [ + { + "name": old_style_author.person.name, + "email": old_style_author.email.address, + "affiliation": old_style_author.affiliation, + } + ] + ) + + new_style_author = RfcAuthorFactory(document=doc) + r = self.client.get(url) + self.assertEqual(r.status_code, 200) + data = 
r.json() + self.assertEqual(data["name"], doc.name) + self.assertEqual(data["pages"], doc.pages) + self.assertEqual( + data["authors"], + [ + { + "name": new_style_author.titlepage_name, + "email": new_style_author.email.address, + "affiliation": new_style_author.affiliation, + } + ] + ) + + def test_writeup(self): doc = IndividualDraftFactory(states = [('draft','active'),('draft-iesg','iesg-eva')],) diff --git a/ietf/doc/tests_draft.py b/ietf/doc/tests_draft.py index cebeac1f27..21a873c5c0 100644 --- a/ietf/doc/tests_draft.py +++ b/ietf/doc/tests_draft.py @@ -140,7 +140,7 @@ def test_change_state(self): self.assertEqual(draft.get_state_slug("draft-iesg"), "review-e") self.assertTrue(not draft.tags.filter(slug="ad-f-up")) self.assertTrue(draft.tags.filter(slug="need-rev")) - self.assertCountEqual(draft.action_holders.all(), [ad] + draft.authors()) + self.assertCountEqual(draft.action_holders.all(), [ad] + draft.author_persons()) self.assertEqual(draft.docevent_set.count(), events_before + 3) self.assertTrue("Test comment" in draft.docevent_set.all()[0].desc) self.assertTrue("Changed action holders" in draft.docevent_set.all()[1].desc) @@ -179,7 +179,7 @@ def test_pull_from_rfc_queue(self): states=[('draft-iesg','rfcqueue')], ) DocEventFactory(type='started_iesg_process',by=ad,doc=draft,rev=draft.rev,desc="Started IESG Process") - draft.action_holders.add(*(draft.authors())) + draft.action_holders.add(*(draft.author_persons())) url = urlreverse('ietf.doc.views_draft.change_state', kwargs=dict(name=draft.name)) login_testing_unauthorized(self, "secretary", url) @@ -279,7 +279,7 @@ def test_request_last_call(self): states=[('draft-iesg','ad-eval')], ) DocEventFactory(type='started_iesg_process',by=ad,doc=draft,rev=draft.rev,desc="Started IESG Process") - draft.action_holders.add(*(draft.authors())) + draft.action_holders.add(*(draft.author_persons())) self.client.login(username="secretary", password="secretary+password") url = 
urlreverse('ietf.doc.views_draft.change_state', kwargs=dict(name=draft.name)) @@ -1369,7 +1369,7 @@ def _test_changing_ah(action_holders, reason): _test_changing_ah([doc.ad, doc.shepherd.person], 'this is a first test') _test_changing_ah([doc.ad], 'this is a second test') - _test_changing_ah(doc.authors(), 'authors can do it, too') + _test_changing_ah(doc.author_persons(), 'authors can do it, too') _test_changing_ah([], 'clear it back out') def test_doc_change_action_holders_as_doc_manager(self): diff --git a/ietf/doc/tests_review.py b/ietf/doc/tests_review.py index 8c1fc99ffe..82d1b5c232 100644 --- a/ietf/doc/tests_review.py +++ b/ietf/doc/tests_review.py @@ -822,7 +822,7 @@ def test_complete_review_upload_content(self): r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertContains(r, assignment.review_request.team.list_email) - for author in assignment.review_request.doc.authors(): + for author in assignment.review_request.doc.author_persons(): self.assertContains(r, author.formatted_email()) # faulty post diff --git a/ietf/doc/utils.py b/ietf/doc/utils.py index 2bd9a3d314..0715471551 100644 --- a/ietf/doc/utils.py +++ b/ietf/doc/utils.py @@ -13,7 +13,7 @@ from dataclasses import dataclass from hashlib import sha384 from pathlib import Path -from typing import Iterator, Optional, Union +from typing import Iterator, Optional, Union, Iterable from zoneinfo import ZoneInfo from django.conf import settings @@ -33,7 +33,14 @@ from ietf.community.models import CommunityList from ietf.community.utils import docs_tracked_by_community_list -from ietf.doc.models import Document, DocHistory, State, DocumentAuthor, DocHistoryAuthor +from ietf.doc.models import ( + DocHistory, + DocHistoryAuthor, + Document, + DocumentAuthor, + RfcAuthor, + State, EditedRfcAuthorsDocEvent, +) from ietf.doc.models import RelatedDocument, RelatedDocHistory, BallotType, DocReminder from ietf.doc.models import DocEvent, ConsensusDocEvent, BallotDocEvent, IRSGBallotDocEvent, 
NewRevisionDocEvent, StateDocEvent from ietf.doc.models import TelechatDocEvent, DocumentActionHolder, EditedAuthorsDocEvent, BallotPositionDocEvent @@ -534,7 +541,7 @@ def update_action_holders(doc, prev_state=None, new_state=None, prev_tags=None, doc.action_holders.clear() if tags.removed("need-rev"): # Removed the 'need-rev' tag - drop authors from the action holders list - DocumentActionHolder.objects.filter(document=doc, person__in=doc.authors()).delete() + DocumentActionHolder.objects.filter(document=doc, person__in=doc.author_persons()).delete() elif tags.added("need-rev"): # Remove the AD if we're asking for a new revision DocumentActionHolder.objects.filter(document=doc, person=doc.ad).delete() @@ -549,7 +556,7 @@ def update_action_holders(doc, prev_state=None, new_state=None, prev_tags=None, doc.action_holders.add(doc.ad) # Authors get the action if a revision is needed if tags.added("need-rev"): - for auth in doc.authors(): + for auth in doc.author_persons(): doc.action_holders.add(auth) # Now create an event if we changed the set @@ -561,6 +568,40 @@ def update_action_holders(doc, prev_state=None, new_state=None, prev_tags=None, ) +def _change_field_and_describe( + author: DocumentAuthor | RfcAuthor, + field: str, + newval, + field_display_name: str | None = None, +): + # make the change + oldval = getattr(author, field) + setattr(author, field, newval) + + was_empty = oldval is None or len(str(oldval)) == 0 + now_empty = newval is None or len(str(newval)) == 0 + + # describe the change + if oldval == newval: + return None + else: + if field_display_name is None: + field_display_name = field + + if was_empty and not now_empty: + return 'set {field} to "{new}"'.format( + field=field_display_name, new=newval + ) + elif now_empty and not was_empty: + return 'cleared {field} (was "{old}")'.format( + field=field_display_name, old=oldval + ) + else: + return 'changed {field} from "{old}" to "{new}"'.format( + field=field_display_name, old=oldval, new=newval + 
) + + def update_documentauthors(doc, new_docauthors, by=None, basis=None): """Update the list of authors for a document @@ -573,27 +614,6 @@ def update_documentauthors(doc, new_docauthors, by=None, basis=None): used. These objects will not be saved, their attributes will be used to create new DocumentAuthor instances. (The document and order fields will be ignored.) """ - def _change_field_and_describe(auth, field, newval): - # make the change - oldval = getattr(auth, field) - setattr(auth, field, newval) - - was_empty = oldval is None or len(str(oldval)) == 0 - now_empty = newval is None or len(str(newval)) == 0 - - # describe the change - if oldval == newval: - return None - else: - if was_empty and not now_empty: - return 'set {field} to "{new}"'.format(field=field, new=newval) - elif now_empty and not was_empty: - return 'cleared {field} (was "{old}")'.format(field=field, old=oldval) - else: - return 'changed {field} from "{old}" to "{new}"'.format( - field=field, old=oldval, new=newval - ) - persons = [] changes = [] # list of change descriptions @@ -637,6 +657,111 @@ def _change_field_and_describe(auth, field, newval): ) for change in changes ] + +def update_rfcauthors( + rfc: Document, new_rfcauthors: Iterable[RfcAuthor], by: Person | None = None +) -> Iterable[EditedRfcAuthorsDocEvent]: + def _find_matching_author( + author_to_match: RfcAuthor, existing_authors: Iterable[RfcAuthor] + ) -> RfcAuthor | None: + """Helper to find a matching existing author""" + if author_to_match.person_id is not None: + for candidate in existing_authors: + if candidate.person_id == author_to_match.person_id: + return candidate + return None # no match + # author does not have a person, match on titlepage name + for candidate in existing_authors: + if candidate.titlepage_name == author_to_match.titlepage_name: + return candidate + return None # no match + + def _rfcauthor_from_documentauthor(docauthor: DocumentAuthor) -> RfcAuthor: + """Helper to create an equivalent RfcAuthor 
from a DocumentAuthor""" + return RfcAuthor( + document_id=docauthor.document_id, + titlepage_name=docauthor.person.plain_name(), # closest thing we have + is_editor=False, + person_id=docauthor.person_id, + affiliation=docauthor.affiliation, + country=docauthor.country, + order=docauthor.order, + ) + + # Is this the first time this document is getting an RfcAuthor? If so, the + # updates will need to account for the model change. + converting_from_docauthors = not rfc.rfcauthor_set.exists() + + if converting_from_docauthors: + original_authors = [ + _rfcauthor_from_documentauthor(da) for da in rfc.documentauthor_set.all() + ] + else: + original_authors = list(rfc.rfcauthor_set.all()) + + authors_to_commit = [] + changes = [] + for order, new_author in enumerate(new_rfcauthors): + matching_author = _find_matching_author(new_author, original_authors) + if matching_author is not None: + # Update existing matching author using new_author data + authors_to_commit.append(matching_author) + original_authors.remove(matching_author) # avoid reuse + # Describe changes to this author + author_changes = [] + # Update fields other than order + for field in ["titlepage_name", "is_editor", "affiliation", "country"]: + author_changes.append( + _change_field_and_describe( + matching_author, + field, + getattr(new_author, field), + # List titlepage_name as "name" in logs + "name" if field == "titlepage_name" else field, + ) + ) + # Update order + author_changes.append( + _change_field_and_describe(matching_author, "order", order + 1) + ) + matching_author.save() + author_change_summary = ", ".join( + [ch for ch in author_changes if ch is not None] + ) + if len(author_change_summary) > 0: + changes.append( + 'Changed author "{name}": {summary}'.format( + name=matching_author.titlepage_name, + summary=author_change_summary, + ) + ) + else: + # No author matched, so update the new_author and use that + new_author.document = rfc + new_author.order = order + 1 + new_author.save() + 
changes.append(f'Added "{new_author.titlepage_name}" as author') + # Any authors left in original_authors are no longer in the list, so remove them + for removed_author in original_authors: + # Skip actual removal of old authors if we are converting from the + # DocumentAuthor models - the original_authors were just stand-ins anyway. + if not converting_from_docauthors: + removed_author.delete() + changes.append(f'Removed "{removed_author.titlepage_name}" as author') + # Create DocEvents, but leave it up to caller to save + if by is None: + by = Person.objects.get(name="(System)") + return [ + EditedRfcAuthorsDocEvent( + type="edited_authors", + by=by, + doc=rfc, + desc=change, + ) + for change in changes + ] + + def update_reminder(doc, reminder_type_slug, event, due_date): reminder_type = DocReminderTypeName.objects.get(slug=reminder_type_slug) diff --git a/ietf/doc/views_doc.py b/ietf/doc/views_doc.py index 5564904504..0578da1b77 100644 --- a/ietf/doc/views_doc.py +++ b/ietf/doc/views_doc.py @@ -1653,11 +1653,18 @@ def extract_name(s): data["state"] = extract_name(doc.get_state()) data["intended_std_level"] = extract_name(doc.intended_std_level) data["std_level"] = extract_name(doc.std_level) + author_qs = ( + doc.rfcauthor_set + if doc.type_id == "rfc" and doc.rfcauthor_set.exists() + else doc.documentauthor_set + ).select_related("person", "email").order_by("order") data["authors"] = [ - dict(name=author.person.name, - email=author.email.address if author.email else None, - affiliation=author.affiliation) - for author in doc.documentauthor_set.all().select_related("person", "email").order_by("order") + { + "name": author.titlepage_name if hasattr(author, "titlepage_name") else author.person.name, + "email": author.email.address if author.email else None, + "affiliation": author.affiliation, + } + for author in author_qs ] data["shepherd"] = doc.shepherd.formatted_email() if doc.shepherd else None data["ad"] = doc.ad.role_email("ad").formatted_email() if doc.ad 
else None @@ -1941,9 +1948,9 @@ def edit_action_holders(request, name): role_ids = dict() # maps role slug to list of Person IDs (assumed numeric in the JavaScript) extra_prefetch = [] # list of Person objects to prefetch for select2 field - if len(doc.authors()) > 0: + authors = doc.author_persons() + if len(authors) > 0: doc_role_labels.append(dict(slug='authors', label='Authors')) - authors = doc.authors() role_ids['authors'] = [p.pk for p in authors] extra_prefetch += authors diff --git a/ietf/doc/views_search.py b/ietf/doc/views_search.py index 3b67061b05..4232d77f6c 100644 --- a/ietf/doc/views_search.py +++ b/ietf/doc/views_search.py @@ -219,7 +219,7 @@ def retrieve_search_results(form, all_types=False): queries.extend([Q(targets_related__source__name__icontains=look_for, targets_related__relationship_id="became_rfc")]) combined_query = reduce(operator.or_, queries) - docs = docs.filter(combined_query).distinct() + docs = docs.filter(combined_query) # rfc/active/old check buttons allowed_draft_states = [] @@ -229,20 +229,23 @@ def retrieve_search_results(form, all_types=False): allowed_draft_states.extend(['repl', 'expired', 'auth-rm', 'ietf-rm']) docs = docs.filter(Q(states__slug__in=allowed_draft_states) | - ~Q(type__slug='draft')).distinct() + ~Q(type__slug='draft')) # radio choices by = query["by"] if by == "author": docs = docs.filter( Q(documentauthor__person__alias__name__icontains=query["author"]) | - Q(documentauthor__person__email__address__icontains=query["author"]) + Q(documentauthor__person__email__address__icontains=query["author"]) | + Q(rfcauthor__person__alias__name__icontains=query["author"]) | + Q(rfcauthor__person__email__address__icontains=query["author"]) | + Q(rfcauthor__titlepage_name__icontains=query["author"]) ) elif by == "group": docs = docs.filter(group__acronym__iexact=query["group"]) elif by == "area": docs = docs.filter(Q(group__type="wg", group__parent=query["area"]) | - Q(group=query["area"])).distinct() + 
Q(group=query["area"])) elif by == "ad": docs = docs.filter(ad=query["ad"]) elif by == "state": @@ -255,6 +258,8 @@ def retrieve_search_results(form, all_types=False): elif by == "stream": docs = docs.filter(stream=query["stream"]) + docs=docs.distinct() + return docs diff --git a/ietf/group/models.py b/ietf/group/models.py index 2d5e7c4e6f..a7e3c6616e 100644 --- a/ietf/group/models.py +++ b/ietf/group/models.py @@ -111,6 +111,9 @@ def active_wgs(self): def closed_wgs(self): return self.wgs().exclude(state__in=Group.ACTIVE_STATE_IDS) + def areas(self): + return self.get_queryset().filter(type="area") + def with_meetings(self): return self.get_queryset().filter(type__features__has_meetings=True) diff --git a/ietf/group/serializers.py b/ietf/group/serializers.py new file mode 100644 index 0000000000..08e6bba81a --- /dev/null +++ b/ietf/group/serializers.py @@ -0,0 +1,11 @@ +# Copyright The IETF Trust 2024, All Rights Reserved +"""django-rest-framework serializers""" +from rest_framework import serializers + +from .models import Group + + +class GroupSerializer(serializers.ModelSerializer): + class Meta: + model = Group + fields = ["acronym", "name", "type"] diff --git a/ietf/iesg/agenda.py b/ietf/iesg/agenda.py index 587713089f..ace4c9ec40 100644 --- a/ietf/iesg/agenda.py +++ b/ietf/iesg/agenda.py @@ -133,7 +133,7 @@ def agenda_sections(): ('4.2', {'title':"WG rechartering"}), ('4.2.1', {'title':"Under evaluation for IETF review", 'docs':[]}), ('4.2.2', {'title':"Proposed for approval", 'docs':[]}), - ('5', {'title':"IAB news we can use"}), + ('5', {'title':"IESG Liaison News"}), ('6', {'title':"Management issues"}), ('7', {'title':"Any Other Business (WG News, New Proposals, etc.)"}), ]) diff --git a/ietf/ietfauth/utils.py b/ietf/ietfauth/utils.py index 1f634278be..0df667fbd2 100644 --- a/ietf/ietfauth/utils.py +++ b/ietf/ietfauth/utils.py @@ -287,7 +287,7 @@ def is_individual_draft_author(user, doc): if not hasattr(user, 'person'): return False - if user.person in 
doc.authors(): + if user.person in doc.author_persons(): return True return False diff --git a/ietf/ipr/views.py b/ietf/ipr/views.py index 665c99dc43..0a43ff2c27 100644 --- a/ietf/ipr/views.py +++ b/ietf/ipr/views.py @@ -81,7 +81,8 @@ def get_document_emails(ipr): addrs = gather_address_lists('ipr_posted_on_doc',doc=doc).as_strings(compact=False) - author_names = ', '.join(a.person.name for a in doc.documentauthor_set.select_related("person")) + # Get a list of author names for the salutation in the body of the email + author_names = ', '.join(doc.author_names()) context = dict( settings=settings, diff --git a/ietf/name/serializers.py b/ietf/name/serializers.py new file mode 100644 index 0000000000..a764f56051 --- /dev/null +++ b/ietf/name/serializers.py @@ -0,0 +1,11 @@ +# Copyright The IETF Trust 2024, All Rights Reserved +"""django-rest-framework serializers""" +from rest_framework import serializers + +from .models import StreamName + + +class StreamNameSerializer(serializers.ModelSerializer): + class Meta: + model = StreamName + fields = ["slug", "name", "desc"] diff --git a/ietf/nomcom/tests.py b/ietf/nomcom/tests.py index dcdb9ef836..b6e8c57da7 100644 --- a/ietf/nomcom/tests.py +++ b/ietf/nomcom/tests.py @@ -1,5 +1,4 @@ -# Copyright The IETF Trust 2012-2023, All Rights Reserved -# -*- coding: utf-8 -*- +# Copyright The IETF Trust 2012-2025, All Rights Reserved import datetime @@ -27,8 +26,14 @@ from ietf.api.views import EmailIngestionError from ietf.dbtemplate.factories import DBTemplateFactory from ietf.dbtemplate.models import DBTemplate -from ietf.doc.factories import DocEventFactory, WgDocumentAuthorFactory, \ - NewRevisionDocEventFactory, DocumentAuthorFactory +from ietf.doc.factories import ( + DocEventFactory, + WgDocumentAuthorFactory, + NewRevisionDocEventFactory, + DocumentAuthorFactory, + RfcAuthorFactory, + WgDraftFactory, WgRfcFactory, +) from ietf.group.factories import GroupFactory, GroupHistoryFactory, RoleFactory, RoleHistoryFactory from 
ietf.group.models import Group, Role from ietf.meeting.factories import MeetingFactory, AttendedFactory, RegistrationFactory @@ -45,10 +50,20 @@ nomcom_kwargs_for_year, provide_private_key_to_test_client, \ key from ietf.nomcom.tasks import send_nomcom_reminders_task -from ietf.nomcom.utils import get_nomcom_by_year, make_nomineeposition, \ - get_hash_nominee_position, is_eligible, list_eligible, \ - get_eligibility_date, suggest_affiliation, ingest_feedback_email, \ - decorate_volunteers_with_qualifications, send_reminders, _is_time_to_send_reminder +from ietf.nomcom.utils import ( + get_nomcom_by_year, + make_nomineeposition, + get_hash_nominee_position, + is_eligible, + list_eligible, + get_eligibility_date, + suggest_affiliation, + ingest_feedback_email, + decorate_volunteers_with_qualifications, + send_reminders, + _is_time_to_send_reminder, + get_qualified_author_queryset, +) from ietf.person.factories import PersonFactory, EmailFactory from ietf.person.models import Email, Person from ietf.utils.mail import outbox, empty_outbox, get_payload_text @@ -2440,6 +2455,86 @@ def test_get_eligibility_date(self): NomComFactory(group__acronym=f'nomcom{this_year}', first_call_for_volunteers=datetime.date(this_year,5,6)) self.assertEqual(get_eligibility_date(),datetime.date(this_year,5,6)) + def test_get_qualified_author_queryset(self): + """get_qualified_author_queryset implements the eligiblity rules correctly + + This is not an exhaustive test of corner cases. Overlaps considerably with + rfc8989EligibilityTests.test_elig_by_author(). 
+ """ + people = PersonFactory.create_batch(2) + extra_person = PersonFactory() + base_qs = Person.objects.filter(pk__in=[person.pk for person in people]) + now = datetime.datetime.now(tz=datetime.UTC) + one_year = datetime.timedelta(days=365) + + # Authors with no qualifying drafts + self.assertCountEqual( + get_qualified_author_queryset(base_qs, now - 5 * one_year, now), [] + ) + + # Authors with one qualifying draft + approved_draft = WgDraftFactory(authors=people, states=[("draft", "active")]) + DocEventFactory( + type="iesg_approved", + doc=approved_draft, + time=now - 4 * one_year, + ) + self.assertCountEqual( + get_qualified_author_queryset(base_qs, now - 5 * one_year, now), [] + ) + + # Create a draft that was published into an RFC. Give it an extra author who + # should not be eligible. + published_draft = WgDraftFactory(authors=people, states=[("draft", "rfc")]) + DocEventFactory( + type="iesg_approved", + doc=published_draft, + time=now - 5.5 * one_year, # < 6 years ago + ) + rfc = WgRfcFactory( + authors=people + [extra_person], + group=published_draft.group, + ) + DocEventFactory( + type="published_rfc", + doc=rfc, + time=now - 0.5 * one_year, # < 1 year ago + ) + # Period 6 years ago to 1 year ago - authors are eligible due to the + # iesg-approved draft in this window + self.assertCountEqual( + get_qualified_author_queryset(base_qs, now - 6 * one_year, now - one_year), + people, + ) + + # Period 5 years ago to now - authors are eligible due to the RFC publication + self.assertCountEqual( + get_qualified_author_queryset(base_qs, now - 5 * one_year, now), + people, + ) + + # Use the extra_person to check that a single doc can't count both as an + # RFC _and_ an approved draft. Use an eligibility interval that includes both + # the approval and the RFC publication + self.assertCountEqual( + get_qualified_author_queryset(base_qs, now - 6 * one_year, now), + people, # does not include extra_person! 
+ ) + + # Now add an RfcAuthor for only one of the two authors to the RFC. This should + # remove the other author from the eligibility list because the DocumentAuthor + # records are no longer used. + RfcAuthorFactory( + document=rfc, + person=people[0], + titlepage_name="P. Zero", + email=people[0].email_set.first(), + ) + self.assertCountEqual( + get_qualified_author_queryset(base_qs, now - 5 * one_year, now), + [people[0]], + ) + class rfc8713EligibilityTests(TestCase): @@ -2724,33 +2819,41 @@ def test_elig_by_author(self): ineligible = set() p = PersonFactory() - ineligible.add(p) - + ineligible.add(p) # no RFCs or iesg-approved drafts p = PersonFactory() - da = WgDocumentAuthorFactory(person=p) - DocEventFactory(type='published_rfc',doc=da.document,time=middle_date) - ineligible.add(p) + doc = WgRfcFactory(authors=[p]) + DocEventFactory(type='published_rfc', doc=doc, time=middle_date) + ineligible.add(p) # only one RFC p = PersonFactory() - da = WgDocumentAuthorFactory(person=p) + da = WgDocumentAuthorFactory( + person=p, + document__states=[("draft", "active"), ("draft-rfceditor", "ref")], + ) DocEventFactory(type='iesg_approved',doc=da.document,time=last_date) - da = WgDocumentAuthorFactory(person=p) - DocEventFactory(type='published_rfc',doc=da.document,time=first_date) - eligible.add(p) + doc = WgRfcFactory(authors=[p]) + DocEventFactory(type='published_rfc', doc=doc, time=first_date) + eligible.add(p) # one RFC and one iesg-approved draft p = PersonFactory() - da = WgDocumentAuthorFactory(person=p) + da = WgDocumentAuthorFactory( + person=p, + document__states=[("draft", "active"), ("draft-rfceditor", "ref")], + ) DocEventFactory(type='iesg_approved',doc=da.document,time=middle_date) - da = WgDocumentAuthorFactory(person=p) - DocEventFactory(type='published_rfc',doc=da.document,time=day_before_first_date) - ineligible.add(p) + doc = WgRfcFactory(authors=[p]) + DocEventFactory(type='published_rfc', doc=doc, time=day_before_first_date) + ineligible.add(p) 
# RFC is out of the eligibility window p = PersonFactory() - da = WgDocumentAuthorFactory(person=p) + da = WgDocumentAuthorFactory( + person=p, + document__states=[("draft", "active"), ("draft-rfceditor", "ref")], + ) DocEventFactory(type='iesg_approved',doc=da.document,time=day_after_last_date) - da = WgDocumentAuthorFactory(person=p) - DocEventFactory(type='published_rfc',doc=da.document,time=middle_date) - ineligible.add(p) + doc = WgRfcFactory(authors=[p]) + DocEventFactory(type='published_rfc', doc=doc, time=middle_date) + ineligible.add(p) # iesg approval is outside the eligibility window for person in eligible: self.assertTrue(is_eligible(person,nomcom)) @@ -2878,15 +2981,38 @@ def test_volunteer(self): def test_suggest_affiliation(self): person = PersonFactory() - self.assertEqual(suggest_affiliation(person), '') - da = DocumentAuthorFactory(person=person,affiliation='auth_affil') + self.assertEqual(suggest_affiliation(person), "") + rfc_da = DocumentAuthorFactory( + person=person, + document__type_id="rfc", + affiliation="", + ) + rfc = rfc_da.document + DocEventFactory(doc=rfc, type="published_rfc") + self.assertEqual(suggest_affiliation(person), "") + + rfc_da.affiliation = "rfc_da_affil" + rfc_da.save() + self.assertEqual(suggest_affiliation(person), "rfc_da_affil") + + rfc_ra = RfcAuthorFactory(person=person, document=rfc, affiliation="") + self.assertEqual(suggest_affiliation(person), "") + + rfc_ra.affiliation = "rfc_ra_affil" + rfc_ra.save() + self.assertEqual(suggest_affiliation(person), "rfc_ra_affil") + + da = DocumentAuthorFactory(person=person, affiliation="auth_affil") NewRevisionDocEventFactory(doc=da.document) - self.assertEqual(suggest_affiliation(person), 'auth_affil') + self.assertEqual(suggest_affiliation(person), "auth_affil") + nc = NomComFactory() - nc.volunteer_set.create(person=person,affiliation='volunteer_affil') - self.assertEqual(suggest_affiliation(person), 'volunteer_affil') - RegistrationFactory(person=person, 
affiliation='meeting_affil') - self.assertEqual(suggest_affiliation(person), 'meeting_affil') + nc.volunteer_set.create(person=person, affiliation="volunteer_affil") + self.assertEqual(suggest_affiliation(person), "volunteer_affil") + + RegistrationFactory(person=person, affiliation="meeting_affil") + self.assertEqual(suggest_affiliation(person), "meeting_affil") + class VolunteerDecoratorUnitTests(TestCase): def test_decorate_volunteers_with_qualifications(self): @@ -2922,10 +3048,10 @@ def test_decorate_volunteers_with_qualifications(self): author_person = PersonFactory() for i in range(2): - da = WgDocumentAuthorFactory(person=author_person) + doc = WgRfcFactory(authors=[author_person]) DocEventFactory( type='published_rfc', - doc=da.document, + doc=doc, time=datetime.datetime( elig_date.year - 3, elig_date.month, diff --git a/ietf/nomcom/utils.py b/ietf/nomcom/utils.py index dd651c2941..a2ab680df6 100644 --- a/ietf/nomcom/utils.py +++ b/ietf/nomcom/utils.py @@ -18,7 +18,7 @@ from email.utils import parseaddr from textwrap import dedent -from django.db.models import Q, Count +from django.db.models import Q, Count, F, QuerySet from django.conf import settings from django.contrib.sites.models import Site from django.core.exceptions import ObjectDoesNotExist @@ -27,7 +27,7 @@ from django.shortcuts import get_object_or_404 from ietf.dbtemplate.models import DBTemplate -from ietf.doc.models import DocEvent, NewRevisionDocEvent +from ietf.doc.models import DocEvent, NewRevisionDocEvent, Document from ietf.group.models import Group, Role from ietf.person.models import Email, Person from ietf.mailtrigger.utils import gather_address_lists @@ -576,6 +576,70 @@ def get_8989_eligibility_querysets(date, base_qs): def get_9389_eligibility_querysets(date, base_qs): return get_threerule_eligibility_querysets(date, base_qs, three_of_five_callable=three_of_five_eligible_9389) + +def get_qualified_author_queryset( + base_qs: QuerySet[Person], + eligibility_period_start: 
datetime.datetime, + eligibility_period_end: datetime.datetime, +): + """Filter a Person queryset, keeping those qualified by RFC 8989's author path + + The author path is defined by "path 3" in section 4 of RFC 8989. It qualifies + a person who has been a front-page listed author or editor of at least two IETF- + stream RFCs within the last five years. An I-D in the RFC Editor queue that was + approved by the IESG is treated as an RFC, using the date of entry to the RFC + Editor queue as the date for qualification. + + This method does not strictly enforce "in the RFC Editor queue" for IESG-approved + drafts when computing eligibility. In the overwhelming majority of cases, an IESG- + approved draft immediately enters the queue and goes on to be published, so this + simplification makes the calculation much easier and virtually never affects + eligibility. + + Arguments eligibility_period_start and eligibility_period_end are datetimes that + mark the start and end of the eligibility period. These should be five years apart. 
+ """ + # First, get the RFCs using publication date + qualifying_rfc_pub_events = DocEvent.objects.filter( + type='published_rfc', + time__gte=eligibility_period_start, + time__lte=eligibility_period_end, + ) + qualifying_rfcs = Document.objects.filter( + type_id="rfc", + docevent__in=qualifying_rfc_pub_events + ).annotate( + rfcauthor_count=Count("rfcauthor") + ) + rfcs_with_rfcauthors = qualifying_rfcs.filter(rfcauthor_count__gt=0).distinct() + rfcs_without_rfcauthors = qualifying_rfcs.filter(rfcauthor_count=0).distinct() + + # Second, get the IESG-approved I-Ds excluding any we're already counting as rfcs + qualifying_approval_events = DocEvent.objects.filter( + type='iesg_approved', + time__gte=eligibility_period_start, + time__lte=eligibility_period_end, + ) + qualifying_drafts = Document.objects.filter( + type_id="draft", + docevent__in=qualifying_approval_events, + ).exclude( + relateddocument__relationship_id="became_rfc", + relateddocument__target__in=qualifying_rfcs, + ).distinct() + + return base_qs.filter( + Q(documentauthor__document__in=qualifying_drafts) + | Q(rfcauthor__document__in=rfcs_with_rfcauthors) + | Q(documentauthor__document__in=rfcs_without_rfcauthors) + ).annotate( + document_author_count=Count('documentauthor'), + rfc_author_count=Count("rfcauthor") + ).annotate( + authorship_count=F("document_author_count") + F("rfc_author_count") + ).filter(authorship_count__gte=2) + + def get_threerule_eligibility_querysets(date, base_qs, three_of_five_callable): if not base_qs: base_qs = Person.objects.all() @@ -608,14 +672,7 @@ def get_threerule_eligibility_querysets(date, base_qs, three_of_five_callable): ) ).distinct() - rfc_pks = set(DocEvent.objects.filter(type='published_rfc', time__gte=five_years_ago, time__lte=date_as_dt).values_list('doc__pk', flat=True)) - iesgappr_pks = set(DocEvent.objects.filter(type='iesg_approved', time__gte=five_years_ago, time__lte=date_as_dt).values_list('doc__pk',flat=True)) - qualifying_pks = 
rfc_pks.union(iesgappr_pks.difference(rfc_pks)) - author_qs = base_qs.filter( - documentauthor__document__pk__in=qualifying_pks - ).annotate( - document_author_count = Count('documentauthor') - ).filter(document_author_count__gte=2) + author_qs = get_qualified_author_queryset(base_qs, five_years_ago, date_as_dt) return three_of_five_qs, officer_qs, author_qs def list_eligible_8989(date, base_qs=None): @@ -691,18 +748,42 @@ def three_of_five_eligible_9389(previous_five, queryset=None): counts[id] += 1 return queryset.filter(pk__in=[id for id, count in counts.items() if count >= 3]) -def suggest_affiliation(person): +def suggest_affiliation(person) -> str: + """Heuristically suggest a current affiliation for a Person""" recent_meeting = person.registration_set.order_by('-meeting__date').first() - affiliation = recent_meeting.affiliation if recent_meeting else '' - if not affiliation: - recent_volunteer = person.volunteer_set.order_by('-nomcom__group__acronym').first() - if recent_volunteer: - affiliation = recent_volunteer.affiliation - if not affiliation: - recent_draft_revision = NewRevisionDocEvent.objects.filter(doc__type_id='draft',doc__documentauthor__person=person).order_by('-time').first() - if recent_draft_revision: - affiliation = recent_draft_revision.doc.documentauthor_set.filter(person=person).first().affiliation - return affiliation + if recent_meeting and recent_meeting.affiliation: + return recent_meeting.affiliation + + recent_volunteer = person.volunteer_set.order_by('-nomcom__group__acronym').first() + if recent_volunteer and recent_volunteer.affiliation: + return recent_volunteer.affiliation + + recent_draft_revision = NewRevisionDocEvent.objects.filter( + doc__type_id="draft", + doc__documentauthor__person=person, + ).order_by("-time").first() + if recent_draft_revision: + draft_author = recent_draft_revision.doc.documentauthor_set.filter( + person=person + ).first() + if draft_author and draft_author.affiliation: + return 
draft_author.affiliation + + recent_rfc_publication = DocEvent.objects.filter( + Q(doc__documentauthor__person=person) | Q(doc__rfcauthor__person=person), + doc__type_id="rfc", + type="published_rfc", + ).order_by("-time").first() + if recent_rfc_publication: + rfc = recent_rfc_publication.doc + if rfc.rfcauthor_set.exists(): + rfc_author = rfc.rfcauthor_set.filter(person=person).first() + else: + rfc_author = rfc.documentauthor_set.filter(person=person).first() + if rfc_author and rfc_author.affiliation: + return rfc_author.affiliation + return "" + def extract_volunteers(year): nomcom = get_nomcom_by_year(year) diff --git a/ietf/person/models.py b/ietf/person/models.py index 03cf0c87fb..3ab89289a6 100644 --- a/ietf/person/models.py +++ b/ietf/person/models.py @@ -87,7 +87,7 @@ def short(self): else: prefix, first, middle, last, suffix = self.ascii_parts() return (first and first[0]+"." or "")+(middle or "")+" "+last+(suffix and " "+suffix or "") - def plain_name(self): + def plain_name(self) -> str: if not hasattr(self, '_cached_plain_name'): if self.plain: self._cached_plain_name = self.plain @@ -203,7 +203,10 @@ def has_drafts(self): def rfcs(self): from ietf.doc.models import Document - rfcs = list(Document.objects.filter(documentauthor__person=self, type='rfc')) + # When RfcAuthors are populated, this may over-return if an author is dropped + # from the author list between the final draft and the published RFC. Should + # ignore DocumentAuthors when an RfcAuthor exists for a draft. 
+ rfcs = list(Document.objects.filter(type="rfc").filter(models.Q(documentauthor__person=self)|models.Q(rfcauthor__person=self)).distinct()) rfcs.sort(key=lambda d: d.name ) return rfcs @@ -266,11 +269,16 @@ def available_api_endpoints(self): def cdn_photo_url(self, size=80): if self.photo: if settings.SERVE_CDN_PHOTOS: + if settings.SERVER_MODE != "production": + original_media_dir = settings.MEDIA_URL + settings.MEDIA_URL = "https://www.ietf.org/lib/dt/media/" source_url = self.photo.url if source_url.startswith(settings.IETF_HOST_URL): source_url = source_url[len(settings.IETF_HOST_URL):] elif source_url.startswith('/'): source_url = source_url[1:] + if settings.SERVER_MODE != "production": + settings.MEDIA_URL = original_media_dir return f'{settings.IETF_HOST_URL}cdn-cgi/image/fit=scale-down,width={size},height={size}/{source_url}' else: datatracker_photo_path = urlreverse('ietf.person.views.photo', kwargs={'email_or_name': self.email()}) diff --git a/ietf/secr/telechat/tests.py b/ietf/secr/telechat/tests.py index fa26d33a5c..91ccde2187 100644 --- a/ietf/secr/telechat/tests.py +++ b/ietf/secr/telechat/tests.py @@ -256,7 +256,7 @@ def test_doc_detail_post_update_state_action_holder_automation(self): self.assertEqual(response.status_code,302) draft = Document.objects.get(name=draft.name) self.assertEqual(draft.get_state('draft-iesg').slug,'defer') - self.assertCountEqual(draft.action_holders.all(), [draft.ad] + draft.authors()) + self.assertCountEqual(draft.action_holders.all(), [draft.ad] + draft.author_persons()) self.assertEqual(draft.docevent_set.filter(type='changed_action_holders').count(), 1) # Removing need-rev should remove authors @@ -273,7 +273,7 @@ def test_doc_detail_post_update_state_action_holder_automation(self): # Setting to approved should remove all action holders # noinspection DjangoOrm - draft.action_holders.add(*(draft.authors())) # add() with through model ok in Django 2.2+ + draft.action_holders.add(*(draft.author_persons())) # add() with 
through model ok in Django 2.2+ response = self.client.post(url,{ 'submit': 'update_state', 'state': State.objects.get(type_id='draft-iesg', slug='approved').pk, diff --git a/ietf/settings.py b/ietf/settings.py index f8d8a28d65..fedd313ca0 100644 --- a/ietf/settings.py +++ b/ietf/settings.py @@ -22,6 +22,7 @@ warnings.filterwarnings("ignore", message="The django.utils.timezone.utc alias is deprecated.", module="oidc_provider") warnings.filterwarnings("ignore", message="The django.utils.datetime_safe module is deprecated.", module="tastypie") warnings.filterwarnings("ignore", message="The USE_DEPRECATED_PYTZ setting,") # https://github.com/ietf-tools/datatracker/issues/5635 +warnings.filterwarnings("ignore", message="The is_dst argument to make_aware\\(\\)") # caused by django-filters when USE_DEPRECATED_PYTZ is true warnings.filterwarnings("ignore", message="The USE_L10N setting is deprecated.") # https://github.com/ietf-tools/datatracker/issues/5648 warnings.filterwarnings("ignore", message="django.contrib.auth.hashers.CryptPasswordHasher is deprecated.") # https://github.com/ietf-tools/datatracker/issues/5663 @@ -35,6 +36,8 @@ warnings.filterwarnings("ignore", message="datetime.datetime.utcfromtimestamp\\(\\) is deprecated", module="oic.utils.time_util") warnings.filterwarnings("ignore", message="datetime.datetime.utcfromtimestamp\\(\\) is deprecated", module="pytz.tzinfo") warnings.filterwarnings("ignore", message="'instantiateVariableFont' is deprecated", module="weasyprint") +warnings.filterwarnings("ignore", category=DeprecationWarning, module="bibtexparser") # https://github.com/sciunto-org/python-bibtexparser/issues/502 +warnings.filterwarnings("ignore", category=DeprecationWarning, module="pyparsing") # https://github.com/sciunto-org/python-bibtexparser/issues/502 base_path = pathlib.Path(__file__).resolve().parent @@ -500,6 +503,7 @@ def skip_unreadable_post(record): 'django_celery_results', 'corsheaders', 'django_markup', + 'django_filters', 
'oidc_provider', 'drf_spectacular', 'drf_standardized_errors', diff --git a/ietf/submit/tests.py b/ietf/submit/tests.py index ede63d2752..400d0d8c7d 100644 --- a/ietf/submit/tests.py +++ b/ietf/submit/tests.py @@ -595,7 +595,7 @@ def submit_existing(self, formats, change_authors=True, group_type='wg', stream_ TestBlobstoreManager().emptyTestBlobstores() def _assert_authors_are_action_holders(draft, expect=True): - for author in draft.authors(): + for author in draft.author_persons(): if expect: self.assertIn(author, draft.action_holders.all()) else: @@ -2404,7 +2404,7 @@ def test_upload_draft(self): response = r.json() self.assertCountEqual( response.keys(), - ['id', 'name', 'rev', 'status_url'], + ['id', 'name', 'rev', 'status_url', 'submission_url'], ) submission_id = int(response['id']) self.assertEqual(response['name'], 'draft-somebody-test') @@ -2416,6 +2416,13 @@ def test_upload_draft(self): kwargs={'submission_id': submission_id}, ), ) + self.assertEqual( + response['submission_url'], + 'https://datatracker.example.com' + urlreverse( + 'ietf.submit.views.submission_status', + kwargs={'submission_id': submission_id}, + ) + ) self.assertEqual(mock_task.delay.call_count, 1) self.assertEqual(mock_task.delay.call_args.args, (submission_id,)) submission = Submission.objects.get(pk=submission_id) diff --git a/ietf/submit/utils.py b/ietf/submit/utils.py index a0c7dd8511..9a7c358a6d 100644 --- a/ietf/submit/utils.py +++ b/ietf/submit/utils.py @@ -1268,7 +1268,7 @@ def process_submission_text(filename, revision): if title: title = _normalize_title(title) - # Translation taable drops \r, \n, <, >. + # Translation table drops \r, \n, <, >. 
trans_table = str.maketrans("", "", "\r\n<>") authors = [ { diff --git a/ietf/submit/views.py b/ietf/submit/views.py index 8329a312bb..2db3f51098 100644 --- a/ietf/submit/views.py +++ b/ietf/submit/views.py @@ -182,6 +182,10 @@ def err(code, error, messages=None): settings.IDTRACKER_BASE_URL, urlreverse(api_submission_status, kwargs={'submission_id': submission.pk}), ), + 'submission_url': urljoin( + settings.IDTRACKER_BASE_URL, + urlreverse("ietf.submit.views.submission_status", kwargs={'submission_id': submission.pk}), + ), } ) else: diff --git a/ietf/sync/rfceditor.py b/ietf/sync/rfceditor.py index b3234a87e2..cdcdeb5989 100644 --- a/ietf/sync/rfceditor.py +++ b/ietf/sync/rfceditor.py @@ -468,14 +468,18 @@ def update_docs_from_rfc_index( doc.set_state(rfc_published_state) if draft: doc.formal_languages.set(draft.formal_languages.all()) - for author in draft.documentauthor_set.all(): + # Create authors based on the last draft in the datatracker. This + # path will go away when we publish via the modernized RPC workflow + # but until then, these are the only data we have for authors that + # are easily connected to Person records. + for documentauthor in draft.documentauthor_set.all(): # Copy the author but point at the new doc. 
# See https://docs.djangoproject.com/en/4.2/topics/db/queries/#copying-model-instances - author.pk = None - author.id = None - author._state.adding = True - author.document = doc - author.save() + documentauthor.pk = None + documentauthor.id = None + documentauthor._state.adding = True + documentauthor.document = doc + documentauthor.save() if draft: draft_events = [] diff --git a/ietf/sync/tests.py b/ietf/sync/tests.py index 3432f6214a..888920ae9d 100644 --- a/ietf/sync/tests.py +++ b/ietf/sync/tests.py @@ -446,7 +446,7 @@ def test_rfc_index(self): rfc_doc = Document.objects.filter(rfc_number=1234, type_id="rfc").first() self.assertIsNotNone(rfc_doc, "RFC document should have been created") - self.assertEqual(rfc_doc.authors(), draft_doc.authors()) + self.assertEqual(rfc_doc.author_persons_or_names(), draft_doc.author_persons_or_names()) rfc_events = rfc_doc.docevent_set.all() self.assertEqual(len(rfc_events), 8) expected_events = [ diff --git a/ietf/templates/doc/document_info.html b/ietf/templates/doc/document_info.html index 71050f9d41..d6d8d43071 100644 --- a/ietf/templates/doc/document_info.html +++ b/ietf/templates/doc/document_info.html @@ -87,7 +87,7 @@ {% endif %} - Author{% if doc.pk %}{{ doc.authors|pluralize }}{% endif %} + Author{% if doc.pk %}{{ doc.author_persons_or_names|pluralize }}{% endif %} {% if can_edit_authors %} {# Implementation that uses the current primary email for each author #} - {% if doc.pk %}{% for author in doc.authors %} - {% person_link author %}{% if not forloop.last %},{% endif %} + {% if doc.pk %}{% for author in doc.author_persons_or_names %} + {% if author.person %}{% person_link author.person %}{% else %}{{ author.titlepage_name }}{% endif %}{% if not forloop.last %},{% endif %} {% endfor %}{% endif %} {% if document_html and not snapshot or document_html and doc.rev == latest_rev%}
diff --git a/ietf/templates/doc/index_active_drafts.html b/ietf/templates/doc/index_active_drafts.html index 06ea2c4ff5..607385f56f 100644 --- a/ietf/templates/doc/index_active_drafts.html +++ b/ietf/templates/doc/index_active_drafts.html @@ -29,7 +29,7 @@

Active Internet-Drafts

{% for group in groups %}

{{ group.name }} ({{ group.acronym }})

- {% for d in group.active_drafts %} + {% for d in group.active_drafts %}{# n.b., d is a dict, not a Document #}
{{ d.title }}. diff --git a/ietf/templates/doc/opengraph.html b/ietf/templates/doc/opengraph.html index 4fe39b6209..1c8c5abe91 100644 --- a/ietf/templates/doc/opengraph.html +++ b/ietf/templates/doc/opengraph.html @@ -1,4 +1,4 @@ -{# Copyright The IETF Trust 2016-2020, All Rights Reserved #} +{# Copyright The IETF Trust 2016-2025, All Rights Reserved #} {% load origin %} {% load static %} {% load ietf_filters %} @@ -36,7 +36,7 @@ {% else %}{# TODO: We need a card image for individual I-Ds. #} {% endif %} -{% if doc.pk %}{% for author in doc.documentauthor_set.all %} +{% if doc.pk %}{% for author_name in doc.author_names %} {% endfor %}{% endif %} {% if published %}{% endif %} {% if expires %}{% endif %} \ No newline at end of file diff --git a/ietf/templates/doc/review/request_info.html b/ietf/templates/doc/review/request_info.html index 9ad126d59e..51aea10a02 100644 --- a/ietf/templates/doc/review/request_info.html +++ b/ietf/templates/doc/review/request_info.html @@ -74,13 +74,13 @@ {% person_link review_req.requested_by %} {% endif %} - {% if review_req.doc.authors %} + {% if review_req.doc.author_persons_or_names %} Authors - {% for author in review_req.doc.authors %} - {% person_link author %}{% if not forloop.last %},{% endif %} + {% for person, tp_name in review_req.doc.author_persons_or_names %} + {% if person %}{% person_link person %}{% else %}{{ tp_name }}{% endif %}{% if not forloop.last %},{% endif %} {% endfor %} diff --git a/ietf/templates/group/manage_review_requests.html b/ietf/templates/group/manage_review_requests.html index 99b23c138a..d240ef24fa 100644 --- a/ietf/templates/group/manage_review_requests.html +++ b/ietf/templates/group/manage_review_requests.html @@ -66,10 +66,10 @@

Auto-suggested
{% endif %} - {% if r.doc.authors %} + {% if r.doc.author_persons_or_names %} Authors: - {% for person in r.doc.authors %} - {% person_link person %}{% if not forloop.last %},{% endif %} + {% for person, tp_name in r.doc.author_persons_or_names %} + {% if person %}{% person_link person %}{% else %}{{ tp_name }}{% endif %}{% if not forloop.last %},{% endif %} {% endfor %}
{% endif %} diff --git a/ietf/utils/test_utils.py b/ietf/utils/test_utils.py index 86c5a0c1c3..5faf83d93f 100644 --- a/ietf/utils/test_utils.py +++ b/ietf/utils/test_utils.py @@ -38,6 +38,7 @@ import re import email import html5lib +import rest_framework.test import requests_mock import shutil import sys @@ -312,3 +313,11 @@ def tearDown(self): shutil.rmtree(dir) self.requests_mock.stop() super().tearDown() + + +class APITestCase(TestCase): + """Test case that uses rest_framework's APIClient + + This is equivalent to rest_framework.test.APITestCase, but picks up our + TestCase customizations by using it as the base class instead of Django's. + """ + client_class = rest_framework.test.APIClient diff --git a/ietf/utils/validators.py b/ietf/utils/validators.py index 92a20f5a26..a99de72724 100644 --- a/ietf/utils/validators.py +++ b/ietf/utils/validators.py @@ -33,8 +33,9 @@ # Note that this is an instantiation of the regex validator, _not_ the # regex-string validator defined right below validate_no_control_chars = RegexValidator( - regex="^[^\x00-\x1f]*$", - message="Please enter a string without control characters." 
) + regex="^[^\x01-\x1f]*$", + message="Please enter a string without control characters.", +) @deconstructible diff --git a/mypy.ini b/mypy.ini index 19df7ec9b0..4acaf98c95 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,6 +2,9 @@ ignore_missing_imports = True +# allow PEP 695 type aliases (flag needed until mypy >= 1.13) +enable_incomplete_feature = NewGenericSyntax + plugins = mypy_django_plugin.main diff --git a/requirements.txt b/requirements.txt index 02a4cf5fd0..3f89f6f16c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,8 +19,10 @@ django-analytical>=3.2.0 django-bootstrap5>=25.1 django-celery-beat>=2.7.0,<2.8.0 # pin until https://github.com/celery/django-celery-beat/issues/875 is resolved, then revisit django-celery-results>=2.6.0 +django-csp>=3.7 django-cors-headers>=4.7.0 django-debug-toolbar>=6.0.0 +django-filter>=24.3 django-markup>=1.10 # Limited use - need to reconcile against direct use of markdown django-oidc-provider==0.8.2 # 0.8.3 changes logout flow and claim return django-simple-history>=3.10.1 @@ -50,7 +52,7 @@ markdown>=3.8.0 types-markdown>=3.8.0 mock>=5.2.0 # should replace with unittest.mock and remove dependency types-mock>=5.2.0 -mypy~=1.7.0 # Version requirements determined by django-stubs. +mypy~=1.11.2 # Version requirements loosely determined by django-stubs. oic>=1.7.0 # Used only by tests opentelemetry-sdk>=1.38.0 opentelemetry-instrumentation-django>=0.59b0