author | Takeshi KOMIYA <i.tkomiya@gmail.com> | 2018-11-30 01:52:30 +0900
committer | Takeshi KOMIYA <i.tkomiya@gmail.com> | 2018-11-30 02:21:54 +0900
commit | a0bb4c8c8f01c49fe9e1603cb6d29b5f791163fe (patch)
tree | 7823d7ce18316bea88433985dcd47ac70a033d21 /sphinx/util/docfields.py
parent | 69065682f4958b10187dca508f7e961b3b3e263e (diff)
download | sphinx-git-a0bb4c8c8f01c49fe9e1603cb6d29b5f791163fe.tar.gz
Fix annotations for util
Diffstat (limited to 'sphinx/util/docfields.py')
-rw-r--r-- | sphinx/util/docfields.py | 77
1 file changed, 43 insertions, 34 deletions
diff --git a/sphinx/util/docfields.py b/sphinx/util/docfields.py
index 5d6f03701..36ad222fc 100644
--- a/sphinx/util/docfields.py
+++ b/sphinx/util/docfields.py
@@ -11,25 +11,27 @@
 """
 from __future__ import absolute_import
 
+from typing import List, cast
+
 from docutils import nodes
 
 from sphinx import addnodes
 
 if False:
     # For type annotation
-    from typing import Any, Dict, List, Tuple, Type  # NOQA
+    from typing import Any, Dict, Tuple, Type  # NOQA
     from sphinx.domains import Domain  # NOQA
     from sphinx.environment import BuildEnvironment  # NOQA
     from sphinx.util.typing import unicode  # NOQA
 
 
 def _is_single_paragraph(node):
-    # type: (nodes.Node) -> bool
+    # type: (nodes.field_body) -> bool
     """True if the node only contains one paragraph (and system messages)."""
     if len(node) == 0:
         return False
     elif len(node) > 1:
-        for subnode in node[1:]:
+        for subnode in node[1:]:  # type: nodes.Node
             if not isinstance(subnode, nodes.system_message):
                 return False
     if isinstance(node[0], nodes.paragraph):
@@ -93,7 +95,7 @@ class Field:
         return [self.make_xref(rolename, domain, target, innernode, contnode, env)]
 
     def make_entry(self, fieldarg, content):
-        # type: (List, unicode) -> Tuple[List, unicode]
+        # type: (unicode, List[nodes.Node]) -> Tuple[unicode, List[nodes.Node]]
         return (fieldarg, content)
 
     def make_field(self,
@@ -160,7 +162,8 @@ class GroupedField(Field):
             listnode += nodes.list_item('', par)
 
         if len(items) == 1 and self.can_collapse:
-            fieldbody = nodes.field_body('', listnode[0][0])
+            list_item = cast(nodes.list_item, listnode[0])
+            fieldbody = nodes.field_body('', list_item[0])
             return nodes.field('', fieldname, fieldbody)
 
         fieldbody = nodes.field_body('', listnode)
@@ -227,7 +230,7 @@ class TypedField(GroupedField):
         fieldname = nodes.field_name('', self.label)
         if len(items) == 1 and self.can_collapse:
             fieldarg, content = items[0]
-            bodynode = handle_item(fieldarg, content)
+            bodynode = handle_item(fieldarg, content)  # type: nodes.Node
         else:
             bodynode = self.list_type()
             for fieldarg, content in items:
@@ -241,6 +244,7 @@ class DocFieldTransformer:
     Transforms field lists in "doc field" syntax into better-looking
    equivalents, using the field type definitions given on a domain.
     """
+    typemap = None  # type: Dict[unicode, Tuple[Field, bool]]
 
     def __init__(self, directive):
         # type: (Any) -> None
@@ -251,18 +255,19 @@ class DocFieldTransformer:
         self.typemap = directive._doc_field_type_map
 
     def preprocess_fieldtypes(self, types):
-        # type: (List) -> Dict[unicode, Tuple[Any, bool]]
+        # type: (List[Field]) -> Dict[unicode, Tuple[Field, bool]]
         typemap = {}
         for fieldtype in types:
             for name in fieldtype.names:
                 typemap[name] = fieldtype, False
             if fieldtype.is_typed:
-                for name in fieldtype.typenames:
-                    typemap[name] = fieldtype, True
+                typed_field = cast(TypedField, fieldtype)
+                for name in typed_field.typenames:
+                    typemap[name] = typed_field, True
         return typemap
 
     def transform_all(self, node):
-        # type: (nodes.Node) -> None
+        # type: (addnodes.desc_content) -> None
         """Transform all field list children of a node."""
         # don't traverse, only handle field lists that are immediate children
         for child in node:
@@ -270,30 +275,33 @@ class DocFieldTransformer:
             self.transform(child)
 
     def transform(self, node):
-        # type: (nodes.Node) -> None
+        # type: (nodes.field_list) -> None
        """Transform a single field list *node*."""
         typemap = self.typemap
 
-        entries = []
-        groupindices = {}  # type: Dict[unicode, int]
-        types = {}  # type: Dict[unicode, Dict]
+        entries = []            # type: List
+        groupindices = {}       # type: Dict[unicode, int]
+        types = {}              # type: Dict[unicode, Dict]
 
         # step 1: traverse all fields and collect field types and content
-        for field in node:
-            fieldname, fieldbody = field
+        for field in cast(List[nodes.field], node):
+            assert len(field) == 2
+            field_name = cast(nodes.field_name, field[0])
+            field_body = cast(nodes.field_body, field[1])
             try:
                 # split into field type and argument
-                fieldtype, fieldarg = fieldname.astext().split(None, 1)
+                fieldtype, fieldarg = field_name.astext().split(None, 1)
             except ValueError:
                 # maybe an argument-less field type?
-                fieldtype, fieldarg = fieldname.astext(), ''
+                fieldtype, fieldarg = field_name.astext(), ''
             typedesc, is_typefield = typemap.get(fieldtype, (None, None))
 
             # collect the content, trying not to keep unnecessary paragraphs
-            if _is_single_paragraph(fieldbody):
-                content = fieldbody.children[0].children
+            if _is_single_paragraph(field_body):
+                paragraph = cast(nodes.paragraph, field_body[0])
+                content = paragraph.children
             else:
-                content = fieldbody.children
+                content = field_body.children
 
             # sort out unknown fields
             if typedesc is None or typedesc.has_arg != bool(fieldarg):
@@ -302,26 +310,27 @@ class DocFieldTransformer:
                 new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
                 if fieldarg:
                     new_fieldname += ' ' + fieldarg
-                fieldname[0] = nodes.Text(new_fieldname)
+                field_name[0] = nodes.Text(new_fieldname)
                 entries.append(field)
 
                 # but if this has a type then we can at least link it
                 if (typedesc and is_typefield and content and
                         len(content) == 1 and isinstance(content[0], nodes.Text)):
+                    typed_field = cast(TypedField, typedesc)
                     target = content[0].astext()
-                    xrefs = typedesc.make_xrefs(
-                        typedesc.typerolename,
+                    xrefs = typed_field.make_xrefs(
+                        typed_field.typerolename,
                         self.directive.domain,
                         target,
                         contnode=content[0],
                     )
-                    if _is_single_paragraph(fieldbody):
-                        fieldbody.children[0].clear()
-                        fieldbody.children[0].extend(xrefs)
+                    if _is_single_paragraph(field_body):
+                        paragraph = cast(nodes.paragraph, field_body[0])
+                        paragraph.clear()
+                        paragraph.extend(xrefs)
                     else:
-                        fieldbody.clear()
-                        fieldbody += nodes.paragraph()
-                        fieldbody[0].extend(xrefs)
+                        field_body.clear()
+                        field_body += nodes.paragraph('', '', *xrefs)
 
                 continue
@@ -348,11 +357,11 @@ class DocFieldTransformer:
                             [nodes.Text(argtype)]
                     fieldarg = argname
 
-            translatable_content = nodes.inline(fieldbody.rawsource,
+            translatable_content = nodes.inline(field_body.rawsource,
                                                 translatable=True)
-            translatable_content.document = fieldbody.parent.document
-            translatable_content.source = fieldbody.parent.source
-            translatable_content.line = fieldbody.parent.line
+            translatable_content.document = field_body.parent.document
+            translatable_content.source = field_body.parent.source
+            translatable_content.line = field_body.parent.line
             translatable_content += content
 
             # grouped entries need to be collected in one entry, while others
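The recurring change in this diff is type narrowing for mypy under comment-style (Python 2 compatible) annotations: docutils exposes a node's children only as generic `nodes.Node` objects, so the commit adds `typing.cast()` calls, which have no runtime effect, wherever a child is known to be a specific element class. Below is a minimal standalone sketch of that pattern; it is not part of the commit, and the helper name `first_paragraph_text` is invented purely for illustration.

```python
from typing import cast

from docutils import nodes


def first_paragraph_text(field_body):
    # type: (nodes.field_body) -> str
    """Return the text of the first paragraph of a field body, if any."""
    if len(field_body) == 0:
        return ''
    # field_body[0] is typed only as nodes.Node; cast() narrows it for mypy
    # without changing what happens at runtime.
    paragraph = cast(nodes.paragraph, field_body[0])
    return paragraph.astext()


if __name__ == '__main__':
    body = nodes.field_body('', nodes.paragraph(text='spam'))
    print(first_paragraph_text(body))  # prints: spam
```

`cast()` simply returns its second argument unchanged, so the transformed methods behave exactly as before; only the static types seen by mypy become more precise, e.g. `cast(TypedField, fieldtype)` makes the access to `typenames` in `preprocess_fieldtypes` type-check.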