Bump Ruff to 0.8.1

repository https://github.com/sphinx-doc/sphinx.git
parent d9a2821509
commit b458850b32
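The version bump itself is the single pyproject.toml change in the first hunk below; the rest of the diff applies the fixes that come with the newer Ruff release, rewriting typing.cast(SomeType, value) into typing.cast('SomeType', value), the quoted form asked for by Ruff's runtime-cast-value rule (TC006). A minimal sketch of what the quoted form changes, using a stand-in type rather than anything from the Sphinx code base: at runtime cast() simply returns its second argument, so a string target is never evaluated, and the import that backs it can live under if TYPE_CHECKING.

from __future__ import annotations

from typing import TYPE_CHECKING, cast

if TYPE_CHECKING:
    # Only the type checker sees this import; it is never executed at runtime.
    from decimal import Decimal


def first_as_decimal(values: list) -> Decimal:
    # Unquoted style (before this commit): cast(Decimal, values[0]) -- the name
    # Decimal must exist at runtime, so the import above could not be deferred.
    # Quoted style (after): the target is a plain string and cast() just
    # returns values[0] unchanged.
    return cast('Decimal', values[0])

Calling first_as_decimal([1]) returns 1 unchanged; only the static type seen by the checker differs.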
@@ -82,7 +82,7 @@ docs = [
 ]
 lint = [
     "flake8>=6.0",
-    "ruff==0.8.0",
+    "ruff==0.8.1",
     "mypy==1.13.0",
     "sphinx-lint>=0.9",
     "types-colorama==0.4.15.20240311",
@@ -151,7 +151,7 @@ class ShowUrlsTransform(SphinxPostTransform):
                     break
 
             # assign new footnote number
-            old_label = cast(nodes.label, footnote[0])
+            old_label = cast('nodes.label', footnote[0])
             old_label.replace_self(nodes.label('', str(num)))
             if old_label in footnote['names']:
                 footnote['names'].remove(old_label.astext())
@@ -251,7 +251,7 @@ class HyperlinkCollector(SphinxPostTransform):
         :param uri: URI to add
         :param node: A node class where the URI was found
         """
-        builder = cast(CheckExternalLinksBuilder, self.app.builder)
+        builder = cast('CheckExternalLinksBuilder', self.app.builder)
         hyperlinks = builder.hyperlinks
         docname = self.env.docname
 
@@ -81,7 +81,7 @@ class ObjectDescription(SphinxDirective, Generic[ObjDescT]):
                     self._doc_field_type_map[name] = (field, False)
 
                 if field.is_typed:
-                    typed_field = cast(TypedField, field)
+                    typed_field = cast('TypedField', field)
                     for name in typed_field.typenames:
                         self._doc_field_type_map[name] = (field, True)
 
@@ -346,7 +346,7 @@ class DefaultRole(SphinxDirective):
             )
             messages += [error]
 
-        return cast(list[nodes.Node], messages)
+        return cast('list[nodes.Node]', messages)
 
 
 class DefaultDomain(SphinxDirective):
@@ -376,7 +376,7 @@ class Only(SphinxDirective):
             # Use these depths to determine where the nested sections should
             # be placed in the doctree.
             n_sects_to_raise = current_depth - nested_depth + 1
-            parent = cast(nodes.Element, self.state.parent)
+            parent = cast('nodes.Element', self.state.parent)
             for _i in range(n_sects_to_raise):
                 if parent.parent:
                     parent = parent.parent
@@ -38,7 +38,7 @@ class Figure(images.Figure): # type: ignore[misc]
             return result
 
         assert len(result) == 1
-        figure_node = cast(nodes.figure, result[0])
+        figure_node = cast('nodes.figure', result[0])
         if name:
             # set ``name`` to figure_node if given
             self.options['name'] = name
@@ -46,7 +46,7 @@ class Figure(images.Figure): # type: ignore[misc]
 
         # copy lineno from image node
         if figure_node.line is None and len(figure_node) == 2:
-            caption = cast(nodes.caption, figure_node[1])
+            caption = cast('nodes.caption', figure_node[1])
             figure_node.line = caption.line
 
         return [figure_node]
@@ -163,7 +163,7 @@ class MathDirective(SphinxDirective):
         return ret
 
     def add_target(self, ret: list[Node]) -> None:
-        node = cast(nodes.math_block, ret[0])
+        node = cast('nodes.math_block', ret[0])
 
         # assign label automatically if math_number_all enabled
         if node['label'] == '' or (self.config.math_number_all and not node['label']): # NoQA: PLC1901
@@ -1793,14 +1793,14 @@ class ASTDeclaration(ASTBaseBase):
 
     @property
     def name(self) -> ASTNestedName:
-        decl = cast(DeclarationType, self.declaration)
+        decl = cast('DeclarationType', self.declaration)
         return decl.name
 
     @property
     def function_params(self) -> list[ASTFunctionParameter] | None:
         if self.objectType != 'function':
             return None
-        decl = cast(ASTType, self.declaration)
+        decl = cast('ASTType', self.declaration)
         return decl.function_params
 
     def get_id(self, version: int, prefixed: bool = True) -> str:
@@ -1851,7 +1851,7 @@ class ASTDeclaration(ASTBaseBase):
             mainDeclNode += addnodes.desc_sig_keyword('enumerator', 'enumerator')
             mainDeclNode += addnodes.desc_sig_space()
         elif self.objectType == 'type':
-            decl = cast(ASTType, self.declaration)
+            decl = cast('ASTType', self.declaration)
             prefix = decl.get_type_declaration_prefix()
             mainDeclNode += addnodes.desc_sig_keyword(prefix, prefix)
             mainDeclNode += addnodes.desc_sig_space()
@@ -117,7 +117,7 @@ class CitationDefinitionTransform(SphinxTransform):
             domain.note_citation(node)
 
             # mark citation labels as not smartquoted
-            label = cast(nodes.label, node[0])
+            label = cast('nodes.label', node[0])
             label['support_smartquotes'] = False
 
 
@@ -542,10 +542,10 @@ def filter_meta_fields(app: Sphinx, domain: str, objtype: str, content: Element)
 
     for node in content:
        if isinstance(node, nodes.field_list):
-            fields = cast(list[nodes.field], node)
+            fields = cast('list[nodes.field]', node)
             # removing list items while iterating the list needs reversed()
             for field in reversed(fields):
-                field_name = cast(nodes.field_body, field[0]).astext().strip()
+                field_name = cast('nodes.field_body', field[0]).astext().strip()
                 if field_name == 'meta' or field_name.startswith('meta '):
                     node.remove(field)
 
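A note on hunks like the one above, where a single docutils node is cast to 'list[nodes.field]': typing.cast never inspects, converts, or validates its argument, quoted or not, so these rewrites cannot change runtime behaviour; the subscripted target exists only for the type checker. A small self-contained illustration (the Fields class is made up for the example, not part of Sphinx or docutils):

from typing import cast


class Fields(list):
    """Stand-in for a docutils field_list: a node that also behaves as a sequence."""


fields = Fields(['field one', 'field two'])

# cast() is an identity function at runtime; the quoted 'list[str]' is never
# evaluated, so it costs nothing and cannot fail even for exotic targets.
same_object = cast('list[str]', fields)
assert same_object is fields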
@@ -7,7 +7,6 @@ from copy import copy
 from typing import TYPE_CHECKING, Any, ClassVar, Final, cast
 
 from docutils import nodes
-from docutils.nodes import Element, Node, system_message
 from docutils.parsers.rst import Directive, directives
 from docutils.statemachine import StringList
 
@@ -25,6 +24,8 @@ from sphinx.util.parsing import nested_parse_to_nodes
 if TYPE_CHECKING:
     from collections.abc import Callable, Iterable, Iterator, Set
 
+    from docutils.nodes import Element, Node, system_message
+
     from sphinx.application import Sphinx
     from sphinx.builders import Builder
     from sphinx.environment import BuildEnvironment
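Several files in this commit, like the two hunks just above, also move names such as Element, Node and system_message out of the module-level imports and into the if TYPE_CHECKING: block. That is safe only because every remaining use of those names is either a deferred annotation (via from __future__ import annotations) or one of the newly quoted cast() targets. A schematic sketch of the pattern with placeholder names (xml.dom.minidom stands in for docutils here):

from __future__ import annotations

from typing import TYPE_CHECKING, cast

if TYPE_CHECKING:
    # Resolved by the type checker only; skipped entirely at runtime.
    from xml.dom.minidom import Element


def first_child(children: list) -> Element:
    # The return annotation is lazy thanks to "from __future__ import annotations",
    # and the quoted cast below never evaluates the name Element at runtime,
    # so the runtime import can be dropped.
    return cast('Element', children[0])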
@@ -813,7 +814,7 @@ class StandardDomain(Domain):
                                location=node)
             self.anonlabels[name] = docname, labelid
             if node.tagname == 'section':
-                title = cast(nodes.title, node[0])
+                title = cast('nodes.title', node[0])
                 sectname = clean_astext(title)
             elif node.tagname == 'rubric':
                 sectname = clean_astext(node)
@@ -824,9 +825,9 @@ class StandardDomain(Domain):
             else:
                 if (isinstance(node, nodes.definition_list | nodes.field_list) and
                         node.children):
-                    node = cast(nodes.Element, node.children[0])
+                    node = cast('nodes.Element', node.children[0])
                 if isinstance(node, nodes.field | nodes.definition_list_item):
-                    node = cast(nodes.Element, node.children[0])
+                    node = cast('nodes.Element', node.children[0])
                 if isinstance(node, nodes.term | nodes.field_name):
                     sectname = clean_astext(node)
                 else:
@@ -1117,7 +1118,7 @@ class StandardDomain(Domain):
     def get_numfig_title(self, node: Node) -> str | None:
         """Get the title of enumerable nodes to refer them using its title"""
         if self.is_enumerable_node(node):
-            elem = cast(Element, node)
+            elem = cast('Element', node)
             _, title_getter = self.enumerable_nodes.get(elem.__class__, (None, None))
             if title_getter:
                 return title_getter(elem)
@@ -43,12 +43,12 @@ class MetadataCollector(EnvironmentCollector):
             for node in doctree[index]: # type: ignore[attr-defined]
                 # nodes are multiply inherited...
                 if isinstance(node, nodes.authors):
-                    authors = cast(list[nodes.author], node)
+                    authors = cast('list[nodes.author]', node)
                     md['authors'] = [author.astext() for author in authors]
                 elif isinstance(node, nodes.field):
                     assert len(node) == 2
-                    field_name = cast(nodes.field_name, node[0])
-                    field_body = cast(nodes.field_body, node[1])
+                    field_name = cast('nodes.field_name', node[0])
+                    field_body = cast('nodes.field_body', node[1])
                     md[field_name.astext()] = field_body.astext()
                 elif isinstance(node, nodes.TextElement):
                     # other children must be TextElement
@@ -232,7 +232,7 @@ class TocTreeCollector(EnvironmentCollector):
                     if 'skip_section_number' in subnode:
                         continue
                     numstack[-1] += 1
-                    reference = cast(nodes.reference, subnode[0])
+                    reference = cast('nodes.reference', subnode[0])
                     if depth > 0:
                         number = numstack.copy()
                         secnums[reference['anchorname']] = tuple(numstack)
@@ -94,11 +94,11 @@ def get_type_comment(obj: Any, bound_method: bool = False) -> Signature | None:
             # this adds if-block before the declaration.
             module = ast.parse('if True:\n' + source, type_comments=True)
             subject = cast(
-                ast.FunctionDef, module.body[0].body[0], # type: ignore[attr-defined]
+                'ast.FunctionDef', module.body[0].body[0], # type: ignore[attr-defined]
             )
         else:
             module = ast.parse(source, type_comments=True)
-            subject = cast(ast.FunctionDef, module.body[0])
+            subject = cast('ast.FunctionDef', module.body[0])
 
         type_comment = getattr(subject, "type_comment", None)
         if type_comment:
@@ -3,7 +3,6 @@
 from __future__ import annotations
 
 import re
-from collections.abc import Iterable
 from typing import TYPE_CHECKING, Any, cast
 
 from docutils import nodes
@@ -14,6 +13,8 @@ from sphinx.util import inspect
 from sphinx.util.typing import ExtensionMetadata, stringify_annotation
 
 if TYPE_CHECKING:
+    from collections.abc import Iterable
+
     from docutils.nodes import Element
 
     from sphinx.application import Sphinx
@@ -49,7 +50,7 @@ def merge_typehints(app: Sphinx, domain: str, objtype: str, contentnode: Element
         return
 
     try:
-        signature = cast(addnodes.desc_signature, contentnode.parent[0])
+        signature = cast('addnodes.desc_signature', contentnode.parent[0])
         if signature['module']:
             fullname = f'{signature["module"]}.{signature["fullname"]}'
         else:
@@ -97,7 +98,7 @@ def insert_field_list(node: Element) -> nodes.field_list:
 def modify_field_list(node: nodes.field_list, annotations: dict[str, str],
                       suppress_rtype: bool = False) -> None:
     arguments: dict[str, dict[str, bool]] = {}
-    fields = cast(Iterable[nodes.field], node)
+    fields = cast('Iterable[nodes.field]', node)
     for field in fields:
         field_name = field[0].astext()
         parts = re.split(' +', field_name)
@@ -159,7 +160,7 @@ def augment_descriptions_with_types(
     annotations: dict[str, str],
     force_rtype: bool,
 ) -> None:
-    fields = cast(Iterable[nodes.field], node)
+    fields = cast('Iterable[nodes.field]', node)
     has_description: set[str] = set()
     has_type: set[str] = set()
     for field in fields:
@@ -37,7 +37,7 @@ def register_sections_as_label(app: Sphinx, document: Node) -> None:
             continue
         labelid = node['ids'][0]
         docname = app.env.docname
-        title = cast(nodes.title, node[0])
+        title = cast('nodes.title', node[0])
         ref_name = getattr(title, 'rawsource', title.astext())
         if app.config.autosectionlabel_prefix_document:
             name = nodes.fully_normalize_name(docname + ':' + ref_name)
@@ -134,13 +134,13 @@ class autosummary_table(nodes.comment):
 def autosummary_table_visit_html(self: HTML5Translator, node: autosummary_table) -> None:
     """Make the first column of the table non-breaking."""
     try:
-        table = cast(nodes.table, node[0])
-        tgroup = cast(nodes.tgroup, table[0])
-        tbody = cast(nodes.tbody, tgroup[-1])
-        rows = cast(list[nodes.row], tbody)
+        table = cast('nodes.table', node[0])
+        tgroup = cast('nodes.tgroup', table[0])
+        tbody = cast('nodes.tbody', tgroup[-1])
+        rows = cast('list[nodes.row]', tbody)
         for row in rows:
-            col1_entry = cast(nodes.entry, row[0])
-            par = cast(nodes.paragraph, col1_entry[0])
+            col1_entry = cast('nodes.entry', row[0])
+            par = cast('nodes.paragraph', col1_entry[0])
             for j, subnode in enumerate(list(par)):
                 if isinstance(subnode, nodes.Text):
                     new_text = subnode.astext().replace(" ", "\u00a0")
@@ -765,7 +765,7 @@ class AutoLink(SphinxRole):
             return objects, errors
 
         assert len(objects) == 1
-        pending_xref = cast(addnodes.pending_xref, objects[0])
+        pending_xref = cast('addnodes.pending_xref', objects[0])
         try:
             # try to import object by name
             prefixes = get_import_prefixes_from_env(self.env)
@@ -778,7 +778,7 @@ class AutoLink(SphinxRole):
             ]
             import_by_name(name, prefixes)
         except ImportExceptionGroup:
-            literal = cast(nodes.literal, pending_xref[0])
+            literal = cast('nodes.literal', pending_xref[0])
             objects[0] = nodes.emphasis(self.rawtext, literal.astext(),
                                         classes=literal['classes'])
 
@@ -35,7 +35,6 @@ import hashlib
 import inspect
 import os.path
 import re
-from collections.abc import Iterable, Sequence
 from importlib import import_module
 from typing import TYPE_CHECKING, Any, ClassVar, cast
 
@@ -54,6 +53,8 @@ from sphinx.ext.graphviz import (
 from sphinx.util.docutils import SphinxDirective
 
 if TYPE_CHECKING:
+    from collections.abc import Iterable, Sequence
+
     from docutils.nodes import Node
 
     from sphinx.application import Sphinx
@@ -424,7 +425,7 @@ def html_visit_inheritance_diagram(self: HTML5Translator, node: inheritance_diag
     graphviz_output_format = self.builder.env.config.graphviz_output_format.upper()
     current_filename = os.path.basename(self.builder.current_docname + self.builder.out_suffix)
     urls = {}
-    pending_xrefs = cast(Iterable[addnodes.pending_xref], node)
+    pending_xrefs = cast('Iterable[addnodes.pending_xref]', node)
     for child in pending_xrefs:
         if child.get('refuri') is not None:
             # Construct the name from the URI if the reference is external via intersphinx
@@ -569,7 +569,7 @@ class IntersphinxRoleResolver(ReferencesResolver):
         for node in self.document.findall(pending_xref):
             if 'intersphinx' not in node:
                 continue
-            contnode = cast(nodes.TextElement, node[0].deepcopy())
+            contnode = cast('nodes.TextElement', node[0].deepcopy())
             inv_name = node['inventory']
             if inv_name is not None:
                 assert inventory_exists(self.env, inv_name)
@@ -13,13 +13,13 @@ from typing import TYPE_CHECKING, Any, cast
 from docutils import nodes
 
 import sphinx
-from sphinx.builders.html import StandaloneHTMLBuilder
 from sphinx.errors import ExtensionError
 from sphinx.locale import _
 from sphinx.util.math import get_node_equation_number
 
 if TYPE_CHECKING:
     from sphinx.application import Sphinx
+    from sphinx.builders.html import StandaloneHTMLBuilder
     from sphinx.util.typing import ExtensionMetadata
     from sphinx.writers.html5 import HTML5Translator
 
@@ -82,7 +82,7 @@ def install_mathjax(app: Sphinx, pagename: str, templatename: str, context: dict
         raise ExtensionError(msg)
 
     domain = app.env.domains.math_domain
-    builder = cast(StandaloneHTMLBuilder, app.builder)
+    builder = cast('StandaloneHTMLBuilder', app.builder)
     if app.registry.html_assets_policy == 'always' or domain.has_equations(pagename):
         # Enable mathjax only if equations exists
         if app.config.mathjax2_config:
@@ -214,7 +214,7 @@ def latex_visit_todo_node(self: LaTeXTranslator, node: todo_node) -> None:
         self.body.append('\n\\begin{sphinxtodo}{')
         self.body.append(self.hypertarget_to(node))
 
-        title_node = cast(nodes.title, node[0])
+        title_node = cast('nodes.title', node[0])
         title = texescape.escape(title_node.astext(), self.config.latex_engine)
         self.body.append('%s:}' % title)
         self.no_latex_floats += 1
@@ -14,7 +14,6 @@ from docutils.nodes import Element, Node
 
 import sphinx
 from sphinx import addnodes
-from sphinx.builders.html import StandaloneHTMLBuilder
 from sphinx.locale import _, __
 from sphinx.pycode import ModuleAnalyzer
 from sphinx.transforms.post_transforms import SphinxPostTransform
@@ -28,6 +27,7 @@ if TYPE_CHECKING:
 
     from sphinx.application import Sphinx
     from sphinx.builders import Builder
+    from sphinx.builders.html import StandaloneHTMLBuilder
     from sphinx.environment import BuildEnvironment
     from sphinx.util._pathlib import _StrPath
     from sphinx.util.typing import ExtensionMetadata
@@ -228,7 +228,7 @@ def should_generate_module_page(app: Sphinx, modname: str) -> bool:
         # Always (re-)generate module page when module filename is not found.
         return True
 
-    builder = cast(StandaloneHTMLBuilder, app.builder)
+    builder = cast('StandaloneHTMLBuilder', app.builder)
     basename = modname.replace('.', '/') + builder.out_suffix
     page_filename = os.path.join(app.outdir, '_modules/', basename)
 
@@ -443,11 +443,11 @@ class GlossarySorter(SphinxTransform):
     def apply(self, **kwargs: Any) -> None:
         for glossary in self.document.findall(addnodes.glossary):
             if glossary['sorted']:
-                definition_list = cast(nodes.definition_list, glossary[0])
+                definition_list = cast('nodes.definition_list', glossary[0])
                 definition_list[:] = sorted(
                     definition_list,
                     key=lambda item: unicodedata.normalize(
-                        'NFD', cast(nodes.term, item)[0].astext().lower()
+                        'NFD', cast('nodes.term', item)[0].astext().lower()
                     ),
                 )
 
@@ -75,8 +75,8 @@ class RefOnlyBulletListTransform(SphinxTransform):
         for node in self.document.findall(nodes.bullet_list):
             if check_refonly_list(node):
                 for item in node.findall(nodes.list_item):
-                    para = cast(nodes.paragraph, item[0])
-                    ref = cast(nodes.reference, para[0])
+                    para = cast('nodes.paragraph', item[0])
+                    ref = cast('nodes.reference', para[0])
                     compact_para = addnodes.compact_paragraph()
                     compact_para += ref
                     item.replace(para, compact_para)
@@ -7,7 +7,6 @@ from itertools import starmap
 from typing import TYPE_CHECKING, Any, cast
 
 from docutils import nodes
-from docutils.nodes import Element, Node
 
 from sphinx import addnodes
 from sphinx.errors import NoUri
@@ -20,6 +19,8 @@ from sphinx.util.nodes import find_pending_xref_condition, process_only_nodes
 if TYPE_CHECKING:
     from collections.abc import Sequence
 
+    from docutils.nodes import Element, Node
+
     from sphinx.addnodes import pending_xref
     from sphinx.application import Sphinx
     from sphinx.domains import Domain
@@ -68,9 +69,9 @@ class ReferencesResolver(SphinxPostTransform):
         for node in self.document.findall(addnodes.pending_xref):
             content = self.find_pending_xref_condition(node, ('resolved', '*'))
             if content:
-                contnode = cast(Element, content[0].deepcopy())
+                contnode = cast('Element', content[0].deepcopy())
             else:
-                contnode = cast(Element, node[0].deepcopy())
+                contnode = cast('Element', node[0].deepcopy())
 
             newnode = None
 
@@ -10,7 +10,6 @@ import contextlib
 from typing import TYPE_CHECKING, Any, cast
 
 from docutils import nodes
-from docutils.nodes import Element, Node
 
 from sphinx import addnodes
 from sphinx.locale import __
@@ -18,6 +17,7 @@ from sphinx.util import logging
 from sphinx.util.nodes import get_node_line
 
 if TYPE_CHECKING:
+    from docutils.nodes import Element, Node
     from docutils.parsers.rst.states import Inliner
 
     from sphinx.directives import ObjectDescription
@@ -237,7 +237,7 @@ class GroupedField(Field):
             listnode += nodes.list_item('', par)
 
         if len(items) == 1 and self.can_collapse:
-            list_item = cast(nodes.list_item, listnode[0])
+            list_item = cast('nodes.list_item', listnode[0])
             fieldbody = nodes.field_body('', list_item[0])
             return nodes.field('', fieldname, fieldbody)
 
@@ -366,10 +366,10 @@ class DocFieldTransformer:
         types: dict[str, dict] = {}
 
         # step 1: traverse all fields and collect field types and content
-        for field in cast(list[nodes.field], node):
+        for field in cast('list[nodes.field]', node):
             assert len(field) == 2
-            field_name = cast(nodes.field_name, field[0])
-            field_body = cast(nodes.field_body, field[1])
+            field_name = cast('nodes.field_name', field[0])
+            field_body = cast('nodes.field_body', field[1])
             try:
                 # split into field type and argument
                 fieldtype_name, fieldarg = field_name.astext().split(None, 1)
@@ -380,7 +380,7 @@ class DocFieldTransformer:
 
             # collect the content, trying not to keep unnecessary paragraphs
             if _is_single_paragraph(field_body):
-                paragraph = cast(nodes.paragraph, field_body[0])
+                paragraph = cast('nodes.paragraph', field_body[0])
                 content = paragraph.children
             else:
                 content = field_body.children
@@ -403,7 +403,7 @@ class DocFieldTransformer:
                 and len(content) == 1
                 and isinstance(content[0], nodes.Text)
             ):
-                typed_field = cast(TypedField, typedesc)
+                typed_field = cast('TypedField', typedesc)
                 target = content[0].astext()
                 xrefs = typed_field.make_xrefs(
                     typed_field.typerolename,
@@ -413,7 +413,7 @@ class DocFieldTransformer:
                     env=self.directive.state.document.settings.env,
                 )
                 if _is_single_paragraph(field_body):
-                    paragraph = cast(nodes.paragraph, field_body[0])
+                    paragraph = cast('nodes.paragraph', field_body[0])
                     paragraph.clear()
                     paragraph.extend(xrefs)
                 else:
@@ -456,7 +456,7 @@ class DocFieldTransformer:
             if typedesc.is_grouped:
                 if typename in groupindices:
                     group = cast(
-                        tuple[Field, list, Node], entries[groupindices[typename]]
+                        'tuple[Field, list, Node]', entries[groupindices[typename]]
                     )
                 else:
                     groupindices[typename] = len(entries)
@@ -355,7 +355,7 @@ class LoggingReporter(Reporter):
         debug: bool = False,
         error_handler: str = 'backslashreplace',
     ) -> None:
-        stream = cast(IO, WarningStream())
+        stream = cast('IO', WarningStream())
         super().__init__(
             source, report_level, halt_level, stream, debug, error_handler=error_handler
         )
@@ -848,7 +848,7 @@ def signature_from_str(signature: str) -> Signature:
     """Create a :class:`~inspect.Signature` object from a string."""
     code = 'def func' + signature + ': pass'
     module = ast.parse(code)
-    function = typing.cast(ast.FunctionDef, module.body[0])
+    function = typing.cast('ast.FunctionDef', module.body[0])
 
     return signature_from_ast(function, code)
 
@@ -95,7 +95,7 @@ class NodeMatcher(Generic[N]):
         confounds type checkers' ability to determine the return type of the iterator.
         """
         for found in node.findall(self):
-            yield cast(N, found)
+            yield cast('N', found)
 
 
 def get_full_module_name(node: Node) -> str:
@@ -105,7 +105,7 @@ def append_epilog(content: StringList, epilog: str) -> None:
     if len(content) > 0:
         source, lineno = content.info(-1)
         # lineno will never be None, since len(content) > 0
-        lineno = cast(int, lineno)
+        lineno = cast('int', lineno)
     else:
         source = '<generated>'
         lineno = 0
@@ -31,7 +31,7 @@ class HTMLWriter(Writer): # type: ignore[misc]
     def translate(self) -> None:
         # sadly, this is mostly copied from parent class
        visitor = self.builder.create_translator(self.document, self.builder)
-        self.visitor = cast(HTML5Translator, visitor)
+        self.visitor = cast('HTML5Translator', visitor)
         self.document.walkabout(visitor)
         self.output = self.visitor.astext()
         for attr in (
@@ -5,7 +5,6 @@ from __future__ import annotations
 import posixpath
 import re
 import urllib.parse
-from collections.abc import Iterable
 from typing import TYPE_CHECKING, cast
 
 from docutils import nodes
@@ -18,6 +17,8 @@ from sphinx.util.docutils import SphinxTranslator
 from sphinx.util.images import get_image_size
 
 if TYPE_CHECKING:
+    from collections.abc import Iterable
+
     from docutils.nodes import Element, Node, Text
 
     from sphinx.builders import Builder
@@ -670,7 +671,7 @@ class HTML5Translator(SphinxTranslator, BaseTranslator): # type: ignore[misc]
 
     def visit_productionlist(self, node: Element) -> None:
         self.body.append(self.starttag(node, 'pre'))
-        productionlist = cast(Iterable[addnodes.production], node)
+        productionlist = cast('Iterable[addnodes.production]', node)
         names = (production['tokenname'] for production in productionlist)
         maxlen = max(len(name) for name in names)
         lastname = None
@@ -8,7 +8,6 @@ from __future__ import annotations
 
 import re
 from collections import defaultdict
-from collections.abc import Iterable
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, ClassVar, cast
 
@@ -26,6 +25,8 @@ from sphinx.util.template import LaTeXRenderer
 from sphinx.util.texescape import tex_replace_map
 
 if TYPE_CHECKING:
+    from collections.abc import Iterable
+
     from docutils.nodes import Element, Node, Text
 
     from sphinx.builders.latex import LaTeXBuilder
@@ -95,7 +96,7 @@ class LaTeXWriter(writers.Writer): # type: ignore[type-arg]
             self.document, self.builder, self.theme
         )
         self.document.walkabout(visitor)
-        self.output = cast(LaTeXTranslator, visitor).astext()
+        self.output = cast('LaTeXTranslator', visitor).astext()
 
 
 # Helper classes
@@ -1111,7 +1112,7 @@ class LaTeXTranslator(SphinxTranslator):
 
     def visit_footnote(self, node: Element) -> None:
         self.in_footnote += 1
-        label = cast(nodes.label, node[0])
+        label = cast('nodes.label', node[0])
         if self.in_parsed_literal:
             self.body.append(r'\begin{footnote}[%s]' % label.astext())
         else:
@@ -1382,8 +1383,8 @@ class LaTeXTranslator(SphinxTranslator):
     def visit_acks(self, node: Element) -> None:
         # this is a list in the source, but should be rendered as a
         # comma-separated list here
-        bullet_list = cast(nodes.bullet_list, node[0])
-        list_items = cast(Iterable[nodes.list_item], bullet_list)
+        bullet_list = cast('nodes.bullet_list', node[0])
+        list_items = cast('Iterable[nodes.list_item]', bullet_list)
         self.body.append(BLANKLINE)
         self.body.append(', '.join(n.astext() for n in list_items) + '.')
         self.body.append(BLANKLINE)
@@ -2091,8 +2092,8 @@ class LaTeXTranslator(SphinxTranslator):
         self.body.append('}')
 
     def visit_thebibliography(self, node: Element) -> None:
-        citations = cast(Iterable[nodes.citation], node)
-        labels = (cast(nodes.label, citation[0]) for citation in citations)
+        citations = cast('Iterable[nodes.citation]', node)
+        labels = (cast('nodes.label', citation[0]) for citation in citations)
         longest_label = max((label.astext() for label in labels), key=len)
         if len(longest_label) > MAX_CITATION_LABEL_LENGTH:
             # adjust max width of citation labels not to break the layout
@@ -2106,7 +2107,7 @@ class LaTeXTranslator(SphinxTranslator):
         self.body.append(r'\end{sphinxthebibliography}' + CR)
 
     def visit_citation(self, node: Element) -> None:
-        label = cast(nodes.label, node[0])
+        label = cast('nodes.label', node[0])
         self.body.append(
             rf'\bibitem[{self.encode(label.astext())}]'
             rf'{{{node["docname"]}:{node["ids"][0]}}}'
@@ -2159,7 +2160,7 @@ class LaTeXTranslator(SphinxTranslator):
         self.body.append(']')
 
     def visit_footnotetext(self, node: Element) -> None:
-        label = cast(nodes.label, node[0])
+        label = cast('nodes.label', node[0])
         self.body.append('%' + CR)
         self.body.append(r'\begin{footnotetext}[%s]' % label.astext())
         self.body.append(r'\sphinxAtStartFootnote' + CR)
@@ -2,7 +2,6 @@
 
 from __future__ import annotations
 
-from collections.abc import Iterable
 from typing import TYPE_CHECKING, Any, cast
 
 from docutils import nodes
@@ -17,6 +16,8 @@ from sphinx.util.i18n import format_date
 from sphinx.util.nodes import NodeMatcher
 
 if TYPE_CHECKING:
+    from collections.abc import Iterable
+
     from docutils.nodes import Element
 
     from sphinx.builders import Builder
@@ -33,7 +34,7 @@ class ManualPageWriter(Writer): # type: ignore[misc]
         transform = NestedInlineTransform(self.document)
         transform.apply()
         visitor = self.builder.create_translator(self.document, self.builder)
-        self.visitor = cast(ManualPageTranslator, visitor)
+        self.visitor = cast('ManualPageTranslator', visitor)
         self.document.walkabout(visitor)
         self.output = self.visitor.astext()
 
@@ -277,7 +278,7 @@ class ManualPageTranslator(SphinxTranslator, BaseTranslator): # type: ignore[mi
         self.ensure_eol()
         self.in_productionlist += 1
         self.body.append('.sp\n.nf\n')
-        productionlist = cast(Iterable[addnodes.production], node)
+        productionlist = cast('Iterable[addnodes.production]', node)
         names = (production['tokenname'] for production in productionlist)
         maxlen = max(len(name) for name in names)
         lastname = None
@@ -379,11 +380,11 @@ class ManualPageTranslator(SphinxTranslator, BaseTranslator): # type: ignore[mi
         pass
 
     def visit_acks(self, node: Element) -> None:
-        bullet_list = cast(nodes.bullet_list, node[0])
-        list_items = cast(Iterable[nodes.list_item], bullet_list)
+        bullet_list = cast('nodes.bullet_list', node[0])
+        list_items = cast('Iterable[nodes.list_item]', bullet_list)
         self.ensure_eol()
-        bullet_list = cast(nodes.bullet_list, node[0])
-        list_items = cast(Iterable[nodes.list_item], bullet_list)
+        bullet_list = cast('nodes.bullet_list', node[0])
+        list_items = cast('Iterable[nodes.list_item]', bullet_list)
         self.body.append(', '.join(n.astext() for n in list_items) + '.')
         self.body.append('\n')
         raise nodes.SkipNode
@@ -5,7 +5,6 @@ from __future__ import annotations
 import os.path
 import re
 import textwrap
-from collections.abc import Iterable, Iterator
 from typing import TYPE_CHECKING, Any, ClassVar, cast
 
 from docutils import nodes, writers
@@ -19,6 +18,8 @@ from sphinx.util.i18n import format_date
 from sphinx.writers.latex import collected_footnote
 
 if TYPE_CHECKING:
+    from collections.abc import Iterable, Iterator
+
     from docutils.nodes import Element, Node, Text
 
     from sphinx.builders.texinfo import TexinfoBuilder
@@ -133,7 +134,7 @@ class TexinfoWriter(writers.Writer): # type: ignore[type-arg]
     def translate(self) -> None:
         assert isinstance(self.document, nodes.document)
         visitor = self.builder.create_translator(self.document, self.builder)
-        self.visitor = cast(TexinfoTranslator, visitor)
+        self.visitor = cast('TexinfoTranslator', visitor)
         self.document.walkabout(visitor)
         self.visitor.finish()
         for attr in self.visitor_attributes:
@@ -288,7 +289,7 @@ class TexinfoTranslator(SphinxTranslator):
         ]
         # each section is also a node
         for section in self.document.findall(nodes.section):
-            title = cast(nodes.TextElement, section.next_node(nodes.Titular)) # type: ignore[type-var]
+            title = cast('nodes.TextElement', section.next_node(nodes.Titular)) # type: ignore[type-var]
             name = title.astext() if title else '<untitled>'
             section['node_name'] = add_node_name(name)
 
@@ -530,7 +531,7 @@ class TexinfoTranslator(SphinxTranslator):
 
         fnotes: dict[str, list[collected_footnote | bool]] = {}
         for fn in footnotes_under(node):
-            label = cast(nodes.label, fn[0])
+            label = cast('nodes.label', fn[0])
             num = label.astext().strip()
             fnotes[num] = [collected_footnote('', *fn.children), False]
         return fnotes
@@ -609,7 +610,7 @@ class TexinfoTranslator(SphinxTranslator):
             self.add_anchor(id, node)
 
         self.next_section_ids.clear()
-        self.previous_section = cast(nodes.section, node)
+        self.previous_section = cast('nodes.section', node)
         self.section_level += 1
 
     def depart_section(self, node: Element) -> None:
@@ -1110,7 +1111,7 @@ class TexinfoTranslator(SphinxTranslator):
 
     def visit_admonition(self, node: Element, name: str = '') -> None:
         if not name:
-            title = cast(nodes.title, node[0])
+            title = cast('nodes.title', node[0])
             name = self.escape(title.astext())
         self.body.append('\n@cartouche\n@quotation %s ' % name)
 
@@ -1173,7 +1174,7 @@ class TexinfoTranslator(SphinxTranslator):
         # ignore TOC's since we have to have a "menu" anyway
         if 'contents' in node.get('classes', []):
             raise nodes.SkipNode
-        title = cast(nodes.title, node[0])
+        title = cast('nodes.title', node[0])
         self.visit_rubric(title)
         self.body.append('%s\n' % self.escape(title.astext()))
         self.depart_rubric(title)
@@ -1307,7 +1308,7 @@ class TexinfoTranslator(SphinxTranslator):
 
     def visit_productionlist(self, node: Element) -> None:
         self.visit_literal_block(None)
-        productionlist = cast(Iterable[addnodes.production], node)
+        productionlist = cast('Iterable[addnodes.production]', node)
         names = (production['tokenname'] for production in productionlist)
         maxlen = max(len(name) for name in names)
 
@@ -1388,8 +1389,8 @@ class TexinfoTranslator(SphinxTranslator):
         pass
 
     def visit_acks(self, node: Element) -> None:
-        bullet_list = cast(nodes.bullet_list, node[0])
-        list_items = cast(Iterable[nodes.list_item], bullet_list)
+        bullet_list = cast('nodes.bullet_list', node[0])
+        list_items = cast('Iterable[nodes.list_item]', bullet_list)
         self.body.append('\n\n')
         self.body.append(', '.join(n.astext() for n in list_items) + '.')
         self.body.append('\n\n')
@@ -6,7 +6,6 @@ import math
 import os
 import re
 import textwrap
-from collections.abc import Iterable, Iterator, Sequence
 from itertools import chain, groupby, pairwise
 from typing import TYPE_CHECKING, Any, ClassVar, cast
 
@@ -18,6 +17,8 @@ from sphinx.locale import _, admonitionlabels
 from sphinx.util.docutils import SphinxTranslator
 
 if TYPE_CHECKING:
+    from collections.abc import Iterable, Iterator, Sequence
+
     from docutils.nodes import Element, Text
 
     from sphinx.builders.text import TextBuilder
@@ -381,7 +382,7 @@ class TextWriter(writers.Writer): # type: ignore[type-arg]
         assert isinstance(self.document, nodes.document)
         visitor = self.builder.create_translator(self.document, self.builder)
         self.document.walkabout(visitor)
-        self.output = cast(TextTranslator, visitor).body
+        self.output = cast('TextTranslator', visitor).body
 
 
 class TextTranslator(SphinxTranslator):
@@ -776,7 +777,7 @@ class TextTranslator(SphinxTranslator):
 
     def visit_productionlist(self, node: Element) -> None:
         self.new_state()
-        productionlist = cast(Iterable[addnodes.production], node)
+        productionlist = cast('Iterable[addnodes.production]', node)
         names = (production['tokenname'] for production in productionlist)
         maxlen = max(len(name) for name in names)
         lastname = None
@@ -791,7 +792,7 @@ class TextTranslator(SphinxTranslator):
         raise nodes.SkipNode
 
     def visit_footnote(self, node: Element) -> None:
-        label = cast(nodes.label, node[0])
+        label = cast('nodes.label', node[0])
         self._footnote = label.astext().strip()
         self.new_state(len(self._footnote) + 3)
 
@@ -923,8 +924,8 @@ class TextTranslator(SphinxTranslator):
         self.end_state(wrap=False)
 
     def visit_acks(self, node: Element) -> None:
-        bullet_list = cast(nodes.bullet_list, node[0])
-        list_items = cast(Iterable[nodes.list_item], bullet_list)
+        bullet_list = cast('nodes.bullet_list', node[0])
+        list_items = cast('Iterable[nodes.list_item]', bullet_list)
         self.new_state(0)
         self.add_text(', '.join(n.astext() for n in list_items) + '.')
         self.end_state()