mirror of https://github.com/sphinx-doc/sphinx.git
synced 2025-02-25 18:55:22 -06:00

Enable automatic formatting for `sphinx/transforms/` (#12971)

This commit is contained in:
parent 3a066f2bbc
commit a6e449094a
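Every hunk below is a formatting-only rewrite: line breaks, quote style, trailing commas, and the occasional `# fmt: skip` escape hatch, with no behaviour changes. A quick way to convince yourself that such a reformat is behaviour-preserving is to compare a file's AST before and after. A minimal sketch using only the standard library (the helper name and sample strings are illustrative, not part of the commit):

import ast


def same_ast(before: str, after: str) -> bool:
    # Two sources with identical ASTs differ only in layout, quoting,
    # and trailing commas, not in behaviour.
    return ast.dump(ast.parse(before)) == ast.dump(ast.parse(after))


old = 'x = frobnicate(a,\n        b)\n'
new = 'x = frobnicate(\n    a,\n    b,\n)\n'
print(same_ast(old, new))  # True: a pure reformat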
@@ -475,6 +475,5 @@ exclude = [
     "sphinx/ext/todo.py",
     "sphinx/ext/viewcode.py",
     "sphinx/registry.py",
-    "sphinx/transforms/*",
     "sphinx/writers/*",
 ]
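With `sphinx/transforms/*` gone from the `exclude` list, the formatter now owns every file under that directory; assuming this is ruff's format table (the single-quote and trailing-comma changes below match ruff's style), a run of `ruff format --check sphinx/transforms/` should come back clean after this commit.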
@@ -120,7 +120,9 @@ class DefaultSubstitutions(SphinxTransform):
             if (name := ref['refname']) in to_handle:
                 ref.replace_self(self._handle_default_substitution(name))
 
-    def _handle_default_substitution(self, name: _DEFAULT_SUBSTITUTION_NAMES) -> nodes.Text:
+    def _handle_default_substitution(
+        self, name: _DEFAULT_SUBSTITUTION_NAMES
+    ) -> nodes.Text:
         if name == 'translation progress':
             # special handling: calculate translation progress
             return nodes.Text(_calculate_translation_progress(self.document))
@@ -128,10 +130,8 @@ class DefaultSubstitutions(SphinxTransform):
             if text := self.config.today:
                 return nodes.Text(text)
             # special handling: can also specify a strftime format
-            return nodes.Text(format_date(
-                self.config.today_fmt or _('%b %d, %Y'),
-                language=self.config.language,
-            ))
+            today_fmt = self.config.today_fmt or _('%b %d, %Y')
+            return nodes.Text(format_date(today_fmt, language=self.config.language))
         # config.version and config.release
         return nodes.Text(getattr(self.config, name))
 
@@ -185,8 +185,7 @@ class HandleCodeBlocks(SphinxTransform):
     def apply(self, **kwargs: Any) -> None:
         # move doctest blocks out of blockquotes
         for node in self.document.findall(nodes.block_quote):
-            if all(isinstance(child, nodes.doctest_block) for child
-                   in node.children):
+            if all(isinstance(child, nodes.doctest_block) for child in node.children):
                 node.replace_self(node.children)
         # combine successive doctest blocks
         # for node in self.document.findall(nodes.doctest_block):
@@ -211,9 +210,11 @@ class AutoNumbering(SphinxTransform):
         domain: StandardDomain = self.env.domains.standard_domain
 
         for node in self.document.findall(nodes.Element):
-            if (domain.is_enumerable_node(node) and
-                    domain.get_numfig_title(node) is not None and
-                    node['ids'] == []):
+            if (
+                domain.is_enumerable_node(node)
+                and domain.get_numfig_title(node) is not None
+                and node['ids'] == []
+            ):
                 self.document.note_implicit_target(node)
 
 
@@ -262,8 +263,13 @@ class AutoIndexUpgrader(SphinxTransform):
     def apply(self, **kwargs: Any) -> None:
         for node in self.document.findall(addnodes.index):
             if 'entries' in node and any(len(entry) == 4 for entry in node['entries']):
-                msg = __('4 column based index found. '
-                         'It might be a bug of extensions you use: %r') % node['entries']
+                msg = (
+                    __(
+                        '4 column based index found. '
+                        'It might be a bug of extensions you use: %r'
+                    )
+                    % node['entries']
+                )
                 logger.warning(msg, location=node)
                 for i, entry in enumerate(node['entries']):
                     if len(entry) == 4:
@@ -302,13 +308,13 @@ class UnreferencedFootnotesDetector(SphinxTransform):
             # note we do not warn on duplicate footnotes here
             # (i.e. where the name has been moved to dupnames)
             # since this is already reported by docutils
-            if not node['backrefs'] and node["names"]:
+            if not node['backrefs'] and node['names']:
                 logger.warning(
                     __('Footnote [%s] is not referenced.'),
                     node['names'][0] if node['names'] else node['dupnames'][0],
                     type='ref',
                     subtype='footnote',
-                    location=node
+                    location=node,
                 )
         for node in self.document.symbol_footnotes:
             if not node['backrefs']:
@@ -316,18 +322,18 @@ class UnreferencedFootnotesDetector(SphinxTransform):
                     __('Footnote [*] is not referenced.'),
                     type='ref',
                     subtype='footnote',
-                    location=node
+                    location=node,
                 )
         for node in self.document.autofootnotes:
             # note we do not warn on duplicate footnotes here
             # (i.e. where the name has been moved to dupnames)
             # since this is already reported by docutils
-            if not node['backrefs'] and node["names"]:
+            if not node['backrefs'] and node['names']:
                 logger.warning(
                     __('Footnote [#] is not referenced.'),
                     type='ref',
                     subtype='footnote',
-                    location=node
+                    location=node,
                 )
 
 
@@ -403,10 +409,7 @@ class SphinxSmartQuotes(SmartQuotes, SphinxTransform):
 
         # confirm selected language supports smart_quotes or not
        language = self.env.settings['language_code']
-        return any(
-            tag in smartchars.quotes
-            for tag in normalize_language_tag(language)
-        )
+        return any(tag in smartchars.quotes for tag in normalize_language_tag(language))
 
     def get_tokens(self, txtnodes: list[Text]) -> Iterator[tuple[str, str]]:
         # A generator that yields ``(texttype, nodetext)`` tuples for a list
@@ -439,13 +442,13 @@ class GlossarySorter(SphinxTransform):
 
     def apply(self, **kwargs: Any) -> None:
         for glossary in self.document.findall(addnodes.glossary):
-            if glossary["sorted"]:
+            if glossary['sorted']:
                 definition_list = cast(nodes.definition_list, glossary[0])
                 definition_list[:] = sorted(
                     definition_list,
                     key=lambda item: unicodedata.normalize(
-                        'NFD',
-                        cast(nodes.term, item)[0].astext().lower()),
+                        'NFD', cast(nodes.term, item)[0].astext().lower()
+                    ),
                 )
 
 
@@ -508,7 +511,7 @@ def _reorder_index_target_nodes(start_node: nodes.target) -> None:
     first_idx = parent.index(nodes_to_reorder[0])
     last_idx = parent.index(nodes_to_reorder[-1])
     if first_idx + len(nodes_to_reorder) - 1 == last_idx:
-        parent[first_idx:last_idx + 1] = sorted(nodes_to_reorder, key=_sort_key)
+        parent[first_idx : last_idx + 1] = sorted(nodes_to_reorder, key=_sort_key)
 
 
 def _sort_key(node: nodes.Node) -> int:
@@ -30,8 +30,9 @@ class RefOnlyListChecker(nodes.GenericNodeVisitor):
         pass
 
     def visit_list_item(self, node: nodes.list_item) -> None:
-        children: list[Node] = [child for child in node.children
-                                if not isinstance(child, nodes.Invisible)]
+        children: list[Node] = [
+            child for child in node.children if not isinstance(child, nodes.Invisible)
+        ]
         if len(children) != 1:
             raise nodes.NodeFound
         if not isinstance(children[0], nodes.paragraph):
@@ -48,8 +48,14 @@ EXCLUDED_PENDING_XREF_ATTRIBUTES = ('refexplicit',)
 N = TypeVar('N', bound=nodes.Node)
 
 
-def publish_msgstr(app: Sphinx, source: str, source_path: str, source_line: int,
-                   config: Config, settings: Any) -> nodes.Element:
+def publish_msgstr(
+    app: Sphinx,
+    source: str,
+    source_path: str,
+    source_line: int,
+    config: Config,
+    settings: Any,
+) -> nodes.Element:
     """Publish msgstr (single line) into docutils document
 
     :param sphinx.application.Sphinx app: sphinx application
@@ -67,13 +73,15 @@ def publish_msgstr(app: Sphinx, source: str, source_path: str, source_line: int,
     config.rst_prolog = None
 
     from sphinx.io import SphinxI18nReader
+
     reader = SphinxI18nReader()
     reader.setup(app)
     filetype = get_filetype(config.source_suffix, source_path)
     parser = app.registry.create_source_parser(app, filetype)
     doc = reader.read(
-        source=StringInput(source=source,
-                           source_path=f"{source_path}:{source_line}:<translated>"),
+        source=StringInput(
+            source=source, source_path=f'{source_path}:{source_line}:<translated>'
+        ),
         parser=parser,
         settings=settings,
     )
@@ -85,7 +93,7 @@ def publish_msgstr(app: Sphinx, source: str, source_path: str, source_line: int,
 
 
 def parse_noqa(source: str) -> tuple[str, bool]:
-    m = match(r"(.*)(?<!\\)#\s*noqa\s*$", source, DOTALL)
+    m = match(r'(.*)(?<!\\)#\s*noqa\s*$', source, DOTALL)
     if m:
         return m.group(1), True
     else:
@@ -108,29 +116,43 @@ class _NodeUpdater:
     """Contains logic for updating one node with the translated content."""
 
     def __init__(
-        self, node: nodes.Element, patch: nodes.Element, document: nodes.document, noqa: bool,
+        self,
+        node: nodes.Element,
+        patch: nodes.Element,
+        document: nodes.document,
+        noqa: bool,
     ) -> None:
         self.node: nodes.Element = node
         self.patch: nodes.Element = patch
         self.document: nodes.document = document
         self.noqa: bool = noqa
 
-    def compare_references(self, old_refs: Sequence[nodes.Element],
-                           new_refs: Sequence[nodes.Element],
-                           warning_msg: str) -> None:
+    def compare_references(
+        self,
+        old_refs: Sequence[nodes.Element],
+        new_refs: Sequence[nodes.Element],
+        warning_msg: str,
+    ) -> None:
         """Warn about mismatches between references in original and translated content."""
         # FIXME: could use a smarter strategy than len(old_refs) == len(new_refs)
         if not self.noqa and len(old_refs) != len(new_refs):
             old_ref_rawsources = [ref.rawsource for ref in old_refs]
             new_ref_rawsources = [ref.rawsource for ref in new_refs]
-            logger.warning(warning_msg.format(old_ref_rawsources, new_ref_rawsources),
-                           location=self.node, type='i18n', subtype='inconsistent_references')
+            logger.warning(
+                warning_msg.format(old_ref_rawsources, new_ref_rawsources),
+                location=self.node,
+                type='i18n',
+                subtype='inconsistent_references',
+            )
 
     def update_title_mapping(self) -> bool:
         processed = False  # skip flag
 
         # update title(section) target name-id mapping
-        if isinstance(self.node, nodes.title) and isinstance(self.node.parent, nodes.section):
+        if (
+            isinstance(self.node, nodes.title)
+            and isinstance(self.node.parent, nodes.section)
+        ):  # fmt: skip
             section_node = self.node.parent
             new_name = nodes.fully_normalize_name(self.patch.astext())
             old_name = nodes.fully_normalize_name(self.node.astext())
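The `):  # fmt: skip` above is the formatter's escape hatch: a trailing `# fmt: skip` comment tells ruff (and black) to leave that statement's hand-written layout untouched. A minimal illustration with made-up names, assuming default formatter settings:

threshold = 10

if (
    threshold > 0
    and threshold < 100
):  # fmt: skip
    # Without the trailing comment the formatter would collapse this short
    # condition onto one line; with it, the manual line breaks survive.
    print('within bounds')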
@@ -201,9 +223,14 @@ class _NodeUpdater:
         is_autofootnote_ref = NodeMatcher(nodes.footnote_reference, auto=Any)
         old_foot_refs = list(is_autofootnote_ref.findall(self.node))
         new_foot_refs = list(is_autofootnote_ref.findall(self.patch))
-        self.compare_references(old_foot_refs, new_foot_refs,
-                                __('inconsistent footnote references in translated message.'
-                                   ' original: {0}, translated: {1}'))
+        self.compare_references(
+            old_foot_refs,
+            new_foot_refs,
+            __(
+                'inconsistent footnote references in translated message.'
+                ' original: {0}, translated: {1}'
+            ),
+        )
         old_foot_namerefs: dict[str, list[nodes.footnote_reference]] = {}
         for r in old_foot_refs:
             old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
@@ -241,9 +268,14 @@ class _NodeUpdater:
         is_refnamed_ref = NodeMatcher(nodes.reference, refname=Any)
         old_refs = list(is_refnamed_ref.findall(self.node))
         new_refs = list(is_refnamed_ref.findall(self.patch))
-        self.compare_references(old_refs, new_refs,
-                                __('inconsistent references in translated message.'
-                                   ' original: {0}, translated: {1}'))
+        self.compare_references(
+            old_refs,
+            new_refs,
+            __(
+                'inconsistent references in translated message.'
+                ' original: {0}, translated: {1}'
+            ),
+        )
         old_ref_names = [r['refname'] for r in old_refs]
         new_ref_names = [r['refname'] for r in new_refs]
         orphans = [*({*old_ref_names} - {*new_ref_names})]
@@ -266,31 +298,41 @@ class _NodeUpdater:
         old_foot_refs = list(is_refnamed_footnote_ref.findall(self.node))
         new_foot_refs = list(is_refnamed_footnote_ref.findall(self.patch))
         refname_ids_map: dict[str, list[str]] = {}
-        self.compare_references(old_foot_refs, new_foot_refs,
-                                __('inconsistent footnote references in translated message.'
-                                   ' original: {0}, translated: {1}'))
+        self.compare_references(
+            old_foot_refs,
+            new_foot_refs,
+            __(
+                'inconsistent footnote references in translated message.'
+                ' original: {0}, translated: {1}'
+            ),
+        )
         for oldf in old_foot_refs:
-            refname_ids_map.setdefault(oldf["refname"], []).append(oldf["ids"])
+            refname_ids_map.setdefault(oldf['refname'], []).append(oldf['ids'])
         for newf in new_foot_refs:
-            refname = newf["refname"]
+            refname = newf['refname']
             if refname_ids_map.get(refname):
-                newf["ids"] = refname_ids_map[refname].pop(0)
+                newf['ids'] = refname_ids_map[refname].pop(0)
 
     def update_citation_references(self) -> None:
         # citation should use original 'ids'.
         is_citation_ref = NodeMatcher(nodes.citation_reference, refname=Any)
         old_cite_refs = list(is_citation_ref.findall(self.node))
         new_cite_refs = list(is_citation_ref.findall(self.patch))
-        self.compare_references(old_cite_refs, new_cite_refs,
-                                __('inconsistent citation references in translated message.'
-                                   ' original: {0}, translated: {1}'))
+        self.compare_references(
+            old_cite_refs,
+            new_cite_refs,
+            __(
+                'inconsistent citation references in translated message.'
+                ' original: {0}, translated: {1}'
+            ),
+        )
         refname_ids_map: dict[str, list[str]] = {}
         for oldc in old_cite_refs:
-            refname_ids_map.setdefault(oldc["refname"], []).append(oldc["ids"])
+            refname_ids_map.setdefault(oldc['refname'], []).append(oldc['ids'])
         for newc in new_cite_refs:
-            refname = newc["refname"]
+            refname = newc['refname']
             if refname_ids_map.get(refname):
-                newc["ids"] = refname_ids_map[refname].pop()
+                newc['ids'] = refname_ids_map[refname].pop()
 
     def update_pending_xrefs(self) -> None:
         # Original pending_xref['reftarget'] contain not-translated
@@ -298,20 +340,25 @@ class _NodeUpdater:
         # This code restricts to change ref-targets in the translation.
         old_xrefs = [*self.node.findall(addnodes.pending_xref)]
         new_xrefs = [*self.patch.findall(addnodes.pending_xref)]
-        self.compare_references(old_xrefs, new_xrefs,
-                                __('inconsistent term references in translated message.'
-                                   ' original: {0}, translated: {1}'))
+        self.compare_references(
+            old_xrefs,
+            new_xrefs,
+            __(
+                'inconsistent term references in translated message.'
+                ' original: {0}, translated: {1}'
+            ),
+        )
 
         xref_reftarget_map: dict[tuple[str, str, str] | None, dict[str, Any]] = {}
 
         def get_ref_key(node: addnodes.pending_xref) -> tuple[str, str, str] | None:
-            case = node["refdomain"], node["reftype"]
+            case = node['refdomain'], node['reftype']
             if case == ('std', 'term'):
                 return None
             else:
                 return (
-                    node["refdomain"],
-                    node["reftype"],
+                    node['refdomain'],
+                    node['reftype'],
                     node['reftarget'],
                 )
 
@@ -347,8 +394,10 @@ class Locale(SphinxTransform):
         textdomain = docname_to_domain(self.env.docname, self.config.gettext_compact)
 
         # fetch translations
-        dirs = [path.join(self.env.srcdir, directory)
-                for directory in self.config.locale_dirs]
+        dirs = [
+            path.join(self.env.srcdir, directory)
+            for directory in self.config.locale_dirs
+        ]
         catalog, has_catalog = init_locale(dirs, self.config.language, textdomain)
         if not has_catalog:
             return
@@ -393,8 +442,14 @@ class Locale(SphinxTransform):
             if isinstance(node, LITERAL_TYPE_NODES):
                 msgstr = '::\n\n' + indent(msgstr, ' ' * 3)
 
-            patch = publish_msgstr(self.app, msgstr, source,
-                                   node.line, self.config, settings)  # type: ignore[arg-type]
+            patch = publish_msgstr(
+                self.app,
+                msgstr,
+                source,
+                node.line,  # type: ignore[arg-type]
+                self.config,
+                settings,
+            )
             # FIXME: no warnings about inconsistent references in this part
             # XXX doctest and other block markup
             if not isinstance(patch, nodes.paragraph):
@@ -408,11 +463,21 @@ class Locale(SphinxTransform):
                 for _id in node['ids']:
                     term, first_classifier = split_term_classifiers(msgstr)
                     patch = publish_msgstr(
-                        self.app, term or '', source, node.line, self.config, settings,  # type: ignore[arg-type]
+                        self.app,
+                        term or '',
+                        source,
+                        node.line,  # type: ignore[arg-type]
+                        self.config,
+                        settings,
                     )
                     updater.patch = make_glossary_term(
-                        self.env, patch, first_classifier,
-                        source, node.line, _id, self.document,  # type: ignore[arg-type]
+                        self.env,
+                        patch,
+                        first_classifier,
+                        source,
+                        node.line,  # type: ignore[arg-type]
+                        _id,
+                        self.document,
                     )
                     processed = True
 
@@ -474,8 +539,14 @@ class Locale(SphinxTransform):
                 # This generates: <section ...><title>msgstr</title></section>
                 msgstr = msgstr + '\n' + '=' * len(msgstr) * 2
 
-                patch = publish_msgstr(self.app, msgstr, source,
-                                       node.line, self.config, settings)  # type: ignore[arg-type]
+                patch = publish_msgstr(
+                    self.app,
+                    msgstr,
+                    source,
+                    node.line,  # type: ignore[arg-type]
+                    self.config,
+                    settings,
+                )
                 # Structural Subelements phase2
                 if isinstance(node, nodes.title):
                     # get <title> node that placed as a first child
@@ -483,8 +554,8 @@ class Locale(SphinxTransform):
 
             # ignore unexpected markups in translation message
             unexpected: tuple[type[nodes.Element], ...] = (
-                nodes.paragraph,    # expected form of translation
-                nodes.title,        # generated by above "Subelements phase2"
+                nodes.paragraph,  # expected form of translation
+                nodes.title,  # generated by above "Subelements phase2"
             )
 
             # following types are expected if
|
|||||||
msgstr = part
|
msgstr = part
|
||||||
msgstr_parts.append(msgstr)
|
msgstr_parts.append(msgstr)
|
||||||
|
|
||||||
new_entry = entry_type, ';'.join(msgstr_parts), target_id, main, None
|
new_entry = (
|
||||||
|
entry_type,
|
||||||
|
';'.join(msgstr_parts),
|
||||||
|
target_id,
|
||||||
|
main,
|
||||||
|
None,
|
||||||
|
)
|
||||||
new_entries.append(new_entry)
|
new_entries.append(new_entry)
|
||||||
|
|
||||||
node['raw_entries'] = entries
|
node['raw_entries'] = entries
|
||||||
@@ -543,6 +620,7 @@ class TranslationProgressTotaliser(SphinxTransform):
 
     def apply(self, **kwargs: Any) -> None:
         from sphinx.builders.gettext import MessageCatalogBuilder
+
         if isinstance(self.app.builder, MessageCatalogBuilder):
             return
 
@@ -567,6 +645,7 @@ class AddTranslationClasses(SphinxTransform):
 
     def apply(self, **kwargs: Any) -> None:
         from sphinx.builders.gettext import MessageCatalogBuilder
+
         if isinstance(self.app.builder, MessageCatalogBuilder):
             return
 
@@ -582,8 +661,10 @@ class AddTranslationClasses(SphinxTransform):
             add_translated = False
             add_untranslated = True
         else:
-            msg = ('translation_progress_classes must be '
-                   'True, False, "translated" or "untranslated"')
+            msg = (
+                'translation_progress_classes must be '
+                'True, False, "translated" or "untranslated"'
+            )
             raise ConfigError(msg)
 
         for node in NodeMatcher(nodes.Element, translated=Any).findall(self.document):
@@ -604,6 +685,7 @@ class RemoveTranslatableInline(SphinxTransform):
 
     def apply(self, **kwargs: Any) -> None:
         from sphinx.builders.gettext import MessageCatalogBuilder
+
         if isinstance(self.app.builder, MessageCatalogBuilder):
             return
 
|
@ -66,7 +66,7 @@ class ReferencesResolver(SphinxPostTransform):
|
|||||||
|
|
||||||
def run(self, **kwargs: Any) -> None:
|
def run(self, **kwargs: Any) -> None:
|
||||||
for node in self.document.findall(addnodes.pending_xref):
|
for node in self.document.findall(addnodes.pending_xref):
|
||||||
content = self.find_pending_xref_condition(node, ("resolved", "*"))
|
content = self.find_pending_xref_condition(node, ('resolved', '*'))
|
||||||
if content:
|
if content:
|
||||||
contnode = cast(Element, content[0].deepcopy())
|
contnode = cast(Element, content[0].deepcopy())
|
||||||
else:
|
else:
|
||||||
@@ -87,16 +87,21 @@ class ReferencesResolver(SphinxPostTransform):
                     domain = self.env.domains[node['refdomain']]
                 except KeyError as exc:
                     raise NoUri(target, typ) from exc
-                newnode = domain.resolve_xref(self.env, refdoc, self.app.builder,
-                                              typ, target, node, contnode)
+                newnode = domain.resolve_xref(
+                    self.env, refdoc, self.app.builder, typ, target, node, contnode
+                )
             # really hardwired reference types
             elif typ == 'any':
                 newnode = self.resolve_anyref(refdoc, node, contnode)
             # no new node found? try the missing-reference event
             if newnode is None:
-                newnode = self.app.emit_firstresult('missing-reference', self.env,
-                                                    node, contnode,
-                                                    allowed_exceptions=(NoUri,))
+                newnode = self.app.emit_firstresult(
+                    'missing-reference',
+                    self.env,
+                    node,
+                    contnode,
+                    allowed_exceptions=(NoUri,),
+                )
             # still not found? warn if node wishes to be warned about or
             # we are in nitpicky mode
             if newnode is None:
@@ -108,69 +113,103 @@ class ReferencesResolver(SphinxPostTransform):
                 newnodes: list[Node] = [newnode]
             else:
                 newnodes = [contnode]
-            if newnode is None and isinstance(node[0], addnodes.pending_xref_condition):
-                matched = self.find_pending_xref_condition(node, ("*",))
+            if newnode is None and isinstance(
+                node[0], addnodes.pending_xref_condition
+            ):
+                matched = self.find_pending_xref_condition(node, ('*',))
                 if matched:
                     newnodes = matched
                 else:
-                    logger.warning(__('Could not determine the fallback text for the '
-                                      'cross-reference. Might be a bug.'), location=node)
+                    logger.warning(
+                        __(
+                            'Could not determine the fallback text for the '
+                            'cross-reference. Might be a bug.'
+                        ),
+                        location=node,
+                    )
 
             node.replace_self(newnodes)
 
     def resolve_anyref(
-        self, refdoc: str, node: pending_xref, contnode: Element,
+        self,
+        refdoc: str,
+        node: pending_xref,
+        contnode: Element,
     ) -> Element | None:
         """Resolve reference generated by the "any" role."""
         stddomain = self.env.domains.standard_domain
         target = node['reftarget']
         results: list[tuple[str, Element]] = []
         # first, try resolving as :doc:
-        doc_ref = stddomain.resolve_xref(self.env, refdoc, self.app.builder,
-                                         'doc', target, node, contnode)
+        doc_ref = stddomain.resolve_xref(
+            self.env, refdoc, self.app.builder, 'doc', target, node, contnode
+        )
         if doc_ref:
             results.append(('doc', doc_ref))
         # next, do the standard domain (makes this a priority)
-        results.extend(stddomain.resolve_any_xref(self.env, refdoc, self.app.builder,
-                                                  target, node, contnode))
+        results.extend(
+            stddomain.resolve_any_xref(
+                self.env, refdoc, self.app.builder, target, node, contnode
+            )
+        )
         for domain in self.env.domains.sorted():
             if domain.name == 'std':
                 continue  # we did this one already
             try:
-                results.extend(domain.resolve_any_xref(self.env, refdoc, self.app.builder,
-                                                       target, node, contnode))
+                results.extend(
+                    domain.resolve_any_xref(
+                        self.env, refdoc, self.app.builder, target, node, contnode
+                    )
+                )
             except NotImplementedError:
                 # the domain doesn't yet support the new interface
                 # we have to manually collect possible references (SLOW)
                 for role in domain.roles:
-                    res = domain.resolve_xref(self.env, refdoc, self.app.builder,
-                                              role, target, node, contnode)
+                    res = domain.resolve_xref(
+                        self.env, refdoc, self.app.builder, role, target, node, contnode
+                    )
                     if res and len(res) > 0 and isinstance(res[0], nodes.Element):
                         results.append((f'{domain.name}:{role}', res))
         # now, see how many matches we got...
         if not results:
             return None
         if len(results) > 1:
+
             def stringify(name: str, node: Element) -> str:
                 reftitle = node.get('reftitle', node.astext())
                 return f':{name}:`{reftitle}`'
+
             candidates = ' or '.join(starmap(stringify, results))
-            logger.warning(__("more than one target found for 'any' cross-"
-                              'reference %r: could be %s'), target, candidates,
-                           location=node)
+            logger.warning(
+                __(
+                    "more than one target found for 'any' cross-"
+                    'reference %r: could be %s'
+                ),
+                target,
+                candidates,
+                location=node,
+            )
         res_role, newnode = results[0]
         # Override "any" class with the actual role type to get the styling
         # approximately correct.
         res_domain = res_role.split(':')[0]
-        if (len(newnode) > 0 and
-                isinstance(newnode[0], nodes.Element) and
-                newnode[0].get('classes')):
+        if (
+            len(newnode) > 0
+            and isinstance(newnode[0], nodes.Element)
+            and newnode[0].get('classes')
+        ):
             newnode[0]['classes'].append(res_domain)
             newnode[0]['classes'].append(res_role.replace(':', '-'))
         return newnode
 
-    def warn_missing_reference(self, refdoc: str, typ: str, target: str,
-                               node: pending_xref, domain: Domain | None) -> None:
+    def warn_missing_reference(
+        self,
+        refdoc: str,
+        typ: str,
+        target: str,
+        node: pending_xref,
+        domain: Domain | None,
+    ) -> None:
         warn = node.get('refwarn')
         if self.config.nitpicky:
             warn = True
@@ -179,24 +218,26 @@ class ReferencesResolver(SphinxPostTransform):
             if (dtype, target) in self.config.nitpick_ignore:
                 warn = False
             # for "std" types also try without domain name
-            if (not domain or domain.name == 'std') and \
-               (typ, target) in self.config.nitpick_ignore:
+            if (
+                (not domain or domain.name == 'std')
+                and (typ, target) in self.config.nitpick_ignore
+            ):  # fmt: skip
                 warn = False
         if self.config.nitpick_ignore_regex:
+
             def matches_ignore(entry_type: str, entry_target: str) -> bool:
                 return any(
                     (
                         re.fullmatch(ignore_type, entry_type)
                         and re.fullmatch(ignore_target, entry_target)
                     )
-                    for ignore_type, ignore_target
-                    in self.config.nitpick_ignore_regex
+                    for ignore_type, ignore_target in self.config.nitpick_ignore_regex
                 )
+
             if matches_ignore(dtype, target):
                 warn = False
             # for "std" types also try without domain name
-            if (not domain or domain.name == 'std') and \
-               matches_ignore(typ, target):
+            if (not domain or domain.name == 'std') and matches_ignore(typ, target):
                 warn = False
         if not warn:
             return
@@ -206,14 +247,20 @@ class ReferencesResolver(SphinxPostTransform):
         elif domain and typ in domain.dangling_warnings:
             msg = domain.dangling_warnings[typ] % {'target': target}
         elif node.get('refdomain', 'std') not in ('', 'std'):
-            msg = (__('%s:%s reference target not found: %s') %
-                   (node['refdomain'], typ, target))
+            msg = __('%s:%s reference target not found: %s') % (
+                node['refdomain'],
+                typ,
+                target,
+            )
         else:
             msg = __('%r reference target not found: %s') % (typ, target)
         logger.warning(msg, location=node, type='ref', subtype=typ)
 
-    def find_pending_xref_condition(self, node: pending_xref, conditions: Sequence[str],
-                                    ) -> list[Node] | None:
+    def find_pending_xref_condition(
+        self,
+        node: pending_xref,
+        conditions: Sequence[str],
+    ) -> list[Node] | None:
         for condition in conditions:
             matched = find_pending_xref_condition(node, condition)
             if matched:
@@ -238,8 +285,10 @@ class SigElementFallbackTransform(SphinxPostTransform):
     default_priority = 200
 
     def run(self, **kwargs: Any) -> None:
-        def has_visitor(translator: type[nodes.NodeVisitor], node: type[Element]) -> bool:
-            return hasattr(translator, "visit_%s" % node.__name__)
+        def has_visitor(
+            translator: type[nodes.NodeVisitor], node: type[Element]
+        ) -> bool:
+            return hasattr(translator, 'visit_%s' % node.__name__)
 
         try:
             translator = self.app.builder.get_translator_class()
@@ -253,8 +302,10 @@ class SigElementFallbackTransform(SphinxPostTransform):
 
         # for the leaf elements (desc_sig_element), the translator should support _all_,
        # unless there exists a generic visit_desc_sig_element default visitor
-        if (not all(has_visitor(translator, node) for node in addnodes.SIG_ELEMENTS)
-                and not has_visitor(translator, addnodes.desc_sig_element)):
+        if (
+            not all(has_visitor(translator, node) for node in addnodes.SIG_ELEMENTS)
+            and not has_visitor(translator, addnodes.desc_sig_element)
+        ):  # fmt: skip
             self.fallback(addnodes.desc_sig_element)
 
         if not has_visitor(translator, addnodes.desc_inline):
@@ -37,8 +37,9 @@ class HighlightLanguageTransform(SphinxTransform):
     default_priority = 400
 
     def apply(self, **kwargs: Any) -> None:
-        visitor = HighlightLanguageVisitor(self.document,
-                                           self.config.highlight_language)
+        visitor = HighlightLanguageVisitor(
+            self.document, self.config.highlight_language
+        )
         self.document.walkabout(visitor)
 
         for node in list(self.document.findall(addnodes.highlightlang)):
@@ -70,9 +71,9 @@ class HighlightLanguageVisitor(nodes.NodeVisitor):
         self.settings.pop()
 
     def visit_highlightlang(self, node: addnodes.highlightlang) -> None:
-        self.settings[-1] = HighlightSetting(node['lang'],
-                                             node['force'],
-                                             node['linenothreshold'])
+        self.settings[-1] = HighlightSetting(
+            node['lang'], node['force'], node['linenothreshold']
+        )
 
     def visit_literal_block(self, node: nodes.literal_block) -> None:
         setting = self.settings[-1]
@@ -81,7 +82,7 @@ class HighlightLanguageVisitor(nodes.NodeVisitor):
             node['force'] = setting.force
             if 'linenos' not in node:
                 lines = node.astext().count('\n')
-                node['linenos'] = (lines >= setting.lineno_threshold - 1)
+                node['linenos'] = lines >= setting.lineno_threshold - 1
 
 
 class TrimDoctestFlagsTransform(SphinxTransform):
|
@ -63,8 +63,10 @@ class ImageDownloader(BaseImageConverter):
|
|||||||
basename = basename.split('?')[0]
|
basename = basename.split('?')[0]
|
||||||
if basename == '' or len(basename) > MAX_FILENAME_LEN:
|
if basename == '' or len(basename) > MAX_FILENAME_LEN:
|
||||||
filename, ext = os.path.splitext(node['uri'])
|
filename, ext = os.path.splitext(node['uri'])
|
||||||
basename = sha1(filename.encode(), usedforsecurity=False).hexdigest() + ext
|
basename = (
|
||||||
basename = CRITICAL_PATH_CHAR_RE.sub("_", basename)
|
sha1(filename.encode(), usedforsecurity=False).hexdigest() + ext
|
||||||
|
)
|
||||||
|
basename = CRITICAL_PATH_CHAR_RE.sub('_', basename)
|
||||||
|
|
||||||
uri_hash = sha1(node['uri'].encode(), usedforsecurity=False).hexdigest()
|
uri_hash = sha1(node['uri'].encode(), usedforsecurity=False).hexdigest()
|
||||||
path = Path(self.imagedir, uri_hash, basename)
|
path = Path(self.imagedir, uri_hash, basename)
|
||||||
@@ -83,7 +85,8 @@ class ImageDownloader(BaseImageConverter):
 
         config = self.app.config
         r = requests.get(
-            node['uri'], headers=headers,
+            node['uri'],
+            headers=headers,
             _user_agent=config.user_agent,
             _tls_info=(config.tls_verify, config.tls_cacerts),
         )
@@ -134,8 +137,9 @@ class DataURIExtractor(BaseImageConverter):
         assert image is not None
         ext = get_image_extension(image.mimetype)
         if ext is None:
-            logger.warning(__('Unknown image format: %s...'), node['uri'][:32],
-                           location=node)
+            logger.warning(
+                __('Unknown image format: %s...'), node['uri'][:32], location=node
+            )
             return
 
         ensuredir(os.path.join(self.imagedir, 'embeded'))
|
|||||||
|
|
||||||
def get_filename_for(filename: str, mimetype: str) -> str:
|
def get_filename_for(filename: str, mimetype: str) -> str:
|
||||||
basename = os.path.basename(filename)
|
basename = os.path.basename(filename)
|
||||||
basename = CRITICAL_PATH_CHAR_RE.sub("_", basename)
|
basename = CRITICAL_PATH_CHAR_RE.sub('_', basename)
|
||||||
return os.path.splitext(basename)[0] + (get_image_extension(mimetype) or '')
|
return os.path.splitext(basename)[0] + (get_image_extension(mimetype) or '')
|
||||||
|
|
||||||
|
|
||||||
@@ -206,7 +210,9 @@ class ImageConverter(BaseImageConverter):
             return False
         if '?' in node['candidates']:
             return False
-        if set(self.guess_mimetypes(node)) & set(self.app.builder.supported_image_types):
+        node_mime_types = set(self.guess_mimetypes(node))
+        supported_image_types = set(self.app.builder.supported_image_types)
+        if node_mime_types & supported_image_types:
             # builder supports the image; no need to convert
             return False
         if self.available is None: