Mirror of https://github.com/sphinx-doc/sphinx.git (synced 2025-02-25 18:55:22 -06:00)
Enable automatic formatting for `sphinx/transforms/` (#12971)
parent 3a066f2bbc
commit a6e449094a
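
This is a formatting-only change: the first hunk below removes the `"sphinx/transforms/*"` glob from the formatter's `exclude` list, and the remaining hunks are the mechanical reformatting that `ruff format sphinx/transforms/` then produces. The rewrites are the usual formatter moves: long signatures and calls are exploded to one argument per line with trailing commas, double-quoted strings are normalised to single quotes, and redundant parentheses are dropped. A few multi-line conditions keep their hand-chosen layout via trailing `# fmt: skip` markers.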
@@ -475,6 +475,5 @@ exclude = [
     "sphinx/ext/todo.py",
     "sphinx/ext/viewcode.py",
     "sphinx/registry.py",
-    "sphinx/transforms/*",
     "sphinx/writers/*",
 ]
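
Since the intent is that no hunk below changes behaviour, one quick way to vet the reformatting is to check that each file still parses to the same AST, which is blind to whitespace, comments, and quote style. A minimal sketch, with `before_src` and `after_src` assumed to hold one file's text from the parent commit (3a066f2bbc) and from this commit; the helper is illustrative, not part of the change:

import ast


def same_semantics(before_src: str, after_src: str) -> bool:
    # Layout, comments, and quote style never reach the AST, so a
    # formatting-only edit must leave the dumped tree identical.
    return ast.dump(ast.parse(before_src)) == ast.dump(ast.parse(after_src))

Black's default "safe" mode applies an equivalent AST check after every reformat; running the same comparison over these files is a cheap way to confirm the diff is purely cosmetic.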
@@ -120,7 +120,9 @@ class DefaultSubstitutions(SphinxTransform):
             if (name := ref['refname']) in to_handle:
                 ref.replace_self(self._handle_default_substitution(name))
 
-    def _handle_default_substitution(self, name: _DEFAULT_SUBSTITUTION_NAMES) -> nodes.Text:
+    def _handle_default_substitution(
+        self, name: _DEFAULT_SUBSTITUTION_NAMES
+    ) -> nodes.Text:
         if name == 'translation progress':
             # special handling: calculate translation progress
             return nodes.Text(_calculate_translation_progress(self.document))
@@ -128,10 +130,8 @@ class DefaultSubstitutions(SphinxTransform):
             if text := self.config.today:
                 return nodes.Text(text)
             # special handling: can also specify a strftime format
-            return nodes.Text(format_date(
-                self.config.today_fmt or _('%b %d, %Y'),
-                language=self.config.language,
-            ))
+            today_fmt = self.config.today_fmt or _('%b %d, %Y')
+            return nodes.Text(format_date(today_fmt, language=self.config.language))
         # config.version and config.release
         return nodes.Text(getattr(self.config, name))
 
@@ -185,8 +185,7 @@ class HandleCodeBlocks(SphinxTransform):
     def apply(self, **kwargs: Any) -> None:
         # move doctest blocks out of blockquotes
         for node in self.document.findall(nodes.block_quote):
-            if all(isinstance(child, nodes.doctest_block) for child
-                   in node.children):
+            if all(isinstance(child, nodes.doctest_block) for child in node.children):
                 node.replace_self(node.children)
         # combine successive doctest blocks
         # for node in self.document.findall(nodes.doctest_block):
@@ -211,9 +210,11 @@ class AutoNumbering(SphinxTransform):
         domain: StandardDomain = self.env.domains.standard_domain
 
         for node in self.document.findall(nodes.Element):
-            if (domain.is_enumerable_node(node) and
-                    domain.get_numfig_title(node) is not None and
-                    node['ids'] == []):
+            if (
+                domain.is_enumerable_node(node)
+                and domain.get_numfig_title(node) is not None
+                and node['ids'] == []
+            ):
                 self.document.note_implicit_target(node)
 
 
@@ -262,8 +263,13 @@ class AutoIndexUpgrader(SphinxTransform):
     def apply(self, **kwargs: Any) -> None:
         for node in self.document.findall(addnodes.index):
             if 'entries' in node and any(len(entry) == 4 for entry in node['entries']):
-                msg = __('4 column based index found. '
-                         'It might be a bug of extensions you use: %r') % node['entries']
+                msg = (
+                    __(
+                        '4 column based index found. '
+                        'It might be a bug of extensions you use: %r'
+                    )
+                    % node['entries']
+                )
                 logger.warning(msg, location=node)
                 for i, entry in enumerate(node['entries']):
                     if len(entry) == 4:
@@ -302,13 +308,13 @@ class UnreferencedFootnotesDetector(SphinxTransform):
             # note we do not warn on duplicate footnotes here
             # (i.e. where the name has been moved to dupnames)
             # since this is already reported by docutils
-            if not node['backrefs'] and node["names"]:
+            if not node['backrefs'] and node['names']:
                 logger.warning(
                     __('Footnote [%s] is not referenced.'),
                     node['names'][0] if node['names'] else node['dupnames'][0],
                     type='ref',
                     subtype='footnote',
-                    location=node
+                    location=node,
                 )
         for node in self.document.symbol_footnotes:
             if not node['backrefs']:
@@ -316,18 +322,18 @@ class UnreferencedFootnotesDetector(SphinxTransform):
                     __('Footnote [*] is not referenced.'),
                     type='ref',
                     subtype='footnote',
-                    location=node
+                    location=node,
                 )
         for node in self.document.autofootnotes:
             # note we do not warn on duplicate footnotes here
             # (i.e. where the name has been moved to dupnames)
             # since this is already reported by docutils
-            if not node['backrefs'] and node["names"]:
+            if not node['backrefs'] and node['names']:
                 logger.warning(
                     __('Footnote [#] is not referenced.'),
                     type='ref',
                     subtype='footnote',
-                    location=node
+                    location=node,
                 )
 
 
@@ -403,10 +409,7 @@ class SphinxSmartQuotes(SmartQuotes, SphinxTransform):
 
         # confirm selected language supports smart_quotes or not
         language = self.env.settings['language_code']
-        return any(
-            tag in smartchars.quotes
-            for tag in normalize_language_tag(language)
-        )
+        return any(tag in smartchars.quotes for tag in normalize_language_tag(language))
 
     def get_tokens(self, txtnodes: list[Text]) -> Iterator[tuple[str, str]]:
         # A generator that yields ``(texttype, nodetext)`` tuples for a list
@@ -439,13 +442,13 @@ class GlossarySorter(SphinxTransform):
 
     def apply(self, **kwargs: Any) -> None:
         for glossary in self.document.findall(addnodes.glossary):
-            if glossary["sorted"]:
+            if glossary['sorted']:
                 definition_list = cast(nodes.definition_list, glossary[0])
                 definition_list[:] = sorted(
                     definition_list,
                     key=lambda item: unicodedata.normalize(
-                        'NFD',
-                        cast(nodes.term, item)[0].astext().lower()),
+                        'NFD', cast(nodes.term, item)[0].astext().lower()
+                    ),
                 )
 
 
@@ -508,7 +511,7 @@ def _reorder_index_target_nodes(start_node: nodes.target) -> None:
     first_idx = parent.index(nodes_to_reorder[0])
     last_idx = parent.index(nodes_to_reorder[-1])
     if first_idx + len(nodes_to_reorder) - 1 == last_idx:
-        parent[first_idx:last_idx + 1] = sorted(nodes_to_reorder, key=_sort_key)
+        parent[first_idx : last_idx + 1] = sorted(nodes_to_reorder, key=_sort_key)
 
 
 def _sort_key(node: nodes.Node) -> int:
@@ -30,8 +30,9 @@ class RefOnlyListChecker(nodes.GenericNodeVisitor):
         pass
 
     def visit_list_item(self, node: nodes.list_item) -> None:
-        children: list[Node] = [child for child in node.children
-                                if not isinstance(child, nodes.Invisible)]
+        children: list[Node] = [
+            child for child in node.children if not isinstance(child, nodes.Invisible)
+        ]
         if len(children) != 1:
             raise nodes.NodeFound
         if not isinstance(children[0], nodes.paragraph):
@@ -48,8 +48,14 @@ EXCLUDED_PENDING_XREF_ATTRIBUTES = ('refexplicit',)
 N = TypeVar('N', bound=nodes.Node)
 
 
-def publish_msgstr(app: Sphinx, source: str, source_path: str, source_line: int,
-                   config: Config, settings: Any) -> nodes.Element:
+def publish_msgstr(
+    app: Sphinx,
+    source: str,
+    source_path: str,
+    source_line: int,
+    config: Config,
+    settings: Any,
+) -> nodes.Element:
     """Publish msgstr (single line) into docutils document
 
     :param sphinx.application.Sphinx app: sphinx application
@@ -67,13 +73,15 @@ def publish_msgstr(app: Sphinx, source: str, source_path: str, source_line: int,
         config.rst_prolog = None
 
         from sphinx.io import SphinxI18nReader
+
         reader = SphinxI18nReader()
         reader.setup(app)
         filetype = get_filetype(config.source_suffix, source_path)
         parser = app.registry.create_source_parser(app, filetype)
         doc = reader.read(
-            source=StringInput(source=source,
-                               source_path=f"{source_path}:{source_line}:<translated>"),
+            source=StringInput(
+                source=source, source_path=f'{source_path}:{source_line}:<translated>'
+            ),
             parser=parser,
             settings=settings,
         )
@@ -85,7 +93,7 @@ def publish_msgstr(app: Sphinx, source: str, source_path: str, source_line: int,
 
 
 def parse_noqa(source: str) -> tuple[str, bool]:
-    m = match(r"(.*)(?<!\\)#\s*noqa\s*$", source, DOTALL)
+    m = match(r'(.*)(?<!\\)#\s*noqa\s*$', source, DOTALL)
     if m:
         return m.group(1), True
     else:
@@ -108,29 +116,43 @@ class _NodeUpdater:
     """Contains logic for updating one node with the translated content."""
 
     def __init__(
-        self, node: nodes.Element, patch: nodes.Element, document: nodes.document, noqa: bool,
+        self,
+        node: nodes.Element,
+        patch: nodes.Element,
+        document: nodes.document,
+        noqa: bool,
     ) -> None:
         self.node: nodes.Element = node
         self.patch: nodes.Element = patch
         self.document: nodes.document = document
         self.noqa: bool = noqa
 
-    def compare_references(self, old_refs: Sequence[nodes.Element],
-                           new_refs: Sequence[nodes.Element],
-                           warning_msg: str) -> None:
+    def compare_references(
+        self,
+        old_refs: Sequence[nodes.Element],
+        new_refs: Sequence[nodes.Element],
+        warning_msg: str,
+    ) -> None:
         """Warn about mismatches between references in original and translated content."""
         # FIXME: could use a smarter strategy than len(old_refs) == len(new_refs)
         if not self.noqa and len(old_refs) != len(new_refs):
             old_ref_rawsources = [ref.rawsource for ref in old_refs]
             new_ref_rawsources = [ref.rawsource for ref in new_refs]
-            logger.warning(warning_msg.format(old_ref_rawsources, new_ref_rawsources),
-                           location=self.node, type='i18n', subtype='inconsistent_references')
+            logger.warning(
+                warning_msg.format(old_ref_rawsources, new_ref_rawsources),
+                location=self.node,
+                type='i18n',
+                subtype='inconsistent_references',
+            )
 
     def update_title_mapping(self) -> bool:
         processed = False  # skip flag
 
         # update title(section) target name-id mapping
-        if isinstance(self.node, nodes.title) and isinstance(self.node.parent, nodes.section):
+        if (
+            isinstance(self.node, nodes.title)
+            and isinstance(self.node.parent, nodes.section)
+        ):  # fmt: skip
             section_node = self.node.parent
             new_name = nodes.fully_normalize_name(self.patch.astext())
             old_name = nodes.fully_normalize_name(self.node.astext())
@@ -201,9 +223,14 @@ class _NodeUpdater:
         is_autofootnote_ref = NodeMatcher(nodes.footnote_reference, auto=Any)
         old_foot_refs = list(is_autofootnote_ref.findall(self.node))
         new_foot_refs = list(is_autofootnote_ref.findall(self.patch))
-        self.compare_references(old_foot_refs, new_foot_refs,
-                                __('inconsistent footnote references in translated message.'
-                                   ' original: {0}, translated: {1}'))
+        self.compare_references(
+            old_foot_refs,
+            new_foot_refs,
+            __(
+                'inconsistent footnote references in translated message.'
+                ' original: {0}, translated: {1}'
+            ),
+        )
         old_foot_namerefs: dict[str, list[nodes.footnote_reference]] = {}
         for r in old_foot_refs:
             old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
@@ -241,9 +268,14 @@ class _NodeUpdater:
         is_refnamed_ref = NodeMatcher(nodes.reference, refname=Any)
         old_refs = list(is_refnamed_ref.findall(self.node))
         new_refs = list(is_refnamed_ref.findall(self.patch))
-        self.compare_references(old_refs, new_refs,
-                                __('inconsistent references in translated message.'
-                                   ' original: {0}, translated: {1}'))
+        self.compare_references(
+            old_refs,
+            new_refs,
+            __(
+                'inconsistent references in translated message.'
+                ' original: {0}, translated: {1}'
+            ),
+        )
         old_ref_names = [r['refname'] for r in old_refs]
         new_ref_names = [r['refname'] for r in new_refs]
         orphans = [*({*old_ref_names} - {*new_ref_names})]
@@ -266,31 +298,41 @@ class _NodeUpdater:
         old_foot_refs = list(is_refnamed_footnote_ref.findall(self.node))
         new_foot_refs = list(is_refnamed_footnote_ref.findall(self.patch))
         refname_ids_map: dict[str, list[str]] = {}
-        self.compare_references(old_foot_refs, new_foot_refs,
-                                __('inconsistent footnote references in translated message.'
-                                   ' original: {0}, translated: {1}'))
+        self.compare_references(
+            old_foot_refs,
+            new_foot_refs,
+            __(
+                'inconsistent footnote references in translated message.'
+                ' original: {0}, translated: {1}'
+            ),
+        )
         for oldf in old_foot_refs:
-            refname_ids_map.setdefault(oldf["refname"], []).append(oldf["ids"])
+            refname_ids_map.setdefault(oldf['refname'], []).append(oldf['ids'])
         for newf in new_foot_refs:
-            refname = newf["refname"]
+            refname = newf['refname']
             if refname_ids_map.get(refname):
-                newf["ids"] = refname_ids_map[refname].pop(0)
+                newf['ids'] = refname_ids_map[refname].pop(0)
 
     def update_citation_references(self) -> None:
         # citation should use original 'ids'.
         is_citation_ref = NodeMatcher(nodes.citation_reference, refname=Any)
         old_cite_refs = list(is_citation_ref.findall(self.node))
         new_cite_refs = list(is_citation_ref.findall(self.patch))
-        self.compare_references(old_cite_refs, new_cite_refs,
-                                __('inconsistent citation references in translated message.'
-                                   ' original: {0}, translated: {1}'))
+        self.compare_references(
+            old_cite_refs,
+            new_cite_refs,
+            __(
+                'inconsistent citation references in translated message.'
+                ' original: {0}, translated: {1}'
+            ),
+        )
         refname_ids_map: dict[str, list[str]] = {}
         for oldc in old_cite_refs:
-            refname_ids_map.setdefault(oldc["refname"], []).append(oldc["ids"])
+            refname_ids_map.setdefault(oldc['refname'], []).append(oldc['ids'])
         for newc in new_cite_refs:
-            refname = newc["refname"]
+            refname = newc['refname']
             if refname_ids_map.get(refname):
-                newc["ids"] = refname_ids_map[refname].pop()
+                newc['ids'] = refname_ids_map[refname].pop()
 
     def update_pending_xrefs(self) -> None:
         # Original pending_xref['reftarget'] contain not-translated
@@ -298,20 +340,25 @@ class _NodeUpdater:
         # This code restricts to change ref-targets in the translation.
         old_xrefs = [*self.node.findall(addnodes.pending_xref)]
         new_xrefs = [*self.patch.findall(addnodes.pending_xref)]
-        self.compare_references(old_xrefs, new_xrefs,
-                                __('inconsistent term references in translated message.'
-                                   ' original: {0}, translated: {1}'))
+        self.compare_references(
+            old_xrefs,
+            new_xrefs,
+            __(
+                'inconsistent term references in translated message.'
+                ' original: {0}, translated: {1}'
+            ),
+        )
 
         xref_reftarget_map: dict[tuple[str, str, str] | None, dict[str, Any]] = {}
 
         def get_ref_key(node: addnodes.pending_xref) -> tuple[str, str, str] | None:
-            case = node["refdomain"], node["reftype"]
+            case = node['refdomain'], node['reftype']
             if case == ('std', 'term'):
                 return None
             else:
                 return (
-                    node["refdomain"],
-                    node["reftype"],
+                    node['refdomain'],
+                    node['reftype'],
                     node['reftarget'],
                 )
 
@@ -347,8 +394,10 @@ class Locale(SphinxTransform):
         textdomain = docname_to_domain(self.env.docname, self.config.gettext_compact)
 
         # fetch translations
-        dirs = [path.join(self.env.srcdir, directory)
-                for directory in self.config.locale_dirs]
+        dirs = [
+            path.join(self.env.srcdir, directory)
+            for directory in self.config.locale_dirs
+        ]
         catalog, has_catalog = init_locale(dirs, self.config.language, textdomain)
         if not has_catalog:
             return
@@ -393,8 +442,14 @@ class Locale(SphinxTransform):
             if isinstance(node, LITERAL_TYPE_NODES):
                 msgstr = '::\n\n' + indent(msgstr, ' ' * 3)
 
-            patch = publish_msgstr(self.app, msgstr, source,
-                                   node.line, self.config, settings)  # type: ignore[arg-type]
+            patch = publish_msgstr(
+                self.app,
+                msgstr,
+                source,
+                node.line,  # type: ignore[arg-type]
+                self.config,
+                settings,
+            )
             # FIXME: no warnings about inconsistent references in this part
             # XXX doctest and other block markup
             if not isinstance(patch, nodes.paragraph):
@@ -408,11 +463,21 @@ class Locale(SphinxTransform):
             for _id in node['ids']:
                 term, first_classifier = split_term_classifiers(msgstr)
                 patch = publish_msgstr(
-                    self.app, term or '', source, node.line, self.config, settings,  # type: ignore[arg-type]
+                    self.app,
+                    term or '',
+                    source,
+                    node.line,  # type: ignore[arg-type]
+                    self.config,
+                    settings,
                 )
                 updater.patch = make_glossary_term(
-                    self.env, patch, first_classifier,
-                    source, node.line, _id, self.document,  # type: ignore[arg-type]
+                    self.env,
+                    patch,
+                    first_classifier,
+                    source,
+                    node.line,  # type: ignore[arg-type]
+                    _id,
+                    self.document,
                 )
                 processed = True
 
@@ -474,8 +539,14 @@ class Locale(SphinxTransform):
                 # This generates: <section ...><title>msgstr</title></section>
                 msgstr = msgstr + '\n' + '=' * len(msgstr) * 2
 
-                patch = publish_msgstr(self.app, msgstr, source,
-                                       node.line, self.config, settings)  # type: ignore[arg-type]
+                patch = publish_msgstr(
+                    self.app,
+                    msgstr,
+                    source,
+                    node.line,  # type: ignore[arg-type]
+                    self.config,
+                    settings,
+                )
                 # Structural Subelements phase2
                 if isinstance(node, nodes.title):
                     # get <title> node that placed as a first child
@@ -483,8 +554,8 @@ class Locale(SphinxTransform):
 
             # ignore unexpected markups in translation message
             unexpected: tuple[type[nodes.Element], ...] = (
-                nodes.paragraph,    # expected form of translation
-                nodes.title,        # generated by above "Subelements phase2"
+                nodes.paragraph,  # expected form of translation
+                nodes.title,  # generated by above "Subelements phase2"
             )
 
             # following types are expected if
@@ -527,7 +598,13 @@ class Locale(SphinxTransform):
                     msgstr = part
                 msgstr_parts.append(msgstr)
 
-            new_entry = entry_type, ';'.join(msgstr_parts), target_id, main, None
+            new_entry = (
+                entry_type,
+                ';'.join(msgstr_parts),
+                target_id,
+                main,
+                None,
+            )
             new_entries.append(new_entry)
 
         node['raw_entries'] = entries
@@ -543,6 +620,7 @@ class TranslationProgressTotaliser(SphinxTransform):
 
     def apply(self, **kwargs: Any) -> None:
         from sphinx.builders.gettext import MessageCatalogBuilder
+
         if isinstance(self.app.builder, MessageCatalogBuilder):
             return
 
@@ -567,6 +645,7 @@ class AddTranslationClasses(SphinxTransform):
 
     def apply(self, **kwargs: Any) -> None:
         from sphinx.builders.gettext import MessageCatalogBuilder
+
         if isinstance(self.app.builder, MessageCatalogBuilder):
             return
 
@@ -582,8 +661,10 @@ class AddTranslationClasses(SphinxTransform):
             add_translated = False
             add_untranslated = True
         else:
-            msg = ('translation_progress_classes must be '
-                   'True, False, "translated" or "untranslated"')
+            msg = (
+                'translation_progress_classes must be '
+                'True, False, "translated" or "untranslated"'
+            )
             raise ConfigError(msg)
 
         for node in NodeMatcher(nodes.Element, translated=Any).findall(self.document):
@@ -604,6 +685,7 @@ class RemoveTranslatableInline(SphinxTransform):
 
    def apply(self, **kwargs: Any) -> None:
        from sphinx.builders.gettext import MessageCatalogBuilder
+
        if isinstance(self.app.builder, MessageCatalogBuilder):
            return
 
@@ -66,7 +66,7 @@ class ReferencesResolver(SphinxPostTransform):
 
     def run(self, **kwargs: Any) -> None:
         for node in self.document.findall(addnodes.pending_xref):
-            content = self.find_pending_xref_condition(node, ("resolved", "*"))
+            content = self.find_pending_xref_condition(node, ('resolved', '*'))
             if content:
                 contnode = cast(Element, content[0].deepcopy())
             else:
@@ -87,16 +87,21 @@ class ReferencesResolver(SphinxPostTransform):
                     domain = self.env.domains[node['refdomain']]
                 except KeyError as exc:
                     raise NoUri(target, typ) from exc
-                newnode = domain.resolve_xref(self.env, refdoc, self.app.builder,
-                                              typ, target, node, contnode)
+                newnode = domain.resolve_xref(
+                    self.env, refdoc, self.app.builder, typ, target, node, contnode
+                )
             # really hardwired reference types
             elif typ == 'any':
                 newnode = self.resolve_anyref(refdoc, node, contnode)
             # no new node found? try the missing-reference event
             if newnode is None:
-                newnode = self.app.emit_firstresult('missing-reference', self.env,
-                                                    node, contnode,
-                                                    allowed_exceptions=(NoUri,))
+                newnode = self.app.emit_firstresult(
+                    'missing-reference',
+                    self.env,
+                    node,
+                    contnode,
+                    allowed_exceptions=(NoUri,),
+                )
             # still not found? warn if node wishes to be warned about or
             # we are in nitpicky mode
             if newnode is None:
@@ -108,69 +113,103 @@ class ReferencesResolver(SphinxPostTransform):
                 newnodes: list[Node] = [newnode]
             else:
                 newnodes = [contnode]
-            if newnode is None and isinstance(node[0], addnodes.pending_xref_condition):
-                matched = self.find_pending_xref_condition(node, ("*",))
+            if newnode is None and isinstance(
+                node[0], addnodes.pending_xref_condition
+            ):
+                matched = self.find_pending_xref_condition(node, ('*',))
                 if matched:
                     newnodes = matched
                 else:
-                    logger.warning(__('Could not determine the fallback text for the '
-                                      'cross-reference. Might be a bug.'), location=node)
+                    logger.warning(
+                        __(
+                            'Could not determine the fallback text for the '
+                            'cross-reference. Might be a bug.'
+                        ),
+                        location=node,
+                    )
 
             node.replace_self(newnodes)
 
     def resolve_anyref(
-        self, refdoc: str, node: pending_xref, contnode: Element,
+        self,
+        refdoc: str,
+        node: pending_xref,
+        contnode: Element,
     ) -> Element | None:
         """Resolve reference generated by the "any" role."""
         stddomain = self.env.domains.standard_domain
         target = node['reftarget']
         results: list[tuple[str, Element]] = []
         # first, try resolving as :doc:
-        doc_ref = stddomain.resolve_xref(self.env, refdoc, self.app.builder,
-                                         'doc', target, node, contnode)
+        doc_ref = stddomain.resolve_xref(
+            self.env, refdoc, self.app.builder, 'doc', target, node, contnode
+        )
         if doc_ref:
             results.append(('doc', doc_ref))
         # next, do the standard domain (makes this a priority)
-        results.extend(stddomain.resolve_any_xref(self.env, refdoc, self.app.builder,
-                                                  target, node, contnode))
+        results.extend(
+            stddomain.resolve_any_xref(
+                self.env, refdoc, self.app.builder, target, node, contnode
+            )
+        )
         for domain in self.env.domains.sorted():
             if domain.name == 'std':
                 continue  # we did this one already
             try:
-                results.extend(domain.resolve_any_xref(self.env, refdoc, self.app.builder,
-                                                       target, node, contnode))
+                results.extend(
+                    domain.resolve_any_xref(
+                        self.env, refdoc, self.app.builder, target, node, contnode
+                    )
+                )
             except NotImplementedError:
                 # the domain doesn't yet support the new interface
                 # we have to manually collect possible references (SLOW)
                 for role in domain.roles:
-                    res = domain.resolve_xref(self.env, refdoc, self.app.builder,
-                                              role, target, node, contnode)
+                    res = domain.resolve_xref(
+                        self.env, refdoc, self.app.builder, role, target, node, contnode
+                    )
                     if res and len(res) > 0 and isinstance(res[0], nodes.Element):
                         results.append((f'{domain.name}:{role}', res))
         # now, see how many matches we got...
         if not results:
             return None
         if len(results) > 1:
+
             def stringify(name: str, node: Element) -> str:
                 reftitle = node.get('reftitle', node.astext())
                 return f':{name}:`{reftitle}`'
+
             candidates = ' or '.join(starmap(stringify, results))
-            logger.warning(__("more than one target found for 'any' cross-"
-                              'reference %r: could be %s'), target, candidates,
-                           location=node)
+            logger.warning(
+                __(
+                    "more than one target found for 'any' cross-"
+                    'reference %r: could be %s'
+                ),
+                target,
+                candidates,
+                location=node,
+            )
         res_role, newnode = results[0]
         # Override "any" class with the actual role type to get the styling
         # approximately correct.
         res_domain = res_role.split(':')[0]
-        if (len(newnode) > 0 and
-                isinstance(newnode[0], nodes.Element) and
-                newnode[0].get('classes')):
+        if (
+            len(newnode) > 0
+            and isinstance(newnode[0], nodes.Element)
+            and newnode[0].get('classes')
+        ):
             newnode[0]['classes'].append(res_domain)
             newnode[0]['classes'].append(res_role.replace(':', '-'))
         return newnode
 
-    def warn_missing_reference(self, refdoc: str, typ: str, target: str,
-                               node: pending_xref, domain: Domain | None) -> None:
+    def warn_missing_reference(
+        self,
+        refdoc: str,
+        typ: str,
+        target: str,
+        node: pending_xref,
+        domain: Domain | None,
+    ) -> None:
         warn = node.get('refwarn')
         if self.config.nitpicky:
             warn = True
@@ -179,24 +218,26 @@ class ReferencesResolver(SphinxPostTransform):
             if (dtype, target) in self.config.nitpick_ignore:
                 warn = False
             # for "std" types also try without domain name
-            if (not domain or domain.name == 'std') and \
-               (typ, target) in self.config.nitpick_ignore:
+            if (
+                (not domain or domain.name == 'std')
+                and (typ, target) in self.config.nitpick_ignore
+            ):  # fmt: skip
                 warn = False
         if self.config.nitpick_ignore_regex:
+
             def matches_ignore(entry_type: str, entry_target: str) -> bool:
                 return any(
                     (
                         re.fullmatch(ignore_type, entry_type)
                         and re.fullmatch(ignore_target, entry_target)
                     )
-                    for ignore_type, ignore_target
-                    in self.config.nitpick_ignore_regex
+                    for ignore_type, ignore_target in self.config.nitpick_ignore_regex
                 )
+
             if matches_ignore(dtype, target):
                 warn = False
             # for "std" types also try without domain name
-            if (not domain or domain.name == 'std') and \
-                    matches_ignore(typ, target):
+            if (not domain or domain.name == 'std') and matches_ignore(typ, target):
                 warn = False
         if not warn:
             return
@@ -206,14 +247,20 @@ class ReferencesResolver(SphinxPostTransform):
         elif domain and typ in domain.dangling_warnings:
             msg = domain.dangling_warnings[typ] % {'target': target}
         elif node.get('refdomain', 'std') not in ('', 'std'):
-            msg = (__('%s:%s reference target not found: %s') %
-                   (node['refdomain'], typ, target))
+            msg = __('%s:%s reference target not found: %s') % (
+                node['refdomain'],
+                typ,
+                target,
+            )
         else:
             msg = __('%r reference target not found: %s') % (typ, target)
         logger.warning(msg, location=node, type='ref', subtype=typ)
 
-    def find_pending_xref_condition(self, node: pending_xref, conditions: Sequence[str],
-                                    ) -> list[Node] | None:
+    def find_pending_xref_condition(
+        self,
+        node: pending_xref,
+        conditions: Sequence[str],
+    ) -> list[Node] | None:
         for condition in conditions:
             matched = find_pending_xref_condition(node, condition)
             if matched:
@@ -238,8 +285,10 @@ class SigElementFallbackTransform(SphinxPostTransform):
     default_priority = 200
 
     def run(self, **kwargs: Any) -> None:
-        def has_visitor(translator: type[nodes.NodeVisitor], node: type[Element]) -> bool:
-            return hasattr(translator, "visit_%s" % node.__name__)
+        def has_visitor(
+            translator: type[nodes.NodeVisitor], node: type[Element]
+        ) -> bool:
+            return hasattr(translator, 'visit_%s' % node.__name__)
 
         try:
             translator = self.app.builder.get_translator_class()
@@ -253,8 +302,10 @@ class SigElementFallbackTransform(SphinxPostTransform):
 
         # for the leaf elements (desc_sig_element), the translator should support _all_,
         # unless there exists a generic visit_desc_sig_element default visitor
-        if (not all(has_visitor(translator, node) for node in addnodes.SIG_ELEMENTS)
-                and not has_visitor(translator, addnodes.desc_sig_element)):
+        if (
+            not all(has_visitor(translator, node) for node in addnodes.SIG_ELEMENTS)
+            and not has_visitor(translator, addnodes.desc_sig_element)
+        ):  # fmt: skip
             self.fallback(addnodes.desc_sig_element)
 
         if not has_visitor(translator, addnodes.desc_inline):
@@ -37,8 +37,9 @@ class HighlightLanguageTransform(SphinxTransform):
     default_priority = 400
 
     def apply(self, **kwargs: Any) -> None:
-        visitor = HighlightLanguageVisitor(self.document,
-                                           self.config.highlight_language)
+        visitor = HighlightLanguageVisitor(
+            self.document, self.config.highlight_language
+        )
         self.document.walkabout(visitor)
 
         for node in list(self.document.findall(addnodes.highlightlang)):
@@ -70,9 +71,9 @@ class HighlightLanguageVisitor(nodes.NodeVisitor):
         self.settings.pop()
 
     def visit_highlightlang(self, node: addnodes.highlightlang) -> None:
-        self.settings[-1] = HighlightSetting(node['lang'],
-                                             node['force'],
-                                             node['linenothreshold'])
+        self.settings[-1] = HighlightSetting(
+            node['lang'], node['force'], node['linenothreshold']
+        )
 
     def visit_literal_block(self, node: nodes.literal_block) -> None:
         setting = self.settings[-1]
@@ -81,7 +82,7 @@ class HighlightLanguageVisitor(nodes.NodeVisitor):
             node['force'] = setting.force
             if 'linenos' not in node:
                 lines = node.astext().count('\n')
-                node['linenos'] = (lines >= setting.lineno_threshold - 1)
+                node['linenos'] = lines >= setting.lineno_threshold - 1
 
 
 class TrimDoctestFlagsTransform(SphinxTransform):
@@ -63,8 +63,10 @@ class ImageDownloader(BaseImageConverter):
         basename = basename.split('?')[0]
         if basename == '' or len(basename) > MAX_FILENAME_LEN:
             filename, ext = os.path.splitext(node['uri'])
-            basename = sha1(filename.encode(), usedforsecurity=False).hexdigest() + ext
-        basename = CRITICAL_PATH_CHAR_RE.sub("_", basename)
+            basename = (
+                sha1(filename.encode(), usedforsecurity=False).hexdigest() + ext
+            )
+        basename = CRITICAL_PATH_CHAR_RE.sub('_', basename)
 
         uri_hash = sha1(node['uri'].encode(), usedforsecurity=False).hexdigest()
         path = Path(self.imagedir, uri_hash, basename)
@@ -83,7 +85,8 @@ class ImageDownloader(BaseImageConverter):
 
         config = self.app.config
         r = requests.get(
-            node['uri'], headers=headers,
+            node['uri'],
+            headers=headers,
             _user_agent=config.user_agent,
             _tls_info=(config.tls_verify, config.tls_cacerts),
         )
@@ -134,8 +137,9 @@ class DataURIExtractor(BaseImageConverter):
         assert image is not None
         ext = get_image_extension(image.mimetype)
         if ext is None:
-            logger.warning(__('Unknown image format: %s...'), node['uri'][:32],
-                           location=node)
+            logger.warning(
+                __('Unknown image format: %s...'), node['uri'][:32], location=node
+            )
             return
 
         ensuredir(os.path.join(self.imagedir, 'embeded'))
@@ -155,7 +159,7 @@ class DataURIExtractor(BaseImageConverter):
 
 def get_filename_for(filename: str, mimetype: str) -> str:
     basename = os.path.basename(filename)
-    basename = CRITICAL_PATH_CHAR_RE.sub("_", basename)
+    basename = CRITICAL_PATH_CHAR_RE.sub('_', basename)
     return os.path.splitext(basename)[0] + (get_image_extension(mimetype) or '')
 
 
@@ -206,7 +210,9 @@ class ImageConverter(BaseImageConverter):
             return False
         if '?' in node['candidates']:
             return False
-        if set(self.guess_mimetypes(node)) & set(self.app.builder.supported_image_types):
+        node_mime_types = set(self.guess_mimetypes(node))
+        supported_image_types = set(self.app.builder.supported_image_types)
+        if node_mime_types & supported_image_types:
             # builder supports the image; no need to convert
             return False
         if self.available is None: