mirror of
https://github.com/sphinx-doc/sphinx.git
synced 2025-02-25 18:55:22 -06:00
Migrate to Node.findall() from Node.traverse()
Node.traverse() has been deprecated since docutils-0.18; Node.findall() was added as its successor. This change applies a patch to docutils-0.17 and older so that Node.findall() is available, and switches the codebase to use it.
This commit is contained in:
parent
daf57f2488
commit
05a898ecb4
@ -176,7 +176,7 @@ class Builder:
|
||||
def post_process_images(self, doctree: Node) -> None:
|
||||
"""Pick the best candidate for all image URIs."""
|
||||
images = ImageAdapter(self.env)
|
||||
for node in doctree.traverse(nodes.image):
|
||||
for node in doctree.findall(nodes.image):
|
||||
if '?' in node['candidates']:
|
||||
# don't rewrite nonlocal image URIs
|
||||
continue
|
||||
|
@ -277,7 +277,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
|
||||
new_ids.append(new_id)
|
||||
node['ids'] = new_ids
|
||||
|
||||
for reference in tree.traverse(nodes.reference):
|
||||
for reference in tree.findall(nodes.reference):
|
||||
if 'refuri' in reference:
|
||||
m = self.refuri_re.match(reference['refuri'])
|
||||
if m:
|
||||
@ -285,14 +285,14 @@ class EpubBuilder(StandaloneHTMLBuilder):
|
||||
if 'refid' in reference:
|
||||
reference['refid'] = self.fix_fragment('', reference['refid'])
|
||||
|
||||
for target in tree.traverse(nodes.target):
|
||||
for target in tree.findall(nodes.target):
|
||||
update_node_id(target)
|
||||
|
||||
next_node: Node = target.next_node(ascend=True)
|
||||
if isinstance(next_node, nodes.Element):
|
||||
update_node_id(next_node)
|
||||
|
||||
for desc_signature in tree.traverse(addnodes.desc_signature):
|
||||
for desc_signature in tree.findall(addnodes.desc_signature):
|
||||
update_node_id(desc_signature)
|
||||
|
||||
def add_visible_links(self, tree: nodes.document, show_urls: str = 'inline') -> None:
|
||||
@ -323,14 +323,14 @@ class EpubBuilder(StandaloneHTMLBuilder):
|
||||
# a) place them after the last existing footnote
|
||||
# b) place them after an (empty) Footnotes rubric
|
||||
# c) create an empty Footnotes rubric at the end of the document
|
||||
fns = list(tree.traverse(nodes.footnote))
|
||||
fns = list(tree.findall(nodes.footnote))
|
||||
if fns:
|
||||
fn = fns[-1]
|
||||
return fn.parent, fn.parent.index(fn) + 1
|
||||
for node in tree.traverse(nodes.rubric):
|
||||
for node in tree.findall(nodes.rubric):
|
||||
if len(node) == 1 and node.astext() == FOOTNOTES_RUBRIC_NAME:
|
||||
return node.parent, node.parent.index(node) + 1
|
||||
doc = list(tree.traverse(nodes.document))[0]
|
||||
doc = next(tree.findall(nodes.document))
|
||||
rub = nodes.rubric()
|
||||
rub.append(nodes.Text(FOOTNOTES_RUBRIC_NAME))
|
||||
doc.append(rub)
|
||||
@ -339,10 +339,10 @@ class EpubBuilder(StandaloneHTMLBuilder):
|
||||
if show_urls == 'no':
|
||||
return
|
||||
if show_urls == 'footnote':
|
||||
doc = list(tree.traverse(nodes.document))[0]
|
||||
doc = next(tree.findall(nodes.document))
|
||||
fn_spot, fn_idx = footnote_spot(tree)
|
||||
nr = 1
|
||||
for node in list(tree.traverse(nodes.reference)):
|
||||
for node in list(tree.findall(nodes.reference)):
|
||||
uri = node.get('refuri', '')
|
||||
if (uri.startswith('http:') or uri.startswith('https:') or
|
||||
uri.startswith('ftp:')) and uri not in node.astext():
|
||||
|
@ -146,7 +146,7 @@ class I18nBuilder(Builder):
|
||||
def write_doc(self, docname: str, doctree: nodes.document) -> None:
|
||||
catalog = self.catalogs[docname_to_domain(docname, self.config.gettext_compact)]
|
||||
|
||||
for toctree in self.env.tocs[docname].traverse(addnodes.toctree):
|
||||
for toctree in self.env.tocs[docname].findall(addnodes.toctree):
|
||||
for node, msg in extract_messages(toctree):
|
||||
node.uid = '' # type: ignore # Hack UUID model
|
||||
catalog.add(msg, node)
|
||||
|
@ -866,7 +866,7 @@ class StandaloneHTMLBuilder(Builder):
|
||||
Builder.post_process_images(self, doctree)
|
||||
|
||||
if self.config.html_scaled_image_link and self.html_scaled_image_link:
|
||||
for node in doctree.traverse(nodes.image):
|
||||
for node in doctree.findall(nodes.image):
|
||||
if not any((key in node) for key in ['scale', 'width', 'height']):
|
||||
# resizing options are not given. scaled image link is available
|
||||
# only for resized images.
|
||||
|
@ -48,7 +48,7 @@ class KeyboardTransform(SphinxPostTransform):
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
matcher = NodeMatcher(nodes.literal, classes=["kbd"])
|
||||
for node in self.document.traverse(matcher): # type: nodes.literal
|
||||
for node in self.document.findall(matcher): # type: nodes.literal
|
||||
parts = self.pattern.split(node[-1].astext())
|
||||
if len(parts) == 1 or self.is_multiwords_key(parts):
|
||||
continue
|
||||
|
@ -280,7 +280,7 @@ class LaTeXBuilder(Builder):
|
||||
encoding='utf-8', overwrite_if_changed=True)
|
||||
with progress_message(__("processing %s") % targetname):
|
||||
doctree = self.env.get_doctree(docname)
|
||||
toctree = next(iter(doctree.traverse(addnodes.toctree)), None)
|
||||
toctree = next(doctree.findall(addnodes.toctree), None)
|
||||
if toctree and toctree.get('maxdepth') > 0:
|
||||
tocdepth = toctree.get('maxdepth')
|
||||
else:
|
||||
@ -310,7 +310,7 @@ class LaTeXBuilder(Builder):
|
||||
def get_contentsname(self, indexfile: str) -> str:
|
||||
tree = self.env.get_doctree(indexfile)
|
||||
contentsname = None
|
||||
for toctree in tree.traverse(addnodes.toctree):
|
||||
for toctree in tree.findall(addnodes.toctree):
|
||||
if 'caption' in toctree:
|
||||
contentsname = toctree['caption']
|
||||
break
|
||||
@ -338,7 +338,7 @@ class LaTeXBuilder(Builder):
|
||||
new_sect += nodes.title('<Set title in conf.py>',
|
||||
'<Set title in conf.py>')
|
||||
new_tree += new_sect
|
||||
for node in tree.traverse(addnodes.toctree):
|
||||
for node in tree.findall(addnodes.toctree):
|
||||
new_sect += node
|
||||
tree = new_tree
|
||||
largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
|
||||
@ -353,7 +353,7 @@ class LaTeXBuilder(Builder):
|
||||
self.env.resolve_references(largetree, indexfile, self)
|
||||
# resolve :ref:s to distant tex files -- we can't add a cross-reference,
|
||||
# but append the document name
|
||||
for pendingnode in largetree.traverse(addnodes.pending_xref):
|
||||
for pendingnode in largetree.findall(addnodes.pending_xref):
|
||||
docname = pendingnode['refdocname']
|
||||
sectname = pendingnode['refsectname']
|
||||
newnodes: List[Node] = [nodes.emphasis(sectname, sectname)]
|
||||
|
@ -33,7 +33,7 @@ class FootnoteDocnameUpdater(SphinxTransform):
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
matcher = NodeMatcher(*self.TARGET_NODES)
|
||||
for node in self.document.traverse(matcher): # type: Element
|
||||
for node in self.document.findall(matcher): # type: Element
|
||||
node['docname'] = self.env.docname
|
||||
|
||||
|
||||
@ -45,7 +45,7 @@ class SubstitutionDefinitionsRemover(SphinxPostTransform):
|
||||
formats = ('latex',)
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
for node in list(self.document.traverse(nodes.substitution_definition)):
|
||||
for node in list(self.document.findall(nodes.substitution_definition)):
|
||||
node.parent.remove(node)
|
||||
|
||||
|
||||
@ -81,7 +81,7 @@ class ShowUrlsTransform(SphinxPostTransform):
|
||||
if show_urls is False or show_urls == 'no':
|
||||
return
|
||||
|
||||
for node in list(self.document.traverse(nodes.reference)):
|
||||
for node in list(self.document.findall(nodes.reference)):
|
||||
uri = node.get('refuri', '')
|
||||
if uri.startswith(URI_SCHEMES):
|
||||
if uri.startswith('mailto:'):
|
||||
@ -348,7 +348,7 @@ class LaTeXFootnoteTransform(SphinxPostTransform):
|
||||
formats = ('latex',)
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
footnotes = list(self.document.traverse(nodes.footnote))
|
||||
footnotes = list(self.document.findall(nodes.footnote))
|
||||
for node in footnotes:
|
||||
node.parent.remove(node)
|
||||
|
||||
@ -423,7 +423,7 @@ class LaTeXFootnoteVisitor(nodes.NodeVisitor):
|
||||
self.unrestrict(node)
|
||||
|
||||
def depart_table(self, node: nodes.table) -> None:
|
||||
tbody = list(node.traverse(nodes.tbody))[0]
|
||||
tbody = next(node.findall(nodes.tbody))
|
||||
for footnote in reversed(self.table_footnotes):
|
||||
fntext = footnotetext('', *footnote.children, ids=footnote['ids'])
|
||||
tbody.insert(0, fntext)
|
||||
@ -501,7 +501,7 @@ class BibliographyTransform(SphinxPostTransform):
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
citations = thebibliography()
|
||||
for node in list(self.document.traverse(nodes.citation)):
|
||||
for node in list(self.document.findall(nodes.citation)):
|
||||
node.parent.remove(node)
|
||||
citations += node
|
||||
|
||||
@ -521,7 +521,7 @@ class CitationReferenceTransform(SphinxPostTransform):
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
domain = cast(CitationDomain, self.env.get_domain('citation'))
|
||||
matcher = NodeMatcher(addnodes.pending_xref, refdomain='citation', reftype='ref')
|
||||
for node in self.document.traverse(matcher): # type: addnodes.pending_xref
|
||||
for node in self.document.findall(matcher): # type: addnodes.pending_xref
|
||||
docname, labelid, _ = domain.citations.get(node['reftarget'], ('', '', 0))
|
||||
if docname:
|
||||
citation_ref = nodes.citation_reference('', '', *node.children,
|
||||
@ -540,7 +540,7 @@ class MathReferenceTransform(SphinxPostTransform):
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
equations = self.env.get_domain('math').data['objects']
|
||||
for node in self.document.traverse(addnodes.pending_xref):
|
||||
for node in self.document.findall(addnodes.pending_xref):
|
||||
if node['refdomain'] == 'math' and node['reftype'] in ('eq', 'numref'):
|
||||
docname, _ = equations.get(node['reftarget'], (None, None))
|
||||
if docname:
|
||||
@ -555,7 +555,7 @@ class LiteralBlockTransform(SphinxPostTransform):
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
matcher = NodeMatcher(nodes.container, literal_block=True)
|
||||
for node in self.document.traverse(matcher): # type: nodes.container
|
||||
for node in self.document.findall(matcher): # type: nodes.container
|
||||
newnode = captioned_literal_block('', *node.children, **node.attributes)
|
||||
node.replace_self(newnode)
|
||||
|
||||
@ -566,7 +566,7 @@ class DocumentTargetTransform(SphinxPostTransform):
|
||||
formats = ('latex',)
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(addnodes.start_of_file):
|
||||
for node in self.document.findall(addnodes.start_of_file):
|
||||
section = node.next_node(nodes.section)
|
||||
if section:
|
||||
section['ids'].append(':doc') # special label for :doc:
|
||||
@ -602,9 +602,9 @@ class IndexInSectionTitleTransform(SphinxPostTransform):
|
||||
formats = ('latex',)
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
for node in list(self.document.traverse(nodes.title)):
|
||||
for node in list(self.document.findall(nodes.title)):
|
||||
if isinstance(node.parent, nodes.section):
|
||||
for i, index in enumerate(list(node.traverse(addnodes.index))):
|
||||
for i, index in enumerate(node.findall(addnodes.index)):
|
||||
# move the index node next to the section title
|
||||
node.remove(index)
|
||||
node.parent.insert(i + 1, index)
|
||||
|
@ -650,7 +650,7 @@ class HyperlinkCollector(SphinxPostTransform):
|
||||
hyperlinks = builder.hyperlinks
|
||||
|
||||
# reference nodes
|
||||
for refnode in self.document.traverse(nodes.reference):
|
||||
for refnode in self.document.findall(nodes.reference):
|
||||
if 'refuri' not in refnode:
|
||||
continue
|
||||
uri = refnode['refuri']
|
||||
@ -664,7 +664,7 @@ class HyperlinkCollector(SphinxPostTransform):
|
||||
hyperlinks[uri] = uri_info
|
||||
|
||||
# image nodes
|
||||
for imgnode in self.document.traverse(nodes.image):
|
||||
for imgnode in self.document.findall(nodes.image):
|
||||
uri = imgnode['candidates'].get('?')
|
||||
if uri and '://' in uri:
|
||||
newuri = self.app.emit_firstresult('linkcheck-process-uri', uri)
|
||||
|
@ -98,7 +98,7 @@ class ManualPageBuilder(Builder):
|
||||
logger.info('} ', nonl=True)
|
||||
self.env.resolve_references(largetree, docname, self)
|
||||
# remove pending_xref nodes
|
||||
for pendingnode in largetree.traverse(addnodes.pending_xref):
|
||||
for pendingnode in largetree.findall(addnodes.pending_xref):
|
||||
pendingnode.replace_self(pendingnode.children)
|
||||
|
||||
docwriter.write(largetree, destination)
|
||||
|
@ -54,7 +54,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
|
||||
def fix_refuris(self, tree: Node) -> None:
|
||||
# fix refuris with double anchor
|
||||
fname = self.config.root_doc + self.out_suffix
|
||||
for refnode in tree.traverse(nodes.reference):
|
||||
for refnode in tree.findall(nodes.reference):
|
||||
if 'refuri' not in refnode:
|
||||
continue
|
||||
refuri = refnode['refuri']
|
||||
|
@ -138,7 +138,7 @@ class TexinfoBuilder(Builder):
|
||||
new_sect += nodes.title('<Set title in conf.py>',
|
||||
'<Set title in conf.py>')
|
||||
new_tree += new_sect
|
||||
for node in tree.traverse(addnodes.toctree):
|
||||
for node in tree.findall(addnodes.toctree):
|
||||
new_sect += node
|
||||
tree = new_tree
|
||||
largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
|
||||
@ -152,7 +152,7 @@ class TexinfoBuilder(Builder):
|
||||
logger.info(__("resolving references..."))
|
||||
self.env.resolve_references(largetree, indexfile, self)
|
||||
# TODO: add support for external :ref:s
|
||||
for pendingnode in largetree.traverse(addnodes.pending_xref):
|
||||
for pendingnode in largetree.findall(addnodes.pending_xref):
|
||||
docname = pendingnode['refdocname']
|
||||
sectname = pendingnode['refsectname']
|
||||
newnodes: List[Node] = [nodes.emphasis(sectname, sectname)]
|
||||
|
@ -71,7 +71,7 @@ class XMLBuilder(Builder):
|
||||
# work around multiple string % tuple issues in docutils;
|
||||
# replace tuples in attribute values with lists
|
||||
doctree = doctree.deepcopy()
|
||||
for node in doctree.traverse(nodes.Element):
|
||||
for node in doctree.findall(nodes.Element):
|
||||
for att, value in node.attributes.items():
|
||||
if isinstance(value, tuple):
|
||||
node.attributes[att] = list(value)
|
||||
|
@ -83,7 +83,7 @@ class Meta(MetaBase, SphinxDirective):
|
||||
# docutils' meta nodes aren't picklable because the class is nested
|
||||
meta.__class__ = addnodes.meta
|
||||
|
||||
return result
|
||||
return result # type: ignore
|
||||
|
||||
|
||||
class RSTTable(tables.RSTTable):
|
||||
|
@ -3556,7 +3556,7 @@ class AliasTransform(SphinxTransform):
|
||||
return nodes
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(AliasNode):
|
||||
for node in self.document.findall(AliasNode):
|
||||
node = cast(AliasNode, node)
|
||||
sig = node.sig
|
||||
parentKey = node.parentKey
|
||||
|
@ -112,7 +112,7 @@ class CitationDefinitionTransform(SphinxTransform):
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
domain = cast(CitationDomain, self.env.get_domain('citation'))
|
||||
for node in self.document.traverse(nodes.citation):
|
||||
for node in self.document.findall(nodes.citation):
|
||||
# register citation node to domain
|
||||
node['docname'] = self.env.docname
|
||||
domain.note_citation(node)
|
||||
@ -131,7 +131,7 @@ class CitationReferenceTransform(SphinxTransform):
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
domain = cast(CitationDomain, self.env.get_domain('citation'))
|
||||
for node in self.document.traverse(nodes.citation_reference):
|
||||
for node in self.document.findall(nodes.citation_reference):
|
||||
target = node.astext()
|
||||
ref = pending_xref(target, refdomain='citation', reftype='ref',
|
||||
reftarget=target, refwarn=True,
|
||||
|
@ -7459,7 +7459,7 @@ class AliasTransform(SphinxTransform):
|
||||
return nodes
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(AliasNode):
|
||||
for node in self.document.findall(AliasNode):
|
||||
node = cast(AliasNode, node)
|
||||
sig = node.sig
|
||||
parentKey = node.parentKey
|
||||
|
@ -48,7 +48,7 @@ class IndexDomain(Domain):
|
||||
def process_doc(self, env: BuildEnvironment, docname: str, document: Node) -> None:
|
||||
"""Process a document after it is read by the environment."""
|
||||
entries = self.entries.setdefault(env.docname, [])
|
||||
for node in list(document.traverse(addnodes.index)):
|
||||
for node in list(document.findall(addnodes.index)):
|
||||
try:
|
||||
for entry in node['entries']:
|
||||
split_index_msg(entry[0], entry[1])
|
||||
|
@ -78,7 +78,7 @@ class MathDomain(Domain):
|
||||
def math_node(node: Node) -> bool:
|
||||
return isinstance(node, (nodes.math, nodes.math_block))
|
||||
|
||||
self.data['has_equations'][docname] = any(document.traverse(math_node))
|
||||
self.data['has_equations'][docname] = any(document.findall(math_node))
|
||||
|
||||
def clear_doc(self, docname: str) -> None:
|
||||
for equation_id, (doc, eqno) in list(self.equations.items()):
|
||||
|
@ -776,7 +776,7 @@ class StandardDomain(Domain):
|
||||
elif self.is_enumerable_node(node):
|
||||
sectname = self.get_numfig_title(node)
|
||||
else:
|
||||
toctree = next(iter(node.traverse(addnodes.toctree)), None)
|
||||
toctree = next(node.findall(addnodes.toctree), None)
|
||||
if toctree and toctree.get('caption'):
|
||||
sectname = toctree.get('caption')
|
||||
else:
|
||||
|
@ -535,7 +535,7 @@ class BuildEnvironment:
|
||||
self.apply_post_transforms(doctree, docname)
|
||||
|
||||
# now, resolve all toctree nodes
|
||||
for toctreenode in doctree.traverse(addnodes.toctree):
|
||||
for toctreenode in doctree.findall(addnodes.toctree):
|
||||
result = TocTree(self).resolve(docname, builder, toctreenode,
|
||||
prune=prune_toctrees,
|
||||
includehidden=includehidden)
|
||||
|
@ -161,7 +161,7 @@ class TocTree:
|
||||
process_only_nodes(toc, builder.tags)
|
||||
if title and toc.children and len(toc.children) == 1:
|
||||
child = toc.children[0]
|
||||
for refnode in child.traverse(nodes.reference):
|
||||
for refnode in child.findall(nodes.reference):
|
||||
if refnode['refuri'] == ref and \
|
||||
not refnode['anchorname']:
|
||||
refnode.children = [nodes.Text(title)]
|
||||
@ -193,13 +193,13 @@ class TocTree:
|
||||
for toplevel in children:
|
||||
# nodes with length 1 don't have any children anyway
|
||||
if len(toplevel) > 1:
|
||||
subtrees = list(toplevel.traverse(addnodes.toctree))
|
||||
subtrees = list(toplevel.findall(addnodes.toctree))
|
||||
if subtrees:
|
||||
toplevel[1][:] = subtrees # type: ignore
|
||||
else:
|
||||
toplevel.pop(1)
|
||||
# resolve all sub-toctrees
|
||||
for subtocnode in list(toc.traverse(addnodes.toctree)):
|
||||
for subtocnode in list(toc.findall(addnodes.toctree)):
|
||||
if not (subtocnode.get('hidden', False) and
|
||||
not includehidden):
|
||||
i = subtocnode.parent.index(subtocnode) + 1
|
||||
@ -257,7 +257,7 @@ class TocTree:
|
||||
|
||||
# set the target paths in the toctrees (they are not known at TOC
|
||||
# generation time)
|
||||
for refnode in newnode.traverse(nodes.reference):
|
||||
for refnode in newnode.findall(nodes.reference):
|
||||
if not url_re.match(refnode['refuri']):
|
||||
refnode['refuri'] = builder.get_relative_uri(
|
||||
docname, refnode['refuri']) + refnode['anchorname']
|
||||
@ -308,7 +308,7 @@ class TocTree:
|
||||
# renders to nothing
|
||||
return nodes.paragraph()
|
||||
process_only_nodes(toc, builder.tags)
|
||||
for node in toc.traverse(nodes.reference):
|
||||
for node in toc.findall(nodes.reference):
|
||||
node['refuri'] = node['anchorname'] or '#'
|
||||
return toc
|
||||
|
||||
@ -324,7 +324,7 @@ class TocTree:
|
||||
else:
|
||||
kwargs['maxdepth'] = int(kwargs['maxdepth'])
|
||||
kwargs['collapse'] = collapse
|
||||
for toctreenode in doctree.traverse(addnodes.toctree):
|
||||
for toctreenode in doctree.findall(addnodes.toctree):
|
||||
toctree = self.resolve(docname, builder, toctreenode, prune=True, **kwargs)
|
||||
if toctree:
|
||||
toctrees.append(toctree)
|
||||
|
@ -43,7 +43,7 @@ class ImageCollector(EnvironmentCollector):
|
||||
"""Process and rewrite image URIs."""
|
||||
docname = app.env.docname
|
||||
|
||||
for node in doctree.traverse(nodes.image):
|
||||
for node in doctree.findall(nodes.image):
|
||||
# Map the mimetype to the corresponding image. The writer may
|
||||
# choose the best image from these candidates. The special key * is
|
||||
# set if there is only single candidate to be used by a writer.
|
||||
@ -124,7 +124,7 @@ class DownloadFileCollector(EnvironmentCollector):
|
||||
|
||||
def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:
|
||||
"""Process downloadable file paths. """
|
||||
for node in doctree.traverse(addnodes.download_reference):
|
||||
for node in doctree.findall(addnodes.download_reference):
|
||||
targetname = node['reftarget']
|
||||
if '://' in targetname:
|
||||
node['refuri'] = targetname
|
||||
|
@ -43,7 +43,7 @@ class TitleCollector(EnvironmentCollector):
|
||||
longtitlenode = nodes.title()
|
||||
longtitlenode += nodes.Text(doctree['title'])
|
||||
# look for first section title and use that as the title
|
||||
for node in doctree.traverse(nodes.section):
|
||||
for node in doctree.findall(nodes.section):
|
||||
visitor = SphinxContentsFilter(doctree)
|
||||
node[0].walkabout(visitor)
|
||||
titlenode += visitor.get_entry_text()
|
||||
|
@ -196,7 +196,7 @@ class TocTreeCollector(EnvironmentCollector):
|
||||
for docname in env.numbered_toctrees:
|
||||
assigned.add(docname)
|
||||
doctree = env.get_doctree(docname)
|
||||
for toctreenode in doctree.traverse(addnodes.toctree):
|
||||
for toctreenode in doctree.findall(addnodes.toctree):
|
||||
depth = toctreenode.get('numbered', 0)
|
||||
if depth:
|
||||
# every numbered toctree gets new numbering
|
||||
|
@ -33,7 +33,7 @@ def get_node_depth(node: Node) -> int:
|
||||
|
||||
def register_sections_as_label(app: Sphinx, document: Node) -> None:
|
||||
domain = cast(StandardDomain, app.env.get_domain('std'))
|
||||
for node in document.traverse(nodes.section):
|
||||
for node in document.findall(nodes.section):
|
||||
if (app.config.autosectionlabel_maxdepth and
|
||||
get_node_depth(node) >= app.config.autosectionlabel_maxdepth):
|
||||
continue
|
||||
|
@ -587,7 +587,7 @@ def extract_summary(doc: List[str], document: Any) -> str:
|
||||
node = parse(doc, document.settings)
|
||||
if summary.endswith(WELL_KNOWN_ABBREVIATIONS):
|
||||
pass
|
||||
elif not list(node.traverse(nodes.system_message)):
|
||||
elif not any(node.findall(nodes.system_message)):
|
||||
# considered as that splitting by period does not break inline markups
|
||||
break
|
||||
|
||||
|
@ -422,7 +422,7 @@ Doctest summary
|
||||
def condition(node: Node) -> bool:
|
||||
return isinstance(node, (nodes.literal_block, nodes.comment)) \
|
||||
and 'testnodetype' in node
|
||||
for node in doctree.traverse(condition): # type: Element
|
||||
for node in doctree.findall(condition): # type: Element
|
||||
if self.skipped(node):
|
||||
continue
|
||||
|
||||
|
@ -55,7 +55,7 @@ class ExternalLinksChecker(SphinxPostTransform):
|
||||
default_priority = 500
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
for refnode in self.document.traverse(nodes.reference):
|
||||
for refnode in self.document.findall(nodes.reference):
|
||||
self.check_uri(refnode)
|
||||
|
||||
def check_uri(self, refnode: nodes.reference) -> None:
|
||||
|
@ -56,7 +56,7 @@ def process_ifconfig_nodes(app: Sphinx, doctree: nodes.document, docname: str) -
|
||||
ns = {confval.name: confval.value for confval in app.config}
|
||||
ns.update(app.config.__dict__.copy())
|
||||
ns['builder'] = app.builder.name
|
||||
for node in doctree.traverse(ifconfig):
|
||||
for node in doctree.findall(ifconfig):
|
||||
try:
|
||||
res = eval(node['expr'], ns)
|
||||
except Exception as err:
|
||||
|
@ -39,7 +39,7 @@ def doctree_read(app: Sphinx, doctree: Node) -> None:
|
||||
'js': ['object', 'fullname'],
|
||||
}
|
||||
|
||||
for objnode in list(doctree.traverse(addnodes.desc)):
|
||||
for objnode in list(doctree.findall(addnodes.desc)):
|
||||
domain = objnode.get('domain')
|
||||
uris: Set[str] = set()
|
||||
for signode in objnode:
|
||||
|
@ -93,7 +93,7 @@ class TodoDomain(Domain):
|
||||
def process_doc(self, env: BuildEnvironment, docname: str,
|
||||
document: nodes.document) -> None:
|
||||
todos = self.todos.setdefault(docname, [])
|
||||
for todo in document.traverse(todo_node):
|
||||
for todo in document.findall(todo_node):
|
||||
env.app.emit('todo-defined', todo)
|
||||
todos.append(todo)
|
||||
|
||||
@ -131,7 +131,7 @@ class TodoListProcessor:
|
||||
|
||||
def process(self, doctree: nodes.document, docname: str) -> None:
|
||||
todos: List[todo_node] = sum(self.domain.todos.values(), [])
|
||||
for node in list(doctree.traverse(todolist)):
|
||||
for node in list(doctree.findall(todolist)):
|
||||
if not self.config.todo_include_todos:
|
||||
node.parent.remove(node)
|
||||
continue
|
||||
@ -184,7 +184,7 @@ class TodoListProcessor:
|
||||
|
||||
def resolve_reference(self, todo: todo_node, docname: str) -> None:
|
||||
"""Resolve references in the todo content."""
|
||||
for node in todo.traverse(addnodes.pending_xref):
|
||||
for node in todo.findall(addnodes.pending_xref):
|
||||
if 'refdoc' in node:
|
||||
node['refdoc'] = docname
|
||||
|
||||
|
@ -108,7 +108,7 @@ def doctree_read(app: Sphinx, doctree: Node) -> None:
|
||||
|
||||
return False
|
||||
|
||||
for objnode in list(doctree.traverse(addnodes.desc)):
|
||||
for objnode in list(doctree.findall(addnodes.desc)):
|
||||
if objnode.get('domain') != 'py':
|
||||
continue
|
||||
names: Set[str] = set()
|
||||
@ -184,14 +184,14 @@ class ViewcodeAnchorTransform(SphinxPostTransform):
|
||||
self.remove_viewcode_anchors()
|
||||
|
||||
def convert_viewcode_anchors(self) -> None:
|
||||
for node in self.document.traverse(viewcode_anchor):
|
||||
for node in self.document.findall(viewcode_anchor):
|
||||
anchor = nodes.inline('', _('[source]'), classes=['viewcode-link'])
|
||||
refnode = make_refnode(self.app.builder, node['refdoc'], node['reftarget'],
|
||||
node['refid'], anchor)
|
||||
node.replace_self(refnode)
|
||||
|
||||
def remove_viewcode_anchors(self) -> None:
|
||||
for node in list(self.document.traverse(viewcode_anchor)):
|
||||
for node in list(self.document.findall(viewcode_anchor)):
|
||||
node.parent.remove(node)
|
||||
|
||||
|
||||
|
@ -107,7 +107,7 @@ class DefaultSubstitutions(SphinxTransform):
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
# only handle those not otherwise defined in the document
|
||||
to_handle = default_substitutions - set(self.document.substitution_defs)
|
||||
for ref in self.document.traverse(nodes.substitution_reference):
|
||||
for ref in self.document.findall(nodes.substitution_reference):
|
||||
refname = ref['refname']
|
||||
if refname in to_handle:
|
||||
text = self.config[refname]
|
||||
@ -128,7 +128,7 @@ class MoveModuleTargets(SphinxTransform):
|
||||
default_priority = 210
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for node in list(self.document.traverse(nodes.target)):
|
||||
for node in list(self.document.findall(nodes.target)):
|
||||
if not node['ids']:
|
||||
continue
|
||||
if ('ismod' in node and
|
||||
@ -147,12 +147,12 @@ class HandleCodeBlocks(SphinxTransform):
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
# move doctest blocks out of blockquotes
|
||||
for node in self.document.traverse(nodes.block_quote):
|
||||
for node in self.document.findall(nodes.block_quote):
|
||||
if all(isinstance(child, nodes.doctest_block) for child
|
||||
in node.children):
|
||||
node.replace_self(node.children)
|
||||
# combine successive doctest blocks
|
||||
# for node in self.document.traverse(nodes.doctest_block):
|
||||
# for node in self.document.findall(nodes.doctest_block):
|
||||
# if node not in node.parent.children:
|
||||
# continue
|
||||
# parindex = node.parent.index(node)
|
||||
@ -172,7 +172,7 @@ class AutoNumbering(SphinxTransform):
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
domain: StandardDomain = self.env.get_domain('std')
|
||||
|
||||
for node in self.document.traverse(nodes.Element):
|
||||
for node in self.document.findall(nodes.Element):
|
||||
if (domain.is_enumerable_node(node) and
|
||||
domain.get_numfig_title(node) is not None and
|
||||
node['ids'] == []):
|
||||
@ -186,7 +186,7 @@ class SortIds(SphinxTransform):
|
||||
default_priority = 261
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(nodes.section):
|
||||
for node in self.document.findall(nodes.section):
|
||||
if len(node['ids']) > 1 and node['ids'][0].startswith('id'):
|
||||
node['ids'] = node['ids'][1:] + [node['ids'][0]]
|
||||
|
||||
@ -207,7 +207,7 @@ class ApplySourceWorkaround(SphinxTransform):
|
||||
default_priority = 10
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(): # type: Node
|
||||
for node in self.document.findall(): # type: Node
|
||||
if isinstance(node, (nodes.TextElement, nodes.image, nodes.topic)):
|
||||
apply_source_workaround(node)
|
||||
|
||||
@ -219,7 +219,7 @@ class AutoIndexUpgrader(SphinxTransform):
|
||||
default_priority = 210
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(addnodes.index):
|
||||
for node in self.document.findall(addnodes.index):
|
||||
if 'entries' in node and any(len(entry) == 4 for entry in node['entries']):
|
||||
msg = __('4 column based index found. '
|
||||
'It might be a bug of extensions you use: %r') % node['entries']
|
||||
@ -244,7 +244,7 @@ class ExtraTranslatableNodes(SphinxTransform):
|
||||
def is_translatable_node(node: Node) -> bool:
|
||||
return isinstance(node, tuple(target_nodes))
|
||||
|
||||
for node in self.document.traverse(is_translatable_node): # type: Element
|
||||
for node in self.document.findall(is_translatable_node): # type: Element
|
||||
node['translatable'] = True
|
||||
|
||||
|
||||
@ -276,7 +276,7 @@ class DoctestTransform(SphinxTransform):
|
||||
default_priority = 500
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(nodes.doctest_block):
|
||||
for node in self.document.findall(nodes.doctest_block):
|
||||
node['classes'].append('doctest')
|
||||
|
||||
|
||||
@ -293,7 +293,7 @@ class FigureAligner(SphinxTransform):
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
matcher = NodeMatcher(nodes.table, nodes.figure)
|
||||
for node in self.document.traverse(matcher): # type: Element
|
||||
for node in self.document.findall(matcher): # type: Element
|
||||
node.setdefault('align', 'default')
|
||||
|
||||
|
||||
@ -303,7 +303,7 @@ class FilterSystemMessages(SphinxTransform):
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
filterlevel = 2 if self.config.keep_warnings else 5
|
||||
for node in list(self.document.traverse(nodes.system_message)):
|
||||
for node in list(self.document.findall(nodes.system_message)):
|
||||
if node['level'] < filterlevel:
|
||||
logger.debug('%s [filtered system message]', node.astext())
|
||||
node.parent.remove(node)
|
||||
@ -392,7 +392,7 @@ class ManpageLink(SphinxTransform):
|
||||
default_priority = 999
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(addnodes.manpage):
|
||||
for node in self.document.findall(addnodes.manpage):
|
||||
manpage = ' '.join([str(x) for x in node.children
|
||||
if isinstance(x, nodes.Text)])
|
||||
pattern = r'^(?P<path>(?P<page>.+)[\(\.](?P<section>[1-9]\w*)?\)?)$' # noqa
|
||||
|
@ -74,9 +74,9 @@ class RefOnlyBulletListTransform(SphinxTransform):
|
||||
else:
|
||||
return True
|
||||
|
||||
for node in self.document.traverse(nodes.bullet_list):
|
||||
for node in self.document.findall(nodes.bullet_list):
|
||||
if check_refonly_list(node):
|
||||
for item in node.traverse(nodes.list_item):
|
||||
for item in node.findall(nodes.list_item):
|
||||
para = cast(nodes.paragraph, item[0])
|
||||
ref = cast(nodes.reference, para[0])
|
||||
compact_para = addnodes.compact_paragraph()
|
||||
|
@ -89,7 +89,7 @@ class PreserveTranslatableMessages(SphinxTransform):
|
||||
default_priority = 10 # this MUST be invoked before Locale transform
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(addnodes.translatable):
|
||||
for node in self.document.findall(addnodes.translatable):
|
||||
node.preserve_original_messages()
|
||||
|
||||
|
||||
@ -199,7 +199,7 @@ class Locale(SphinxTransform):
|
||||
|
||||
# replace target's refname to new target name
|
||||
matcher = NodeMatcher(nodes.target, refname=old_name)
|
||||
for old_target in self.document.traverse(matcher): # type: nodes.target
|
||||
for old_target in self.document.findall(matcher): # type: nodes.target
|
||||
old_target['refname'] = new_name
|
||||
|
||||
processed = True
|
||||
@ -301,8 +301,8 @@ class Locale(SphinxTransform):
|
||||
lst.append(new)
|
||||
|
||||
is_autofootnote_ref = NodeMatcher(nodes.footnote_reference, auto=Any)
|
||||
old_foot_refs: List[nodes.footnote_reference] = list(node.traverse(is_autofootnote_ref)) # NOQA
|
||||
new_foot_refs: List[nodes.footnote_reference] = list(patch.traverse(is_autofootnote_ref)) # NOQA
|
||||
old_foot_refs: List[nodes.footnote_reference] = list(node.findall(is_autofootnote_ref)) # NOQA
|
||||
new_foot_refs: List[nodes.footnote_reference] = list(patch.findall(is_autofootnote_ref)) # NOQA
|
||||
if len(old_foot_refs) != len(new_foot_refs):
|
||||
old_foot_ref_rawsources = [ref.rawsource for ref in old_foot_refs]
|
||||
new_foot_ref_rawsources = [ref.rawsource for ref in new_foot_refs]
|
||||
@ -344,8 +344,8 @@ class Locale(SphinxTransform):
|
||||
# * use translated refname for section refname.
|
||||
# * inline reference "`Python <...>`_" has no 'refname'.
|
||||
is_refnamed_ref = NodeMatcher(nodes.reference, refname=Any)
|
||||
old_refs: List[nodes.reference] = list(node.traverse(is_refnamed_ref))
|
||||
new_refs: List[nodes.reference] = list(patch.traverse(is_refnamed_ref))
|
||||
old_refs: List[nodes.reference] = list(node.findall(is_refnamed_ref))
|
||||
new_refs: List[nodes.reference] = list(patch.findall(is_refnamed_ref))
|
||||
if len(old_refs) != len(new_refs):
|
||||
old_ref_rawsources = [ref.rawsource for ref in old_refs]
|
||||
new_ref_rawsources = [ref.rawsource for ref in new_refs]
|
||||
@ -371,8 +371,8 @@ class Locale(SphinxTransform):
|
||||
|
||||
# refnamed footnote should use original 'ids'.
|
||||
is_refnamed_footnote_ref = NodeMatcher(nodes.footnote_reference, refname=Any)
|
||||
old_foot_refs = list(node.traverse(is_refnamed_footnote_ref))
|
||||
new_foot_refs = list(patch.traverse(is_refnamed_footnote_ref))
|
||||
old_foot_refs = list(node.findall(is_refnamed_footnote_ref))
|
||||
new_foot_refs = list(patch.findall(is_refnamed_footnote_ref))
|
||||
refname_ids_map: Dict[str, List[str]] = {}
|
||||
if len(old_foot_refs) != len(new_foot_refs):
|
||||
old_foot_ref_rawsources = [ref.rawsource for ref in old_foot_refs]
|
||||
@ -390,8 +390,8 @@ class Locale(SphinxTransform):
|
||||
|
||||
# citation should use original 'ids'.
|
||||
is_citation_ref = NodeMatcher(nodes.citation_reference, refname=Any)
|
||||
old_cite_refs: List[nodes.citation_reference] = list(node.traverse(is_citation_ref)) # NOQA
|
||||
new_cite_refs: List[nodes.citation_reference] = list(patch.traverse(is_citation_ref)) # NOQA
|
||||
old_cite_refs: List[nodes.citation_reference] = list(node.findall(is_citation_ref))
|
||||
new_cite_refs: List[nodes.citation_reference] = list(patch.findall(is_citation_ref)) # NOQA
|
||||
refname_ids_map = {}
|
||||
if len(old_cite_refs) != len(new_cite_refs):
|
||||
old_cite_ref_rawsources = [ref.rawsource for ref in old_cite_refs]
|
||||
@ -410,8 +410,8 @@ class Locale(SphinxTransform):
|
||||
# Original pending_xref['reftarget'] contain not-translated
|
||||
# target name, new pending_xref must use original one.
|
||||
# This code restricts to change ref-targets in the translation.
|
||||
old_xrefs = list(node.traverse(addnodes.pending_xref))
|
||||
new_xrefs = list(patch.traverse(addnodes.pending_xref))
|
||||
old_xrefs = list(node.findall(addnodes.pending_xref))
|
||||
new_xrefs = list(patch.findall(addnodes.pending_xref))
|
||||
xref_reftarget_map = {}
|
||||
if len(old_xrefs) != len(new_xrefs):
|
||||
old_xref_rawsources = [xref.rawsource for xref in old_xrefs]
|
||||
@ -477,7 +477,7 @@ class Locale(SphinxTransform):
|
||||
node['entries'] = new_entries
|
||||
|
||||
# remove translated attribute that is used for avoiding double translation.
|
||||
for translated in self.document.traverse(NodeMatcher(translated=Any)): # type: Element # NOQA
|
||||
for translated in self.document.findall(NodeMatcher(translated=Any)): # type: Element # NOQA
|
||||
translated.delattr('translated')
|
||||
|
||||
|
||||
@ -493,7 +493,7 @@ class RemoveTranslatableInline(SphinxTransform):
|
||||
return
|
||||
|
||||
matcher = NodeMatcher(nodes.inline, translatable=Any)
|
||||
for inline in list(self.document.traverse(matcher)): # type: nodes.inline
|
||||
for inline in list(self.document.findall(matcher)): # type: nodes.inline
|
||||
inline.parent.remove(inline)
|
||||
inline.parent += inline.children
|
||||
|
||||
|
@ -67,7 +67,7 @@ class ReferencesResolver(SphinxPostTransform):
|
||||
default_priority = 10
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(addnodes.pending_xref):
|
||||
for node in self.document.findall(addnodes.pending_xref):
|
||||
content = self.find_pending_xref_condition(node, ("resolved", "*"))
|
||||
if content:
|
||||
contnode = cast(Element, content[0].deepcopy())
|
||||
@ -251,7 +251,7 @@ class SigElementFallbackTransform(SphinxPostTransform):
|
||||
self.fallback(addnodes.desc_inline)
|
||||
|
||||
def fallback(self, nodeType: Any) -> None:
|
||||
for node in self.document.traverse(nodeType):
|
||||
for node in self.document.findall(nodeType):
|
||||
newnode = nodes.inline()
|
||||
newnode.update_all_atts(node)
|
||||
newnode.extend(node)
|
||||
@ -263,7 +263,7 @@ class PropagateDescDomain(SphinxPostTransform):
|
||||
default_priority = 200
|
||||
|
||||
def run(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(addnodes.desc_signature):
|
||||
for node in self.document.findall(addnodes.desc_signature):
|
||||
if node.parent.get('domain'):
|
||||
node['classes'].append(node.parent['domain'])
|
||||
|
||||
|
@ -42,7 +42,7 @@ class HighlightLanguageTransform(SphinxTransform):
|
||||
self.config.highlight_language)
|
||||
self.document.walkabout(visitor)
|
||||
|
||||
for node in list(self.document.traverse(addnodes.highlightlang)):
|
||||
for node in list(self.document.findall(addnodes.highlightlang)):
|
||||
node.parent.remove(node)
|
||||
|
||||
|
||||
@ -94,11 +94,11 @@ class TrimDoctestFlagsTransform(SphinxTransform):
|
||||
default_priority = HighlightLanguageTransform.default_priority + 1
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for lbnode in self.document.traverse(nodes.literal_block): # type: nodes.literal_block
|
||||
for lbnode in self.document.findall(nodes.literal_block):
|
||||
if self.is_pyconsole(lbnode):
|
||||
self.strip_doctest_flags(lbnode)
|
||||
|
||||
for dbnode in self.document.traverse(nodes.doctest_block): # type: nodes.doctest_block
|
||||
for dbnode in self.document.findall(nodes.doctest_block):
|
||||
self.strip_doctest_flags(dbnode)
|
||||
|
||||
def strip_doctest_flags(self, node: TextElement) -> None:
|
||||
|
@ -30,7 +30,7 @@ CRITICAL_PATH_CHAR_RE = re.compile('[:;<>|*" ]')
|
||||
|
||||
class BaseImageConverter(SphinxTransform):
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
for node in self.document.traverse(nodes.image):
|
||||
for node in self.document.findall(nodes.image):
|
||||
if self.match(node):
|
||||
self.handle(node)
|
||||
|
||||
|
@ -500,6 +500,16 @@ class SphinxTranslator(nodes.NodeVisitor):
|
||||
logger.warning(__('unknown node type: %r'), node, location=node)
|
||||
|
||||
|
||||
# Node.findall() is a new interface to traverse a doctree since docutils-0.18.
|
||||
# This applies a patch docutils-0.17 or older to be available Node.findall()
|
||||
# method to use it from our codebase.
|
||||
if __version_info__ < (0, 18):
|
||||
def findall(self, *args, **kwargs):
|
||||
return iter(self.traverse(*args, **kwargs))
|
||||
|
||||
Node.findall = findall # type: ignore
|
||||
|
||||
|
||||
# cache a vanilla instance of nodes.document
|
||||
# Used in new_document() function
|
||||
__document_cache__: Optional[nodes.document] = None
|
||||
|
@ -38,7 +38,7 @@ caption_ref_re = explicit_title_re # b/w compat alias
|
||||
|
||||
|
||||
class NodeMatcher:
|
||||
"""A helper class for Node.traverse().
|
||||
"""A helper class for Node.findall().
|
||||
|
||||
It checks that the given node is an instance of the specified node-classes and
|
||||
has the specified node-attributes.
|
||||
@ -47,7 +47,7 @@ class NodeMatcher:
|
||||
and ``reftype`` attributes::
|
||||
|
||||
matcher = NodeMatcher(nodes.reference, refdomain='std', reftype='citation')
|
||||
doctree.traverse(matcher)
|
||||
doctree.findall(matcher)
|
||||
# => [<reference ...>, <reference ...>, ...]
|
||||
|
||||
A special value ``typing.Any`` matches any kind of node-attributes. For example,
|
||||
@ -55,7 +55,7 @@ class NodeMatcher:
|
||||
|
||||
from typing import Any
|
||||
matcher = NodeMatcher(nodes.reference, refdomain=Any)
|
||||
doctree.traverse(matcher)
|
||||
doctree.findall(matcher)
|
||||
# => [<reference ...>, <reference ...>, ...]
|
||||
"""
|
||||
|
||||
@ -147,7 +147,7 @@ def apply_source_workaround(node: Element) -> None:
|
||||
logger.debug('[i18n] PATCH: %r to have rawsource: %s',
|
||||
get_full_module_name(node), repr_domxml(node))
|
||||
# strip classifier from rawsource of term
|
||||
for classifier in reversed(list(node.parent.traverse(nodes.classifier))):
|
||||
for classifier in reversed(list(node.parent.findall(nodes.classifier))):
|
||||
node.rawsource = re.sub(r'\s*:\s*%s' % re.escape(classifier.astext()),
|
||||
'', node.rawsource)
|
||||
if isinstance(node, nodes.topic) and node.source is None:
|
||||
@ -259,7 +259,7 @@ META_TYPE_NODES = (
|
||||
|
||||
def extract_messages(doctree: Element) -> Iterable[Tuple[Element, str]]:
|
||||
"""Extract translatable messages from a document tree."""
|
||||
for node in doctree.traverse(is_translatable): # type: Element
|
||||
for node in doctree.findall(is_translatable): # type: Element
|
||||
if isinstance(node, addnodes.translatable):
|
||||
for msg in node.extract_original_messages():
|
||||
yield node, msg
|
||||
@ -323,7 +323,8 @@ def get_prev_node(node: Node) -> Optional[Node]:
|
||||
|
||||
def traverse_translatable_index(doctree: Element) -> Iterable[Tuple[Element, List["IndexEntry"]]]: # NOQA
|
||||
"""Traverse translatable index node from a document tree."""
|
||||
for node in doctree.traverse(NodeMatcher(addnodes.index, inline=False)): # type: addnodes.index # NOQA
|
||||
matcher = NodeMatcher(addnodes.index, inline=False)
|
||||
for node in doctree.findall(matcher): # type: addnodes.index
|
||||
if 'raw_entries' in node:
|
||||
entries = node['raw_entries']
|
||||
else:
|
||||
@ -353,9 +354,9 @@ def nested_parse_with_titles(state: Any, content: StringList, node: Node) -> str
|
||||
def clean_astext(node: Element) -> str:
|
||||
"""Like node.astext(), but ignore images."""
|
||||
node = node.deepcopy()
|
||||
for img in node.traverse(nodes.image):
|
||||
for img in node.findall(nodes.image):
|
||||
img['alt'] = ''
|
||||
for raw in list(node.traverse(nodes.raw)):
|
||||
for raw in list(node.findall(nodes.raw)):
|
||||
raw.parent.remove(raw)
|
||||
return node.astext()
|
||||
|
||||
@ -420,7 +421,7 @@ def inline_all_toctrees(builder: "Builder", docnameset: Set[str], docname: str,
|
||||
Record all docnames in *docnameset*, and output docnames with *colorfunc*.
|
||||
"""
|
||||
tree = cast(nodes.document, tree.deepcopy())
|
||||
for toctreenode in list(tree.traverse(addnodes.toctree)):
|
||||
for toctreenode in list(tree.findall(addnodes.toctree)):
|
||||
newnodes = []
|
||||
includefiles = map(str, toctreenode['includefiles'])
|
||||
for includefile in includefiles:
|
||||
@ -438,7 +439,7 @@ def inline_all_toctrees(builder: "Builder", docnameset: Set[str], docname: str,
|
||||
else:
|
||||
sof = addnodes.start_of_file(docname=includefile)
|
||||
sof.children = subtree.children
|
||||
for sectionnode in sof.traverse(nodes.section):
|
||||
for sectionnode in sof.findall(nodes.section):
|
||||
if 'docname' not in sectionnode:
|
||||
sectionnode['docname'] = includefile
|
||||
newnodes.append(sof)
|
||||
@ -615,7 +616,7 @@ def is_smartquotable(node: Node) -> bool:
|
||||
|
||||
def process_only_nodes(document: Node, tags: "Tags") -> None:
|
||||
"""Filter ``only`` nodes which do not match *tags*."""
|
||||
for node in document.traverse(addnodes.only):
|
||||
for node in document.findall(addnodes.only):
|
||||
try:
|
||||
ret = tags.eval_condition(node['expr'])
|
||||
except Exception as err:
|
||||
|
@ -42,7 +42,7 @@ def add_uids(doctree: Node, condition: Any) -> Iterator[Node]:
|
||||
:param condition:
|
||||
A callable which returns either ``True`` or ``False`` for a given node.
|
||||
"""
|
||||
for node in doctree.traverse(condition):
|
||||
for node in doctree.findall(condition):
|
||||
node.uid = uuid4().hex
|
||||
yield node
|
||||
|
||||
@ -57,8 +57,8 @@ def merge_doctrees(old: Node, new: Node, condition: Any) -> Iterator[Node]:
|
||||
:param condition:
|
||||
A callable which returns either ``True`` or ``False`` for a given node.
|
||||
"""
|
||||
old_iter = old.traverse(condition)
|
||||
new_iter = new.traverse(condition)
|
||||
old_iter = old.findall(condition)
|
||||
new_iter = new.findall(condition)
|
||||
old_nodes = []
|
||||
new_nodes = []
|
||||
ratios = {}
|
||||
|
@ -651,7 +651,7 @@ class LaTeXTranslator(SphinxTranslator):
|
||||
raise nodes.SkipNode
|
||||
else:
|
||||
short = ''
|
||||
if list(node.traverse(nodes.image)):
|
||||
if any(node.findall(nodes.image)):
|
||||
short = ('[%s]' % self.escape(' '.join(clean_astext(node).split())))
|
||||
|
||||
try:
|
||||
@ -1009,7 +1009,7 @@ class LaTeXTranslator(SphinxTranslator):
|
||||
context = (r'\par' + CR + r'\vskip-\baselineskip'
|
||||
r'\vbox{\hbox{\strut}}\end{varwidth}%' + CR + context)
|
||||
self.needs_linetrimming = 1
|
||||
if len(list(node.traverse(nodes.paragraph))) >= 2:
|
||||
if len(list(node.findall(nodes.paragraph))) >= 2:
|
||||
self.table.has_oldproblematic = True
|
||||
if isinstance(node.parent.parent, nodes.thead) or (cell.col in self.table.stubs):
|
||||
if len(node) == 1 and isinstance(node[0], nodes.paragraph) and node.astext() == '':
|
||||
|
@ -56,7 +56,7 @@ class NestedInlineTransform:
|
||||
|
||||
def apply(self, **kwargs: Any) -> None:
|
||||
matcher = NodeMatcher(nodes.literal, nodes.emphasis, nodes.strong)
|
||||
for node in list(self.document.traverse(matcher)): # type: TextElement
|
||||
for node in list(self.document.findall(matcher)): # type: TextElement
|
||||
if any(matcher(subnode) for subnode in node):
|
||||
pos = node.parent.index(node)
|
||||
for subnode in reversed(list(node)):
|
||||
@ -227,7 +227,7 @@ class ManualPageTranslator(SphinxTranslator, BaseTranslator):
|
||||
|
||||
# overwritten -- don't make whole of term bold if it includes strong node
|
||||
def visit_term(self, node: Element) -> None:
|
||||
if list(node.traverse(nodes.strong)):
|
||||
if any(node.findall(nodes.strong)):
|
||||
self.body.append('\n')
|
||||
else:
|
||||
super().visit_term(node)
|
||||
|
@ -286,7 +286,7 @@ class TexinfoTranslator(SphinxTranslator):
|
||||
self.indices = [(add_node_name(name), content)
|
||||
for name, content in self.indices]
|
||||
# each section is also a node
|
||||
for section in self.document.traverse(nodes.section):
|
||||
for section in self.document.findall(nodes.section):
|
||||
title = cast(nodes.TextElement, section.next_node(nodes.Titular))
|
||||
name = title.astext() if title else '<untitled>'
|
||||
section['node_name'] = add_node_name(name)
|
||||
@ -295,7 +295,7 @@ class TexinfoTranslator(SphinxTranslator):
|
||||
"""Collect the menu entries for each "node" section."""
|
||||
node_menus = self.node_menus
|
||||
targets: List[Element] = [self.document]
|
||||
targets.extend(self.document.traverse(nodes.section))
|
||||
targets.extend(self.document.findall(nodes.section))
|
||||
for node in targets:
|
||||
assert 'node_name' in node and node['node_name']
|
||||
entries = [s['node_name'] for s in find_subsections(node)]
|
||||
|
@ -850,7 +850,7 @@ class TextTranslator(SphinxTranslator):
|
||||
self.end_state(first='%s. ' % self.list_counter[-1])
|
||||
|
||||
def visit_definition_list_item(self, node: Element) -> None:
|
||||
self._classifier_count_in_li = len(list(node.traverse(nodes.classifier)))
|
||||
self._classifier_count_in_li = len(list(node.findall(nodes.classifier)))
|
||||
|
||||
def depart_definition_list_item(self, node: Element) -> None:
|
||||
pass
|
||||
|
@ -553,7 +553,7 @@ def test_literalinclude_pydecorators(app, status, warning):
|
||||
def test_code_block_highlighted(app, status, warning):
|
||||
app.builder.build(['highlight'])
|
||||
doctree = app.env.get_doctree('highlight')
|
||||
codeblocks = list(doctree.traverse(nodes.literal_block))
|
||||
codeblocks = list(doctree.findall(nodes.literal_block))
|
||||
|
||||
assert codeblocks[0]['language'] == 'default'
|
||||
assert codeblocks[1]['language'] == 'python2'
|
||||
|
@ -42,7 +42,7 @@ def test_domain_js_xrefs(app, status, warning):
|
||||
assert_node(node, **attributes)
|
||||
|
||||
doctree = app.env.get_doctree('roles')
|
||||
refnodes = list(doctree.traverse(addnodes.pending_xref))
|
||||
refnodes = list(doctree.findall(addnodes.pending_xref))
|
||||
assert_refnode(refnodes[0], None, None, 'TopLevel', 'class')
|
||||
assert_refnode(refnodes[1], None, None, 'top_level', 'func')
|
||||
assert_refnode(refnodes[2], None, 'NestedParentA', 'child_1', 'func')
|
||||
@ -60,7 +60,7 @@ def test_domain_js_xrefs(app, status, warning):
|
||||
assert len(refnodes) == 13
|
||||
|
||||
doctree = app.env.get_doctree('module')
|
||||
refnodes = list(doctree.traverse(addnodes.pending_xref))
|
||||
refnodes = list(doctree.findall(addnodes.pending_xref))
|
||||
assert_refnode(refnodes[0], 'module_a.submodule', None, 'ModTopLevel',
|
||||
'class')
|
||||
assert_refnode(refnodes[1], 'module_a.submodule', 'ModTopLevel',
|
||||
|
@ -424,7 +424,7 @@ def test_productionlist2(app):
|
||||
" A: `:A` `A`\n"
|
||||
" B: `P1:B` `~P1:B`\n")
|
||||
doctree = restructuredtext.parse(app, text)
|
||||
refnodes = list(doctree.traverse(pending_xref))
|
||||
refnodes = list(doctree.findall(pending_xref))
|
||||
assert_node(refnodes[0], pending_xref, reftarget="A")
|
||||
assert_node(refnodes[1], pending_xref, reftarget="P2:A")
|
||||
assert_node(refnodes[2], pending_xref, reftarget="P1:B")
|
||||
|
@ -36,7 +36,7 @@ def test_build(app, status, warning):
|
||||
def test_highlight_language_default(app, status, warning):
|
||||
app.build()
|
||||
doctree = app.env.get_doctree('doctest')
|
||||
for node in doctree.traverse(nodes.literal_block):
|
||||
for node in doctree.findall(nodes.literal_block):
|
||||
assert node['language'] in ('python3', 'pycon3', 'none')
|
||||
|
||||
|
||||
@ -45,7 +45,7 @@ def test_highlight_language_default(app, status, warning):
|
||||
def test_highlight_language_python2(app, status, warning):
|
||||
app.build()
|
||||
doctree = app.env.get_doctree('doctest')
|
||||
for node in doctree.traverse(nodes.literal_block):
|
||||
for node in doctree.findall(nodes.literal_block):
|
||||
assert node['language'] in ('python', 'pycon', 'none')
|
||||
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user