mirror of
https://github.com/sphinx-doc/sphinx.git
synced 2025-02-25 18:55:22 -06:00
Add reStructuredText parsing functions to `SphinxDirective` (#12492)
This commit is contained in:
12
CHANGES.rst
12
CHANGES.rst
@@ -34,6 +34,18 @@ Features added
|
||||
Patch by James Addison.
|
||||
* #12319: ``sphinx.ext.extlinks``: Add ``extlink-{name}`` CSS class to links.
|
||||
Patch by Hugo van Kemenade.
|
||||
* Add helper methods for parsing reStructuredText content into nodes from
|
||||
within a directive.
|
||||
|
||||
- :py:meth:`~sphinx.util.docutils.SphinxDirective.parse_content_to_nodes()`
|
||||
parses the directive's content and returns a list of Docutils nodes.
|
||||
- :py:meth:`~sphinx.util.docutils.SphinxDirective.parse_text_to_nodes()`
|
||||
parses the provided text and returns a list of Docutils nodes.
|
||||
- :py:meth:`~sphinx.util.docutils.SphinxDirective.parse_inline()`
|
||||
parses the provided text into inline elements and text nodes.
|
||||
|
||||
Patch by Adam Turner.
|
||||
|
||||
|
||||
Bugs fixed
|
||||
----------
|
||||
|
||||
@@ -186,6 +186,7 @@ nitpick_ignore = {
|
||||
('py:class', 'NullTranslations'), # gettext.NullTranslations
|
||||
('py:class', 'RoleFunction'), # sphinx.domains.Domain
|
||||
('py:class', 'Theme'), # sphinx.application.TemplateBridge
|
||||
('py:class', 'system_message'), # sphinx.utils.docutils
|
||||
('py:class', 'TitleGetter'), # sphinx.domains.Domain
|
||||
('py:class', 'XRefRole'), # sphinx.domains.Domain
|
||||
('py:class', 'docutils.nodes.Element'),
|
||||
|
||||
@@ -38,7 +38,7 @@ class TodoDirective(SphinxDirective):
|
||||
|
||||
todo_node = todo('\n'.join(self.content))
|
||||
todo_node += nodes.title(_('Todo'), _('Todo'))
|
||||
self.state.nested_parse(self.content, self.content_offset, todo_node)
|
||||
todo_node += self.parse_content_to_nodes()
|
||||
|
||||
if not hasattr(self.env, 'todo_all_todos'):
|
||||
self.env.todo_all_todos = []
|
||||
|
||||
@@ -13,7 +13,6 @@ from sphinx.addnodes import desc_signature # NoQA: TCH001
|
||||
from sphinx.util import docutils
|
||||
from sphinx.util.docfields import DocFieldTransformer, Field, TypedField
|
||||
from sphinx.util.docutils import SphinxDirective
|
||||
from sphinx.util.nodes import nested_parse_with_titles
|
||||
from sphinx.util.typing import ExtensionMetadata, OptionSpec # NoQA: TCH001
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -127,7 +126,7 @@ class ObjectDescription(SphinxDirective, Generic[ObjDescT]):
|
||||
"""
|
||||
pass
|
||||
|
||||
def transform_content(self, contentnode: addnodes.desc_content) -> None:
|
||||
def transform_content(self, content_node: addnodes.desc_content) -> None:
|
||||
"""
|
||||
Called after creating the content through nested parsing,
|
||||
but before the ``object-description-transform`` event is emitted,
|
||||
@@ -275,18 +274,16 @@ class ObjectDescription(SphinxDirective, Generic[ObjDescT]):
|
||||
# description of the object with this name in this desc block
|
||||
self.add_target_and_index(name, sig, signode)
|
||||
|
||||
contentnode = addnodes.desc_content()
|
||||
node.append(contentnode)
|
||||
|
||||
if self.names:
|
||||
# needed for association of version{added,changed} directives
|
||||
self.env.temp_data['object'] = self.names[0]
|
||||
self.before_content()
|
||||
nested_parse_with_titles(self.state, self.content, contentnode, self.content_offset)
|
||||
self.transform_content(contentnode)
|
||||
content_node = addnodes.desc_content('', *self.parse_content_to_nodes())
|
||||
node.append(content_node)
|
||||
self.transform_content(content_node)
|
||||
self.env.app.emit('object-description-transform',
|
||||
self.domain, self.objtype, contentnode)
|
||||
DocFieldTransformer(self).transform_all(contentnode)
|
||||
self.domain, self.objtype, content_node)
|
||||
DocFieldTransformer(self).transform_all(content_node)
|
||||
self.env.temp_data['object'] = None
|
||||
self.after_content()
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ from typing import TYPE_CHECKING, Any, ClassVar
|
||||
|
||||
from docutils import nodes
|
||||
from docutils.parsers.rst import directives
|
||||
from docutils.statemachine import StringList
|
||||
|
||||
from sphinx import addnodes
|
||||
from sphinx.directives import optional_int
|
||||
@@ -75,15 +74,13 @@ def container_wrapper(
|
||||
) -> nodes.container:
|
||||
container_node = nodes.container('', literal_block=True,
|
||||
classes=['literal-block-wrapper'])
|
||||
parsed = nodes.Element()
|
||||
directive.state.nested_parse(StringList([caption], source=''),
|
||||
directive.content_offset, parsed)
|
||||
if isinstance(parsed[0], nodes.system_message):
|
||||
msg = __('Invalid caption: %s' % parsed[0].astext())
|
||||
parsed = directive.parse_text_to_nodes(caption, offset=directive.content_offset)
|
||||
node = parsed[0]
|
||||
if isinstance(node, nodes.system_message):
|
||||
msg = __('Invalid caption: %s') % node.astext()
|
||||
raise ValueError(msg)
|
||||
if isinstance(parsed[0], nodes.Element):
|
||||
caption_node = nodes.caption(parsed[0].rawsource, '',
|
||||
*parsed[0].children)
|
||||
if isinstance(node, nodes.Element):
|
||||
caption_node = nodes.caption(node.rawsource, '', *node.children)
|
||||
caption_node.source = literal_node.source
|
||||
caption_node.line = literal_node.line
|
||||
container_node += caption_node
|
||||
|
||||
@@ -198,7 +198,7 @@ class Author(SphinxDirective):
|
||||
else:
|
||||
text = _('Author: ')
|
||||
emph += nodes.Text(text)
|
||||
inodes, messages = self.state.inline_text(self.arguments[0], self.lineno)
|
||||
inodes, messages = self.parse_inline(self.arguments[0])
|
||||
emph.extend(inodes)
|
||||
|
||||
ret: list[Node] = [para]
|
||||
@@ -247,7 +247,7 @@ class Centered(SphinxDirective):
|
||||
if not self.arguments:
|
||||
return []
|
||||
subnode: Element = addnodes.centered()
|
||||
inodes, messages = self.state.inline_text(self.arguments[0], self.lineno)
|
||||
inodes, messages = self.parse_inline(self.arguments[0])
|
||||
subnode.extend(inodes)
|
||||
|
||||
ret: list[Node] = [subnode]
|
||||
@@ -267,15 +267,12 @@ class Acks(SphinxDirective):
|
||||
option_spec: ClassVar[OptionSpec] = {}
|
||||
|
||||
def run(self) -> list[Node]:
|
||||
node = addnodes.acks()
|
||||
node.document = self.state.document
|
||||
self.state.nested_parse(self.content, self.content_offset, node)
|
||||
if len(node.children) != 1 or not isinstance(node.children[0],
|
||||
nodes.bullet_list):
|
||||
children = self.parse_content_to_nodes()
|
||||
if len(children) != 1 or not isinstance(children[0], nodes.bullet_list):
|
||||
logger.warning(__('.. acks content is not a list'),
|
||||
location=(self.env.docname, self.lineno))
|
||||
return []
|
||||
return [node]
|
||||
return [addnodes.acks('', *children)]
|
||||
|
||||
|
||||
class HList(SphinxDirective):
|
||||
@@ -293,15 +290,12 @@ class HList(SphinxDirective):
|
||||
|
||||
def run(self) -> list[Node]:
|
||||
ncolumns = self.options.get('columns', 2)
|
||||
node = nodes.paragraph()
|
||||
node.document = self.state.document
|
||||
self.state.nested_parse(self.content, self.content_offset, node)
|
||||
if len(node.children) != 1 or not isinstance(node.children[0],
|
||||
nodes.bullet_list):
|
||||
children = self.parse_content_to_nodes()
|
||||
if len(children) != 1 or not isinstance(children[0], nodes.bullet_list):
|
||||
logger.warning(__('.. hlist content is not a list'),
|
||||
location=(self.env.docname, self.lineno))
|
||||
return []
|
||||
fulllist = node.children[0]
|
||||
fulllist = children[0]
|
||||
# create a hlist node where the items are distributed
|
||||
npercol, nmore = divmod(len(fulllist), ncolumns)
|
||||
index = 0
|
||||
|
||||
@@ -62,15 +62,14 @@ class VersionChange(SphinxDirective):
|
||||
node['version'] = self.arguments[0]
|
||||
text = versionlabels[self.name] % self.arguments[0]
|
||||
if len(self.arguments) == 2:
|
||||
inodes, messages = self.state.inline_text(self.arguments[1],
|
||||
self.lineno + 1)
|
||||
inodes, messages = self.parse_inline(self.arguments[1], lineno=self.lineno + 1)
|
||||
para = nodes.paragraph(self.arguments[1], '', *inodes, translatable=False)
|
||||
self.set_source_info(para)
|
||||
node.append(para)
|
||||
else:
|
||||
messages = []
|
||||
if self.content:
|
||||
self.state.nested_parse(self.content, self.content_offset, node)
|
||||
node += self.parse_content_to_nodes()
|
||||
classes = ['versionmodified', versionlabel_classes[self.name]]
|
||||
if len(node) > 0 and isinstance(node[0], nodes.paragraph):
|
||||
# the contents start with a paragraph
|
||||
|
||||
@@ -763,10 +763,9 @@ class CPPAliasObject(ObjectDescription):
|
||||
for sig in signatures:
|
||||
node.append(AliasNode(sig, aliasOptions, env=self.env))
|
||||
|
||||
contentnode = addnodes.desc_content()
|
||||
node.append(contentnode)
|
||||
self.before_content()
|
||||
self.state.nested_parse(self.content, self.content_offset, contentnode)
|
||||
content_node = addnodes.desc_content('', *self.parse_content_to_nodes())
|
||||
node.append(content_node)
|
||||
self.env.temp_data['object'] = None
|
||||
self.after_content()
|
||||
return [node]
|
||||
|
||||
@@ -17,7 +17,7 @@ from sphinx.roles import XRefRole
|
||||
from sphinx.util import logging
|
||||
from sphinx.util.docfields import Field, GroupedField, TypedField
|
||||
from sphinx.util.docutils import SphinxDirective
|
||||
from sphinx.util.nodes import make_id, make_refnode, nested_parse_with_titles
|
||||
from sphinx.util.nodes import make_id, make_refnode
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Iterator
|
||||
@@ -311,10 +311,7 @@ class JSModule(SphinxDirective):
|
||||
self.env.ref_context['js:module'] = mod_name
|
||||
no_index = 'no-index' in self.options or 'noindex' in self.options
|
||||
|
||||
content_node: Element = nodes.section()
|
||||
# necessary so that the child nodes get the right source/line set
|
||||
content_node.document = self.state.document
|
||||
nested_parse_with_titles(self.state, self.content, content_node, self.content_offset)
|
||||
content_nodes = self.parse_content_to_nodes()
|
||||
|
||||
ret: list[Node] = []
|
||||
if not no_index:
|
||||
@@ -334,7 +331,7 @@ class JSModule(SphinxDirective):
|
||||
target = nodes.target('', '', ids=[node_id], ismod=True)
|
||||
self.state.document.note_explicit_target(target)
|
||||
ret.append(target)
|
||||
ret.extend(content_node.children)
|
||||
ret.extend(content_nodes)
|
||||
return ret
|
||||
|
||||
|
||||
|
||||
@@ -22,7 +22,6 @@ from sphinx.util.nodes import (
|
||||
find_pending_xref_condition,
|
||||
make_id,
|
||||
make_refnode,
|
||||
nested_parse_with_titles,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -417,10 +416,7 @@ class PyModule(SphinxDirective):
|
||||
no_index = 'no-index' in self.options or 'noindex' in self.options
|
||||
self.env.ref_context['py:module'] = modname
|
||||
|
||||
content_node: Element = nodes.section()
|
||||
# necessary so that the child nodes get the right source/line set
|
||||
content_node.document = self.state.document
|
||||
nested_parse_with_titles(self.state, self.content, content_node, self.content_offset)
|
||||
content_nodes = self.parse_content_to_nodes()
|
||||
|
||||
ret: list[Node] = []
|
||||
if not no_index:
|
||||
@@ -444,7 +440,7 @@ class PyModule(SphinxDirective):
|
||||
# The node order is: index node first, then target node.
|
||||
ret.append(inode)
|
||||
ret.append(target)
|
||||
ret.extend(content_node.children)
|
||||
ret.extend(content_nodes)
|
||||
return ret
|
||||
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ from sphinx.roles import EmphasizedLiteral, XRefRole
|
||||
from sphinx.util import docname_join, logging, ws_re
|
||||
from sphinx.util.docutils import SphinxDirective
|
||||
from sphinx.util.nodes import clean_astext, make_id, make_refnode
|
||||
from sphinx.util.parsing import nested_parse_to_nodes
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Iterable, Iterator
|
||||
@@ -260,10 +261,15 @@ class OptionXRefRole(XRefRole):
|
||||
return title, target
|
||||
|
||||
|
||||
def split_term_classifiers(line: str) -> list[str | None]:
|
||||
_term_classifiers_re = re.compile(' +: +')
|
||||
|
||||
|
||||
def split_term_classifiers(line: str) -> tuple[str, str | None]:
|
||||
# split the line into a term and classifiers; if there is no classifier, None is used
|
||||
parts: list[str | None] = [*re.split(' +: +', line), None]
|
||||
return parts
|
||||
parts = _term_classifiers_re.split(line)
|
||||
term = parts[0]
|
||||
first_classifier = parts[1] if len(parts) >= 2 else None
|
||||
return term, first_classifier
|
||||
|
||||
|
||||
def make_glossary_term(env: BuildEnvironment, textnodes: Iterable[Node], index_key: str,
|
||||
@@ -382,15 +388,14 @@ class Glossary(SphinxDirective):
|
||||
termnodes: list[Node] = []
|
||||
system_messages: list[Node] = []
|
||||
for line, source, lineno in terms:
|
||||
parts = split_term_classifiers(line)
|
||||
term_, first_classifier = split_term_classifiers(line)
|
||||
# parse the term with inline markup
|
||||
# classifiers (parts[1:]) will not be shown on doctree
|
||||
textnodes, sysmsg = self.state.inline_text(parts[0],
|
||||
lineno)
|
||||
textnodes, sysmsg = self.parse_inline(term_, lineno=lineno)
|
||||
|
||||
# use the first classifier as an index key
|
||||
term = make_glossary_term(self.env, textnodes,
|
||||
parts[1], source, lineno, # type: ignore[arg-type]
|
||||
first_classifier, source, lineno, # type: ignore[arg-type]
|
||||
node_id=None, document=self.state.document)
|
||||
term.rawsource = line
|
||||
system_messages.extend(sysmsg)
|
||||
@@ -398,11 +403,12 @@ class Glossary(SphinxDirective):
|
||||
|
||||
termnodes.extend(system_messages)
|
||||
|
||||
defnode = nodes.definition()
|
||||
if definition:
|
||||
self.state.nested_parse(definition, definition.items[0][1],
|
||||
defnode)
|
||||
termnodes.append(defnode)
|
||||
offset = definition.items[0][1]
|
||||
definition_nodes = nested_parse_to_nodes(self.state, definition, offset=offset)
|
||||
else:
|
||||
definition_nodes = []
|
||||
termnodes.append(nodes.definition('', *definition_nodes))
|
||||
items.append(nodes.definition_list_item('', *termnodes))
|
||||
|
||||
dlist = nodes.definition_list('', *items)
|
||||
|
||||
@@ -9,10 +9,10 @@ from docutils.utils import Reporter, assemble_option_dict
|
||||
from sphinx.ext.autodoc import Documenter, Options
|
||||
from sphinx.util import logging
|
||||
from sphinx.util.docutils import SphinxDirective, switch_source_input
|
||||
from sphinx.util.nodes import nested_parse_with_titles
|
||||
from sphinx.util.parsing import nested_parse_to_nodes
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from docutils.nodes import Element, Node
|
||||
from docutils.nodes import Node
|
||||
from docutils.parsers.rst.states import RSTState
|
||||
|
||||
from sphinx.config import Config
|
||||
@@ -86,15 +86,12 @@ def parse_generated_content(state: RSTState, content: StringList, documenter: Do
|
||||
"""Parse an item of content generated by Documenter."""
|
||||
with switch_source_input(state, content):
|
||||
if documenter.titles_allowed:
|
||||
node: Element = nodes.section()
|
||||
return nested_parse_to_nodes(state, content)
|
||||
|
||||
node = nodes.paragraph()
|
||||
# necessary so that the child nodes get the right source/line set
|
||||
node.document = state.document
|
||||
nested_parse_with_titles(state, content, node)
|
||||
else:
|
||||
node = nodes.paragraph()
|
||||
node.document = state.document
|
||||
state.nested_parse(content, 0, node)
|
||||
|
||||
state.nested_parse(content, 0, node, match_titles=False)
|
||||
return node.children
|
||||
|
||||
|
||||
|
||||
@@ -87,6 +87,7 @@ from sphinx.util.docutils import (
|
||||
)
|
||||
from sphinx.util.inspect import getmro, signature_from_str
|
||||
from sphinx.util.matching import Matcher
|
||||
from sphinx.util.parsing import nested_parse_to_nodes
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Sequence
|
||||
@@ -406,16 +407,13 @@ class Autosummary(SphinxDirective):
|
||||
row = nodes.row('')
|
||||
source, line = self.state_machine.get_source_and_line()
|
||||
for text in column_texts:
|
||||
node = nodes.paragraph('')
|
||||
vl = StringList()
|
||||
vl.append(text, '%s:%d:<autosummary>' % (source, line))
|
||||
vl = StringList([text], f'{source}:{line}:<autosummary>')
|
||||
with switch_source_input(self.state, vl):
|
||||
self.state.nested_parse(vl, 0, node)
|
||||
try:
|
||||
if isinstance(node[0], nodes.paragraph):
|
||||
node = node[0]
|
||||
except IndexError:
|
||||
pass
|
||||
col_nodes = nested_parse_to_nodes(self.state, vl)
|
||||
if col_nodes and isinstance(col_nodes[0], nodes.paragraph):
|
||||
node = col_nodes[0]
|
||||
else:
|
||||
node = nodes.paragraph('')
|
||||
row.append(nodes.entry('', node))
|
||||
body.append(row)
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ from typing import TYPE_CHECKING, Any, ClassVar
|
||||
from urllib.parse import urlsplit, urlunsplit
|
||||
|
||||
from docutils import nodes
|
||||
from docutils.parsers.rst import Directive, directives
|
||||
from docutils.parsers.rst import directives
|
||||
|
||||
import sphinx
|
||||
from sphinx.errors import SphinxError
|
||||
@@ -91,12 +91,12 @@ class graphviz(nodes.General, nodes.Inline, nodes.Element):
|
||||
pass
|
||||
|
||||
|
||||
def figure_wrapper(directive: Directive, node: graphviz, caption: str) -> nodes.figure:
|
||||
def figure_wrapper(directive: SphinxDirective, node: graphviz, caption: str) -> nodes.figure:
|
||||
figure_node = nodes.figure('', node)
|
||||
if 'align' in node:
|
||||
figure_node['align'] = node.attributes.pop('align')
|
||||
|
||||
inodes, messages = directive.state.inline_text(caption, directive.lineno)
|
||||
inodes, messages = directive.parse_inline(caption)
|
||||
caption_node = nodes.caption(caption, '', *inodes)
|
||||
caption_node.extend(messages)
|
||||
set_source_info(directive, caption_node)
|
||||
|
||||
@@ -22,7 +22,6 @@ from docutils import nodes
|
||||
|
||||
import sphinx
|
||||
from sphinx.util.docutils import SphinxDirective
|
||||
from sphinx.util.nodes import nested_parse_with_titles
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from docutils.nodes import Node
|
||||
@@ -48,7 +47,7 @@ class IfConfig(SphinxDirective):
|
||||
node.document = self.state.document
|
||||
self.set_source_info(node)
|
||||
node['expr'] = self.arguments[0]
|
||||
nested_parse_with_titles(self.state, self.content, node, self.content_offset)
|
||||
node += self.parse_content_to_nodes()
|
||||
return [node]
|
||||
|
||||
|
||||
|
||||
@@ -406,12 +406,13 @@ class Locale(SphinxTransform):
|
||||
# glossary terms update refid
|
||||
if isinstance(node, nodes.term):
|
||||
for _id in node['ids']:
|
||||
parts = split_term_classifiers(msgstr)
|
||||
term, first_classifier = split_term_classifiers(msgstr)
|
||||
patch = publish_msgstr(
|
||||
self.app, parts[0] or '', source, node.line, self.config, settings, # type: ignore[arg-type]
|
||||
self.app, term or '', source, node.line, self.config, settings, # type: ignore[arg-type]
|
||||
)
|
||||
updater.patch = make_glossary_term(
|
||||
self.env, patch, parts[1] or '', source, node.line, _id, self.document, # type: ignore[arg-type]
|
||||
self.env, patch, first_classifier or '',
|
||||
source, node.line, _id, self.document, # type: ignore[arg-type]
|
||||
)
|
||||
processed = True
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ from docutils.writers._html_base import HTMLTranslator
|
||||
from sphinx.errors import SphinxError
|
||||
from sphinx.locale import _, __
|
||||
from sphinx.util import logging
|
||||
from sphinx.util.parsing import inliner_parse_text, nested_parse_to_nodes
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
report_re = re.compile('^(.+?:(?:\\d+)?): \\((DEBUG|INFO|WARNING|ERROR|SEVERE)/(\\d+)?\\) ')
|
||||
@@ -426,6 +427,40 @@ class SphinxDirective(Directive):
|
||||
"""Get current location info for logging."""
|
||||
return ':'.join(str(s) for s in self.get_source_info())
|
||||
|
||||
def parse_content_to_nodes(self) -> list[Node]:
    """Parse the directive's content and return the resulting nodes."""
    # Delegate to the shared nested-parsing helper, anchored at the
    # directive's own content offset.
    state, content, offset = self.state, self.content, self.content_offset
    return nested_parse_to_nodes(state, content, offset=offset)
|
||||
|
||||
def parse_text_to_nodes(self, text: str = '', /, *, offset: int = -1) -> list[Node]:
    """Parse *text* into nodes.

    :param text:
        Text, in string form. ``StringList`` is also accepted.
    :param offset:
        The offset of the content.
    """
    # A negative offset is the sentinel for "use the directive's own offset".
    effective_offset = self.content_offset if offset == -1 else offset
    return nested_parse_to_nodes(self.state, text, offset=effective_offset)
|
||||
|
||||
def parse_inline(
    self, text: str, *, lineno: int = -1,
) -> tuple[list[Node], list[system_message]]:
    """Parse *text* as inline elements.

    :param text:
        The text to parse, which should be a single line or paragraph.
        This cannot contain any structural elements (headings,
        transitions, directives, etc).
    :param lineno:
        The line number where the interpreted text begins.
    :returns:
        A list of nodes (text and inline elements) and a list of system_messages.
    """
    # A negative line number is the sentinel for "use the directive's line".
    effective_lineno = self.lineno if lineno == -1 else lineno
    return inliner_parse_text(text, state=self.state, lineno=effective_lineno)
|
||||
|
||||
|
||||
class SphinxRole:
|
||||
"""A base class for Sphinx roles.
|
||||
|
||||
@@ -13,13 +13,14 @@ from docutils.nodes import Node
|
||||
from sphinx import addnodes
|
||||
from sphinx.locale import __
|
||||
from sphinx.util import logging
|
||||
from sphinx.util.parsing import _fresh_title_style_context
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Iterable, Iterator
|
||||
|
||||
from docutils.nodes import Element
|
||||
from docutils.parsers.rst import Directive
|
||||
from docutils.parsers.rst.states import Inliner
|
||||
from docutils.parsers.rst.states import Inliner, RSTState
|
||||
from docutils.statemachine import StringList
|
||||
|
||||
from sphinx.builders import Builder
|
||||
@@ -324,24 +325,20 @@ def traverse_translatable_index(
|
||||
yield node, entries
|
||||
|
||||
|
||||
def nested_parse_with_titles(state: Any, content: StringList, node: Node,
|
||||
def nested_parse_with_titles(state: RSTState, content: StringList, node: Node,
|
||||
content_offset: int = 0) -> str:
|
||||
"""Version of state.nested_parse() that allows titles and does not require
|
||||
titles to have the same decoration as the calling document.
|
||||
|
||||
This is useful when the parsed content comes from a completely different
|
||||
context, such as docstrings.
|
||||
|
||||
This function is retained for compatibility and will be deprecated in
|
||||
Sphinx 8. Prefer ``parse_block_text()``.
|
||||
"""
|
||||
# hack around title style bookkeeping
|
||||
surrounding_title_styles = state.memo.title_styles
|
||||
surrounding_section_level = state.memo.section_level
|
||||
state.memo.title_styles = []
|
||||
state.memo.section_level = 0
|
||||
try:
|
||||
return state.nested_parse(content, content_offset, node, match_titles=1)
|
||||
finally:
|
||||
state.memo.title_styles = surrounding_title_styles
|
||||
state.memo.section_level = surrounding_section_level
|
||||
with _fresh_title_style_context(state):
|
||||
ret = state.nested_parse(content, content_offset, node, match_titles=True)
|
||||
return ret
|
||||
|
||||
|
||||
def clean_astext(node: Element) -> str:
|
||||
|
||||
97
sphinx/util/parsing.py
Normal file
97
sphinx/util/parsing.py
Normal file
@@ -0,0 +1,97 @@
|
||||
"""Docutils utility functions for parsing text."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from docutils import nodes
|
||||
from docutils.statemachine import StringList, string2lines
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Iterator
|
||||
|
||||
from docutils.parsers.rst.states import Inliner, RSTState, Struct
|
||||
|
||||
|
||||
def nested_parse_to_nodes(
    state: RSTState,
    text: str | StringList,
    *,
    source: str = '<generated text>',
    offset: int = 0,
    keep_title_context: bool = False,
) -> list[nodes.Node]:  # Element | nodes.Text
    """Parse *text* and return the resulting list of nodes.

    :param state:
        The state machine state. Must be a subclass of ``RSTState``.
    :param text:
        Text, in string form. ``StringList`` is also accepted.
    :param source:
        The text's source, used when creating a new ``StringList``.
    :param offset:
        The offset of the content.
    :param keep_title_context:
        If this is False (the default), then *content* is parsed as if it were
        an independent document, meaning that title decorations (e.g. underlines)
        do not need to match the surrounding document.
        This is useful when the parsed content comes from
        a completely different context, such as docstrings.
        If this is True, then title underlines must match those in
        the surrounding document, otherwise errors will occur. TODO: check!
    """
    settings = state.document.settings
    content = _text_to_string_list(text, source=source, tab_width=settings.tab_width)

    # Anonymous element used purely as a container for the parsed children.
    container = nodes.Element()
    container.document = state.document

    if keep_title_context:
        # Titles must agree with the surrounding document's decorations.
        title_context = contextlib.nullcontext()
    else:
        # Parse as an independent document with fresh title-style bookkeeping.
        title_context = _fresh_title_style_context(state)
    with title_context:
        state.nested_parse(content, offset, container, match_titles=True)
    return container.children
|
||||
|
||||
|
||||
@contextlib.contextmanager
def _fresh_title_style_context(state: RSTState) -> Iterator[None]:
    """Temporarily reset the state's title-style bookkeeping.

    Inside the context, nested parsing behaves as if it were at the top of
    an independent document; the previous styles and section level are
    restored on exit, even if parsing raises.
    """
    memo = state.memo
    saved_styles: list[str | tuple[str, str]] = memo.title_styles
    saved_level: int = memo.section_level
    # Start from a blank slate for title decorations.
    memo.title_styles = []
    memo.section_level = 0
    try:
        yield
    finally:
        # Restore the surrounding document's bookkeeping.
        memo.title_styles = saved_styles
        memo.section_level = saved_level
|
||||
|
||||
|
||||
def inliner_parse_text(
    text: str, *, state: RSTState, lineno: int = 1,
) -> tuple[list[nodes.Node], list[nodes.system_message]]:
    """Parse *text* as inline nodes.

    The text cannot contain any structural elements (headings, transitions,
    directives, etc), so should be a simple line or paragraph of text.
    """
    # Hand off directly to the state's inliner, using the state's memo and
    # parent node as the parsing context.
    return state.inliner.parse(text, lineno, state.memo, state.parent)
|
||||
|
||||
|
||||
def _text_to_string_list(
    text: str | StringList, /, *, source: str, tab_width: int,
) -> StringList:
    """Normalise *text* to a ``StringList``.

    A ``StringList`` argument is returned unchanged; a plain string is split
    into lines with tabs expanded to *tab_width* spaces.
    """
    # Doesn't really belong in this module, but avoids circular imports.
    if isinstance(text, StringList):
        return text
    lines = string2lines(text, tab_width, convert_whitespace=True)
    return StringList(lines, source=source)
|
||||
139
tests/test_util/test_util_docutils_sphinx_directive.py
Normal file
139
tests/test_util/test_util_docutils_sphinx_directive.py
Normal file
@@ -0,0 +1,139 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from types import SimpleNamespace
|
||||
|
||||
from docutils import nodes
|
||||
from docutils.parsers.rst.languages import en as english # type: ignore[attr-defined]
|
||||
from docutils.parsers.rst.states import Inliner, RSTState, RSTStateMachine, state_classes
|
||||
from docutils.statemachine import StringList
|
||||
|
||||
from sphinx.util.docutils import SphinxDirective, new_document
|
||||
|
||||
|
||||
def make_directive(*, env: SimpleNamespace, input_lines: StringList | None = None) -> SphinxDirective:
    """Return only the directive half of ``make_directive_and_state``."""
    _state, directive = make_directive_and_state(env=env, input_lines=input_lines)
    return directive
|
||||
|
||||
|
||||
def make_directive_and_state(*, env: SimpleNamespace, input_lines: StringList | None = None) -> tuple[RSTState, SphinxDirective]:
    """Construct a minimal ``RSTState`` and a ``SphinxDirective`` bound to it."""
    machine = RSTStateMachine(state_classes, initial_state='Body')
    machine.reporter = object()
    if input_lines is not None:
        machine.input_lines = input_lines

    state = RSTState(machine)
    state.document = document = new_document('<tests>')
    settings = document.settings
    settings.env = env
    settings.tab_width = 4
    settings.pep_references = None
    settings.rfc_references = None

    inliner = Inliner()
    inliner.init_customizations(settings)
    state.inliner = inliner
    state.parent = None
    # Minimal stand-in for the parser's memo object.
    state.memo = SimpleNamespace(
        document=document,
        language=english,
        inliner=inliner,
        reporter=document.reporter,
        section_level=0,
        title_styles=[],
    )

    directive = SphinxDirective(
        name='test_directive',
        arguments=[],
        options={},
        content=StringList(),
        lineno=0,
        content_offset=0,
        block_text='',
        state=state,
        state_machine=state.state_machine,
    )
    return state, directive
|
||||
|
||||
|
||||
def test_sphinx_directive_env():
    """``SphinxDirective.env`` resolves to the document settings' env."""
    state, directive = make_directive_and_state(env=SimpleNamespace())

    assert hasattr(directive, 'env')
    assert directive.env is state.document.settings.env
|
||||
|
||||
|
||||
def test_sphinx_directive_config():
    """``SphinxDirective.config`` proxies the environment's config."""
    fake_env = SimpleNamespace(config=object())
    state, directive = make_directive_and_state(env=fake_env)

    assert hasattr(directive, 'config')
    assert directive.config is directive.env.config
    assert directive.config is state.document.settings.env.config
|
||||
|
||||
|
||||
def test_sphinx_directive_get_source_info():
    """Source info is taken from the state machine's input lines."""
    source_lines = StringList(['spam'], source='<source>')
    directive = make_directive(env=SimpleNamespace(), input_lines=source_lines)

    assert directive.get_source_info() == ('<source>', 1)
|
||||
|
||||
|
||||
def test_sphinx_directive_set_source_info():
    """``set_source_info`` stamps source and line onto a node."""
    source_lines = StringList(['spam'], source='<source>')
    directive = make_directive(env=SimpleNamespace(), input_lines=source_lines)

    target = nodes.Element()
    directive.set_source_info(target)
    assert target.source == '<source>'
    assert target.line == 1
|
||||
|
||||
|
||||
def test_sphinx_directive_get_location():
    """``get_location`` joins source and line with a colon."""
    source_lines = StringList(['spam'], source='<source>')
    directive = make_directive(env=SimpleNamespace(), input_lines=source_lines)

    assert directive.get_location() == '<source>:1'
|
||||
|
||||
|
||||
def test_sphinx_directive_parse_content_to_nodes():
    """``parse_content_to_nodes`` parses directive content, titles included."""
    directive = make_directive(env=SimpleNamespace())
    rst = 'spam\n====\n\nEggs! *Lobster thermidor.*'
    directive.content = StringList(rst.split('\n'), source='<source>')

    parsed = directive.parse_content_to_nodes()
    assert len(parsed) == 1
    section = parsed[0]
    assert isinstance(section, nodes.section)
    assert len(section.children) == 2
    title, paragraph = section.children
    assert isinstance(title, nodes.title)
    assert title.astext() == 'spam'
    assert isinstance(paragraph, nodes.paragraph)
    assert paragraph.astext() == 'Eggs! Lobster thermidor.'
|
||||
|
||||
|
||||
def test_sphinx_directive_parse_text_to_nodes():
    """``parse_text_to_nodes`` parses a raw string, titles included."""
    directive = make_directive(env=SimpleNamespace())
    rst = 'spam\n====\n\nEggs! *Lobster thermidor.*'

    parsed = directive.parse_text_to_nodes(rst)
    assert len(parsed) == 1
    section = parsed[0]
    assert isinstance(section, nodes.section)
    assert len(section.children) == 2
    title, paragraph = section.children
    assert isinstance(title, nodes.title)
    assert title.astext() == 'spam'
    assert isinstance(paragraph, nodes.paragraph)
    assert paragraph.astext() == 'Eggs! Lobster thermidor.'
|
||||
|
||||
|
||||
def test_sphinx_directive_parse_inline():
    """``parse_inline`` returns inline nodes and system messages."""
    directive = make_directive(env=SimpleNamespace())

    parsed, messages = directive.parse_inline('Eggs! *Lobster thermidor.*')
    assert len(parsed) == 2
    assert messages == []
    text_node, emphasis_node = parsed
    assert text_node == nodes.Text('Eggs! ')
    assert isinstance(emphasis_node, nodes.emphasis)
    assert emphasis_node.rawsource == '*Lobster thermidor.*'
    assert emphasis_node[0] == nodes.Text('Lobster thermidor.')
|
||||
Reference in New Issue
Block a user