2022-02-19 21:05:56 -06:00
|
|
|
|
"""Tests uti.nodes functions."""
|
2024-08-11 08:58:56 -05:00
|
|
|
|
|
2022-12-30 14:14:18 -06:00
|
|
|
|
from __future__ import annotations
|
|
|
|
|
|
2022-04-21 20:53:25 -05:00
|
|
|
|
import warnings
|
2014-09-21 10:17:02 -05:00
|
|
|
|
from textwrap import dedent
|
2024-09-25 02:57:11 -05:00
|
|
|
|
from typing import TYPE_CHECKING, Any
|
2014-09-21 10:17:02 -05:00
|
|
|
|
|
2018-02-19 07:39:14 -06:00
|
|
|
|
import pytest
|
2020-11-11 05:00:27 -06:00
|
|
|
|
from docutils import frontend, nodes
|
2014-09-21 10:17:02 -05:00
|
|
|
|
from docutils.parsers import rst
|
|
|
|
|
from docutils.utils import new_document
|
|
|
|
|
|
2015-08-16 05:20:45 -05:00
|
|
|
|
from sphinx.transforms import ApplySourceWorkaround
|
2023-01-07 12:31:15 -06:00
|
|
|
|
from sphinx.util.nodes import (
|
|
|
|
|
NodeMatcher,
|
|
|
|
|
apply_source_workaround,
|
|
|
|
|
clean_astext,
|
|
|
|
|
extract_messages,
|
|
|
|
|
make_id,
|
|
|
|
|
split_explicit_title,
|
|
|
|
|
)
|
2014-09-21 10:17:02 -05:00
|
|
|
|
|
2024-09-25 02:57:11 -05:00
|
|
|
|
if TYPE_CHECKING:
|
|
|
|
|
from docutils.nodes import document
|
2014-09-21 10:17:02 -05:00
|
|
|
|
|
2024-09-25 02:57:11 -05:00
|
|
|
|
|
|
|
|
|
def _transform(doctree) -> None:
    """Apply the ApplySourceWorkaround transform to *doctree* in place."""
    workaround = ApplySourceWorkaround(doctree)
    workaround.apply()
|
|
|
|
|
|
|
|
|
|
|
2024-09-25 02:57:11 -05:00
|
|
|
|
def create_new_document() -> document:
    """Return an empty docutils document configured for the RST parser."""
    with warnings.catch_warnings():
        # frontend.OptionParser is slated for replacement by an
        # argparse.ArgumentParser subclass in Docutils 0.21 or later;
        # ignore its DeprecationWarning while we still rely on it.
        warnings.filterwarnings('ignore', category=DeprecationWarning)
        option_parser = frontend.OptionParser(components=(rst.Parser,))
        settings = option_parser.get_default_values()
    settings.id_prefix = 'id'
    return new_document('dummy.txt', settings)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _get_doctree(text):
    """Parse *text* as reST and run the source workaround transform on it."""
    doctree = create_new_document()
    parser = rst.Parser()
    parser.parse(text, doctree)
    _transform(doctree)
    return doctree
|
|
|
|
|
|
|
|
|
|
|
2024-09-25 02:57:11 -05:00
|
|
|
|
def assert_node_count(messages, node_type, expect_count) -> None:
    """Assert that exactly *expect_count* nodes in *messages* are *node_type*.

    *messages* is an iterable of ``(node, message)`` pairs, as produced by
    ``extract_messages``; only the node half of each pair is inspected.
    """
    node_list = [node for node, _msg in messages]
    # sum() over a generator replaces the manual counter loop.
    count = sum(1 for node in node_list if isinstance(node, node_type))
    assert count == expect_count, (
        f'Count of {node_type!r} in the {node_list!r} '
        f'is {count} instead of {expect_count}'
    )
|
2014-09-21 10:17:02 -05:00
|
|
|
|
|
|
|
|
|
|
2018-08-25 02:17:44 -05:00
|
|
|
|
def test_NodeMatcher():
    """Exercise NodeMatcher with class, attribute, and combined criteria."""
    doctree = nodes.document(None, None)
    doctree += nodes.paragraph('', 'Hello')
    doctree += nodes.paragraph('', 'Sphinx', block=1)
    doctree += nodes.paragraph('', 'World', block=2)
    doctree += nodes.literal_block('', 'blah blah blah', block=3)

    def n_matches(matcher):
        # Count how many nodes in the tree the matcher accepts.
        return len(list(doctree.findall(matcher)))

    # search by node class
    assert n_matches(NodeMatcher(nodes.paragraph)) == 3

    # search by multiple node classes
    assert n_matches(NodeMatcher(nodes.paragraph, nodes.literal_block)) == 4

    # search by node attribute
    assert n_matches(NodeMatcher(block=1)) == 1

    # search by node attribute (Any)
    assert n_matches(NodeMatcher(block=Any)) == 3

    # search by both class and attribute
    assert n_matches(NodeMatcher(nodes.paragraph, block=Any)) == 2

    # mismatched
    assert n_matches(NodeMatcher(nodes.title)) == 0

    # search with Any does not match to Text node
    assert n_matches(NodeMatcher(blah=Any)) == 0
|
2018-11-24 10:16:17 -06:00
|
|
|
|
|
2018-08-25 02:17:44 -05:00
|
|
|
|
|
2017-01-06 09:46:26 -06:00
|
|
|
|
@pytest.mark.parametrize(
    ('rst', 'node_cls', 'count'),
    [
        # admonition title becomes a <title> node
        (
            """
        .. admonition:: admonition title

           admonition body
        """,
            nodes.title,
            1,
        ),
        # a figure's trailing paragraph becomes its <caption>
        (
            """
        .. figure:: foo.jpg

           this is title
        """,
            nodes.caption,
            1,
        ),
        # rubric directive produces a single <rubric> node
        (
            """
        .. rubric:: spam
        """,
            nodes.rubric,
            1,
        ),
        # a line block yields one <line> node per "|" line
        (
            """
        | spam
        | egg
        """,
            nodes.line,
            2,
        ),
        # line blocks nested inside a grid-table cell
        (
            """
        section
        =======

        +----------------+
        | | **Title 1**  |
        | | Message 1    |
        +----------------+
        """,
            nodes.line,
            2,
        ),
        # line blocks nested inside a bullet-list item
        (
            """
        * | **Title 1**
          | Message 1
        """,
            nodes.line,
            2,
        ),
    ],
)
def test_extract_messages(rst, node_cls, count):
    """extract_messages() yields *count* translatable *node_cls* nodes."""
    msg = extract_messages(_get_doctree(dedent(rst)))
    assert_node_count(msg, node_cls, count)
|
2015-08-16 05:20:45 -05:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def test_extract_messages_without_rawsource():
    """Check that message extraction falls back to ``node.astext()``.

    ``extract_messages``, used by the Sphinx i18n feature, drops nodes
    without a ``rawsource`` value, so every node that should be translated
    must carry one.  However, sometimes ``node.rawsource`` is not set.

    For example: recommonmark-0.2.0 doesn't set rawsource on `paragraph` nodes.

    refs #1994: Fall back to node's astext() during i18n message extraction.
    """
    p = nodes.paragraph()
    p.append(nodes.Text('test'))
    p.append(nodes.Text('sentence'))
    assert not p.rawsource  # target node must not have rawsource value
    document = create_new_document()
    document.append(p)
    _transform(document)
    assert_node_count(extract_messages(document), nodes.TextElement, 1)
    # Previously this was ``assert next(...), 'text sentence'`` — the comma
    # made the string the assertion *message*, so only truthiness was
    # checked.  Compare explicitly instead: astext() joins the paragraph's
    # Text children without a separator, giving 'testsentence'.
    extracted = next(m for _node, m in extract_messages(document))
    assert extracted == 'testsentence'
|
2016-12-17 02:28:19 -06:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def test_clean_astext():
    """clean_astext() returns node text without raw or image content."""
    # plain paragraph: text passes through unchanged
    para = nodes.paragraph(text='hello world')
    assert clean_astext(para) == 'hello world'

    # an image's alt text does not contribute to the cleaned text
    image = nodes.image(alt='hello world')
    assert clean_astext(image) == ''

    # raw nodes are stripped from the output
    para = nodes.paragraph(text='hello world')
    para += nodes.raw('', 'raw text', format='html')
    assert clean_astext(para) == 'hello world'
|
2019-05-16 09:14:57 -05:00
|
|
|
|
|
|
|
|
|
|
2020-03-20 05:34:19 -05:00
|
|
|
|
@pytest.mark.parametrize(
    ('prefix', 'term', 'expected'),
    [
        ('', '', 'id0'),
        ('term', '', 'term-0'),
        ('term', 'Sphinx', 'term-Sphinx'),
        ('', 'io.StringIO', 'io.StringIO'),  # contains a dot
        (
            # contains a dot & underscore
            '',
            'sphinx.setup_command',
            'sphinx.setup_command',
        ),
        ('', '_io.StringIO', 'io.StringIO'),  # starts with underscore
        ('', 'ｓｐｈｉｎｘ', 'sphinx'),  # alphabets in unicode fullwidth characters
        ('', '悠好', 'id0'),  # multibytes text (in Chinese)
        ('', 'Hello=悠好=こんにちは', 'Hello'),  # alphabets and multibytes text
        ('', 'fünf', 'funf'),  # latin1 (umlaut)
        ('', '0sphinx', 'sphinx'),  # starts with number
        ('', 'sphinx-', 'sphinx'),  # ends with hyphen
    ],
)
@pytest.mark.sphinx('html', testroot='root')
def test_make_id(app, prefix, term, expected):
    """make_id() sanitises *term* (with optional *prefix*) into a valid id."""
    document = create_new_document()
    assert make_id(app.env, document, prefix, term) == expected
|
|
|
|
|
|
|
|
|
|
|
2024-08-12 16:34:03 -05:00
|
|
|
|
@pytest.mark.sphinx('html', testroot='root')
def test_make_id_already_registered(app):
    """A clashing explicit id falls back to the sequential 'term-N' form."""
    document = create_new_document()
    # Occupy "term-Sphinx" so make_id must choose an alternative.
    document.ids['term-Sphinx'] = True
    assert make_id(app.env, document, 'term', 'Sphinx') == 'term-0'
|
2020-01-01 09:09:32 -06:00
|
|
|
|
|
|
|
|
|
|
2024-08-12 16:34:03 -05:00
|
|
|
|
@pytest.mark.sphinx('html', testroot='root')
def test_make_id_sequential(app):
    """Sequential ids skip numbers that are already registered."""
    document = create_new_document()
    document.ids['term-0'] = True  # 'term-0' is taken, so expect 'term-1'
    assert make_id(app.env, document, 'term') == 'term-1'
|
2020-01-01 09:09:32 -06:00
|
|
|
|
|
|
|
|
|
|
2019-05-16 09:14:57 -05:00
|
|
|
|
@pytest.mark.parametrize(
    ('title', 'expected'),
    [
        ('hello', (False, 'hello', 'hello')),  # implicit
        ('hello <world>', (True, 'hello', 'world')),  # explicit
        # explicit (title having angle brackets)
        ('hello <world> <sphinx>', (True, 'hello <world>', 'sphinx')),
    ],
)
def test_split_explicit_target(title, expected):
    """split_explicit_title() separates 'title <target>' style references."""
    assert split_explicit_title(title) == expected
|
2023-01-05 10:56:27 -06:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def test_apply_source_workaround_literal_block_no_source():
    """Regression test for #11091.

    Test that apply_source_workaround doesn't raise.
    """
    literal_block = nodes.literal_block('', '')
    list_item = nodes.list_item('', literal_block)
    bullet_list = nodes.bullet_list('', list_item)

    tree = (literal_block, list_item, bullet_list)
    # Precondition: no node in the chain carries source information.
    assert all(node.source is None for node in tree)

    apply_source_workaround(literal_block)

    # No node gained a source attribute as a side effect.
    assert all(node.source is None for node in tree)
|