Bump Ruff to 0.7.2

Adam Turner 2024-11-03 02:50:31 +00:00
parent db1a190c58
commit 1094556afb
48 changed files with 150 additions and 238 deletions
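
The version bump itself is the one-line edit to the lint requirements below; the remaining hunks look like mechanical fallout from re-running the formatter under the new pin. Three rewrite patterns recur throughout: implicitly concatenated string literals that now fit on one line are joined, redundant parentheses around yielded and returned tuples are dropped, and long assert messages are parenthesised instead of the condition. A minimal before/after sketch of the three patterns (illustrative code, not taken from this commit):

def before():
    # 1. Adjacent string literals (implicit concatenation) left over
    #    from earlier line wrapping:
    msg = 'a suitable image for %s builder ' 'not found'
    # 2. Redundant parentheses around a yielded tuple:
    yield (msg, 'extra', 1)
    # 3. The assert condition parenthesised, with a trailing message:
    assert (
        len(msg) > 0
    ), f'unexpected empty message: {msg!r}'

def after():
    # 1. The literals are joined into one string,
    msg = 'a suitable image for %s builder not found'
    # 2. the tuple loses its parentheses,
    yield msg, 'extra', 1
    # 3. and the message, not the condition, is parenthesised.
    assert len(msg) > 0, (
        f'unexpected empty message: {msg!r}'
    )

# The two spellings are equivalent at runtime:
assert list(before()) == list(after())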

View File

@@ -81,7 +81,7 @@ docs = [
 ]
 lint = [
     "flake8>=6.0",
-    "ruff==0.7.0",
+    "ruff==0.7.2",
     "mypy==1.13.0",
     "sphinx-lint>=0.9",
     "types-colorama==0.4.15.20240311",

View File

@@ -170,7 +170,7 @@ class _RootArgumentParser(argparse.ArgumentParser):
     def error(self, message: str) -> NoReturn:
         sys.stderr.write(
             __(
-                '{0}: error: {1}\n' "Run '{0} --help' for information"  # NoQA: COM812
+                "{0}: error: {1}\nRun '{0} --help' for information"  # NoQA: COM812
             ).format(self.prog, message)
         )
         raise SystemExit(2)

View File

@@ -203,7 +203,7 @@ class Builder:
                     image_uri = images.get_original_image_uri(node['uri'])
                     if mimetypes:
                         logger.warning(
-                            __('a suitable image for %s builder not found: ' '%s (%s)'),
+                            __('a suitable image for %s builder not found: %s (%s)'),
                             self.name,
                             mimetypes,
                             image_uri,

View File

@@ -166,10 +166,7 @@ class LaTeXBuilder(Builder):
             docname = entry[0]
             if docname not in self.env.all_docs:
                 logger.warning(
-                    __(
-                        '"latex_documents" config value references unknown '
-                        'document %s'
-                    ),
+                    __('"latex_documents" config value references unknown document %s'),
                     docname,
                 )
                 continue

View File

@@ -44,10 +44,7 @@ class ManualPageBuilder(Builder):
     def init(self) -> None:
         if not self.config.man_pages:
             logger.warning(
-                __(
-                    'no "man_pages" config value found; no manual pages '
-                    'will be written'
-                )
+                __('no "man_pages" config value found; no manual pages will be written')
             )

     def get_outdated_docs(self) -> str | list[str]:
@@ -73,7 +70,7 @@ class ManualPageBuilder(Builder):
             docname, name, description, authors, section = info
             if docname not in self.env.all_docs:
                 logger.warning(
-                    __('"man_pages" config value references unknown ' 'document %s'),
+                    __('"man_pages" config value references unknown document %s'),
                     docname,
                 )
                 continue

View File

@@ -217,14 +217,14 @@ files can be built by specifying individual filenames.
         '-a',
         action='store_true',
         dest='force_all',
-        help=__('write all files (default: only write new and ' 'changed files)'),
+        help=__('write all files (default: only write new and changed files)'),
     )
     group.add_argument(
         '--fresh-env',
         '-E',
         action='store_true',
         dest='freshenv',
-        help=__("don't use a saved environment, always read " 'all files'),
+        help=__("don't use a saved environment, always read all files"),
     )

     group = parser.add_argument_group(__('path options'))
@@ -243,9 +243,7 @@ files can be built by specifying individual filenames.
         '-c',
         metavar='PATH',
         dest='confdir',
-        help=__(
-            'directory for the configuration file (conf.py) ' '(default: SOURCE_DIR)'
-        ),
+        help=__('directory for the configuration file (conf.py) (default: SOURCE_DIR)'),
     )

     group = parser.add_argument_group('build configuration options')

View File

@@ -49,7 +49,7 @@ BUILDERS = [
     (
         '',
         'doctest',
-        'to run all doctests embedded in the documentation ' '(if enabled)',
+        'to run all doctests embedded in the documentation (if enabled)',
     ),
     ('', 'coverage', 'to run coverage check of the documentation (if enabled)'),
     ('', 'clean', 'to remove everything in the build directory'),

View File

@@ -507,7 +507,7 @@ def generate(
             end='',
         )
     if d['makefile'] or d['batchfile']:
-        print(__('Use the Makefile to build the docs, like so:\n' ' make builder'))
+        print(__('Use the Makefile to build the docs, like so:\n make builder'))
     else:
         print(
             __(

View File

@@ -837,7 +837,7 @@ class CDomain(Domain):
             objectType = symbol.declaration.objectType
             docname = symbol.docname
             newestId = symbol.declaration.get_newest_id()
-            yield (name, dispname, objectType, docname, newestId, 1)
+            yield name, dispname, objectType, docname, newestId, 1


 def setup(app: Sphinx) -> ExtensionMetadata:

View File

@@ -465,7 +465,7 @@ class DefinitionParser(BaseParser):
         brackets = {'(': ')', '{': '}', '[': ']'}
         symbols: list[str] = []
         while not self.eof:
-            if (len(symbols) == 0 and self.current_char in end):
+            if len(symbols) == 0 and self.current_char in end:
                 break
             if self.current_char in brackets:
                 symbols.append(brackets[self.current_char])

View File

@@ -1142,7 +1142,7 @@ class CPPDomain(Domain):
             objectType = symbol.declaration.objectType
             docname = symbol.docname
             newestId = symbol.declaration.get_newest_id()
-            yield (name, dispname, objectType, docname, newestId, 1)
+            yield name, dispname, objectType, docname, newestId, 1

     def get_full_qualified_name(self, node: Element) -> str | None:
         target = node.get('reftarget', None)

View File

@@ -795,7 +795,7 @@ class DefinitionParser(BaseParser):
         brackets = {'(': ')', '{': '}', '[': ']', '<': '>'}
         symbols: list[str] = []
         while not self.eof:
-            if (len(symbols) == 0 and self.current_char in end):
+            if len(symbols) == 0 and self.current_char in end:
                 break
             if self.current_char in brackets:
                 symbols.append(brackets[self.current_char])

View File

@@ -927,14 +927,14 @@ class PythonDomain(Domain):
     def get_objects(self) -> Iterator[tuple[str, str, str, str, str, int]]:
         for modname, mod in self.modules.items():
-            yield (modname, modname, 'module', mod.docname, mod.node_id, 0)
+            yield modname, modname, 'module', mod.docname, mod.node_id, 0
         for refname, obj in self.objects.items():
             if obj.objtype != 'module':  # modules are already handled
                 if obj.aliased:
                     # aliased names are not full-text searchable.
-                    yield (refname, refname, obj.objtype, obj.docname, obj.node_id, -1)
+                    yield refname, refname, obj.objtype, obj.docname, obj.node_id, -1
                 else:
-                    yield (refname, refname, obj.objtype, obj.docname, obj.node_id, 1)
+                    yield refname, refname, obj.objtype, obj.docname, obj.node_id, 1

     def get_full_qualified_name(self, node: Element) -> str | None:
         modname = node.get('py:module')

View File

@@ -99,15 +99,15 @@ def parse_directive(d: str) -> tuple[str, str]:
     dir = d.strip()
     if not dir.startswith('.'):
         # Assume it is a directive without syntax
-        return (dir, '')
+        return dir, ''
     m = dir_sig_re.match(dir)
     if not m:
-        return (dir, '')
+        return dir, ''
     parsed_dir, parsed_args = m.groups()
     if parsed_args.strip():
-        return (parsed_dir.strip(), ' ' + parsed_args.strip())
+        return parsed_dir.strip(), ' ' + parsed_args.strip()
     else:
-        return (parsed_dir.strip(), '')
+        return parsed_dir.strip(), ''


 class ReSTDirective(ReSTMarkup):

View File

@@ -1089,23 +1089,23 @@ class StandardDomain(Domain):
     def get_objects(self) -> Iterator[tuple[str, str, str, str, str, int]]:
         # handle the special 'doc' reference here
         for doc in self.env.all_docs:
-            yield (doc, clean_astext(self.env.titles[doc]), 'doc', doc, '', -1)
+            yield doc, clean_astext(self.env.titles[doc]), 'doc', doc, '', -1
         for (prog, option), info in self.progoptions.items():
             if prog:
                 fullname = f'{prog}.{option}'
-                yield (fullname, fullname, 'cmdoption', info[0], info[1], 1)
+                yield fullname, fullname, 'cmdoption', info[0], info[1], 1
             else:
-                yield (option, option, 'cmdoption', info[0], info[1], 1)
+                yield option, option, 'cmdoption', info[0], info[1], 1
         for (type, name), info in self.objects.items():
             yield (name, name, type, info[0], info[1],
                    self.object_types[type].attrs['searchprio'])
         for name, (docname, labelid, sectionname) in self.labels.items():
-            yield (name, sectionname, 'label', docname, labelid, -1)
+            yield name, sectionname, 'label', docname, labelid, -1
         # add anonymous-only labels as well
         non_anon_labels = set(self.labels)
         for name, (docname, labelid) in self.anonlabels.items():
             if name not in non_anon_labels:
-                yield (name, name, 'label', docname, labelid, -1)
+                yield name, name, 'label', docname, labelid, -1

     def get_type_name(self, type: ObjType, primary: bool = False) -> str:
         # never prepend "Default"

View File

@@ -315,7 +315,7 @@ def _toctree_entry(
     else:
         if ref in parents:
             logger.warning(
-                __('circular toctree references ' 'detected, ignoring: %s <- %s'),
+                __('circular toctree references detected, ignoring: %s <- %s'),
                 ref,
                 ' <- '.join(parents),
                 location=ref,

View File

@@ -67,7 +67,7 @@ def _filter_enum_dict(
     def query(name: str, defining_class: type) -> tuple[str, type, Any] | None:
         value = attrgetter(enum_class, name, sentinel)
         if value is not sentinel:
-            return (name, defining_class, value)
+            return name, defining_class, value
         return None

     # attributes defined on a parent type, possibly shadowed later by

View File

@@ -272,7 +272,7 @@ def _fetch_inventory_group(
     else:
         issues = '\n'.join(f[0] % f[1:] for f in failures)
         LOGGER.warning(
-            __('failed to reach any of the inventories ' 'with the following issues:')
+            __('failed to reach any of the inventories with the following issues:')
             + '\n'
             + issues
         )

View File

@@ -516,9 +516,9 @@ class IntersphinxRole(SphinxRole):
             return None

         if domain and self.is_existent_role(domain, role):
-            return (domain, role)
+            return domain, role
         elif self.is_existent_role('std', role):
-            return ('std', role)
+            return 'std', role
         else:
             return None

View File

@@ -311,7 +311,7 @@ def collect_pages(app: Sphinx) -> Iterator[tuple[str, dict[str, Any], str]]:
             'body': (_('<h1>Source code for %s</h1>') % modname +
                      '\n'.join(lines)),
         }
-        yield (pagename, context, 'page.html')
+        yield pagename, context, 'page.html'

     if not modnames:
         return
@@ -339,7 +339,7 @@ def collect_pages(app: Sphinx) -> Iterator[tuple[str, dict[str, Any], str]]:
                  ''.join(html)),
     }

-    yield (posixpath.join(OUTPUT_DIRNAME, 'index'), context, 'page.html')
+    yield posixpath.join(OUTPUT_DIRNAME, 'index'), context, 'page.html'


 def setup(app: Sphinx) -> ExtensionMetadata:

View File

@@ -13,7 +13,7 @@ if TYPE_CHECKING:
     from collections.abc import Callable

 warnings.warn(
-    "'sphinx.testing.path' is deprecated. " "Use 'os.path' or 'pathlib' instead.",
+    "'sphinx.testing.path' is deprecated. Use 'os.path' or 'pathlib' instead.",
     RemovedInSphinx90Warning,
     stacklevel=2,
 )

View File

@@ -40,9 +40,9 @@ def assert_node(node: Node, cls: Any = None, xpath: str = '', **kwargs: Any) ->
         assert (
             isinstance(node, nodes.Element)
         ), f'The node{xpath} does not have any children'  # fmt: skip
-        assert (
-            len(node) == 1
-        ), f'The node{xpath} has {len(node)} child nodes, not one'
+        assert len(node) == 1, (
+            f'The node{xpath} has {len(node)} child nodes, not one'
+        )
         assert_node(node[0], cls[1:], xpath=xpath + '[0]', **kwargs)
     elif isinstance(cls, tuple):
         assert (
@@ -71,9 +71,9 @@ def assert_node(node: Node, cls: Any = None, xpath: str = '', **kwargs: Any) ->
         if (key := key.replace('_', '-')) not in node:
             msg = f'The node{xpath} does not have {key!r} attribute: {node!r}'
             raise AssertionError(msg)
-        assert (
-            node[key] == value
-        ), f'The node{xpath}[{key}] is not {value!r}: {node[key]!r}'
+        assert node[key] == value, (
+            f'The node{xpath}[{key}] is not {value!r}: {node[key]!r}'
+        )


 # keep this to restrict the API usage and to have a correct return type

View File

@@ -132,7 +132,7 @@ class Field:
     ]

     def make_entry(self, fieldarg: str, content: list[Node]) -> tuple[str, list[Node]]:
-        return (fieldarg, content)
+        return fieldarg, content

     def make_field(
         self,

View File

@@ -716,7 +716,7 @@ class SphinxTranslator(nodes.NodeVisitor):
         3. ``self.unknown_visit()``
         """
         for node_class in node.__class__.__mro__:
-            method = getattr(self, 'visit_%s' % (node_class.__name__), None)
+            method = getattr(self, 'visit_%s' % node_class.__name__, None)
             if method:
                 method(node)
                 break
@@ -733,7 +733,7 @@ class SphinxTranslator(nodes.NodeVisitor):
         3. ``self.unknown_departure()``
         """
         for node_class in node.__class__.__mro__:
-            method = getattr(self, 'depart_%s' % (node_class.__name__), None)
+            method = getattr(self, 'depart_%s' % node_class.__name__, None)
             if method:
                 method(node)
                 break

View File

@@ -327,9 +327,9 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):  # type: ignore[misc]
                 atts['href'] = self.cloak_mailto(atts['href'])
                 self.in_mailto = True
         else:
-            assert (
-                'refid' in node
-            ), 'References must have "refuri" or "refid" attribute.'
+            assert 'refid' in node, (
+                'References must have "refuri" or "refid" attribute.'
+            )
             atts['href'] = '#' + node['refid']
         if not isinstance(node.parent, nodes.TextElement):
             assert len(node) == 1 and isinstance(node[0], nodes.image)  # NoQA: PT018
@@ -379,7 +379,7 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):  # type: ignore[misc]
         if isinstance(node.parent, nodes.section):
             if self.builder.name == 'singlehtml':
                 docname = self.docnames[-1]
-                anchorname = f"{docname}/#{node.parent['ids'][0]}"
+                anchorname = f'{docname}/#{node.parent["ids"][0]}'
                 if anchorname not in self.builder.secnumbers:
                     # try first heading which has no anchor
                     anchorname = f'{docname}/'

View File

@@ -2452,7 +2452,7 @@ class LaTeXTranslator(SphinxTranslator):

     def visit_math_block(self, node: Element) -> None:
         if node.get('label'):
-            label = f"equation:{node['docname']}:{node['label']}"
+            label = f'equation:{node["docname"]}:{node["label"]}'
         else:
             label = None

@@ -2469,7 +2469,7 @@ class LaTeXTranslator(SphinxTranslator):
         raise nodes.SkipNode

     def visit_math_reference(self, node: Element) -> None:
-        label = f"equation:{node['docname']}:{node['target']}"
+        label = f'equation:{node["docname"]}:{node["target"]}'
         eqref_format = self.config.math_eqref_format
         if eqref_format:
             try:

View File

@@ -117,7 +117,7 @@ class ManualPageTranslator(SphinxTranslator, BaseTranslator):  # type: ignore[mi
             ' "%(date)s" "%(version)s" "%(manual_group)s"\n'
         )
         if self._docinfo['subtitle']:
-            tmpl += '.SH NAME\n' '%(title)s \\- %(subtitle)s\n'
+            tmpl += '.SH NAME\n%(title)s \\- %(subtitle)s\n'
         return tmpl % self._docinfo

     def visit_start_of_file(self, node: Element) -> None:

View File

@@ -250,9 +250,10 @@ class TexinfoTranslator(SphinxTranslator):
                 '(%s)' % elements['filename'],
                 self.escape_arg(self.settings.texinfo_dir_description),
             )
-            elements['direntry'] = (
-                '@dircategory %s\n' '@direntry\n' '%s' '@end direntry\n'
-            ) % (self.escape_id(self.settings.texinfo_dir_category), entry)
+            elements['direntry'] = '@dircategory %s\n@direntry\n%s@end direntry\n' % (
+                self.escape_id(self.settings.texinfo_dir_category),
+                entry,
+            )
         elements['copying'] = COPYING % elements
         # allow the user to override them all
         elements.update(self.settings.texinfo_elements)
@@ -448,10 +449,10 @@ class TexinfoTranslator(SphinxTranslator):
             for subentry in entries:
                 _add_detailed_menu(subentry)

-        self.body.append('\n@detailmenu\n' ' --- The Detailed Node Listing ---\n')
+        self.body.append('\n@detailmenu\n --- The Detailed Node Listing ---\n')
         for entry in entries:
             _add_detailed_menu(entry)
-        self.body.append('\n@end detailmenu\n' '@end menu\n')
+        self.body.append('\n@end detailmenu\n@end menu\n')

     def tex_image_length(self, width_str: str) -> str:
         match = re.match(r'(\d*\.?\d*)\s*(\S*)', width_str)
@@ -1119,7 +1120,7 @@ class TexinfoTranslator(SphinxTranslator):

     def depart_admonition(self, node: Element) -> None:
         self.ensure_eol()
-        self.body.append('@end quotation\n' '@end cartouche\n')
+        self.body.append('@end quotation\n@end cartouche\n')

     visit_attention = _visit_named_admonition
     depart_attention = depart_admonition
@@ -1236,7 +1237,7 @@ class TexinfoTranslator(SphinxTranslator):
         width = self.tex_image_length(node.get('width', ''))
         height = self.tex_image_length(node.get('height', ''))
         alt = self.escape_arg(node.get('alt', ''))
-        filename = f"{self.elements['filename'][:-5]}-figures/{name}"  # type: ignore[index]
+        filename = f'{self.elements["filename"][:-5]}-figures/{name}'  # type: ignore[index]
         self.body.append(f'\n@image{{{filename},{width},{height},{alt},{ext[1:]}}}\n')

     def depart_image(self, node: Element) -> None:
@@ -1280,7 +1281,7 @@ class TexinfoTranslator(SphinxTranslator):

     def visit_system_message(self, node: Element) -> None:
         self.body.append(
-            '\n@verbatim\n' '<SYSTEM MESSAGE: %s>\n' '@end verbatim\n' % node.astext()
+            '\n@verbatim\n<SYSTEM MESSAGE: %s>\n@end verbatim\n' % node.astext()
         )
         raise nodes.SkipNode

View File

@@ -300,9 +300,7 @@ def test_html_raw_directive(app):
     [
         (".//link[@href='_static/persistent.css'][@rel='stylesheet']", '', True),
         (
-            ".//link[@href='_static/default.css']"
-            "[@rel='stylesheet']"
-            "[@title='Default']",
+            ".//link[@href='_static/default.css'][@rel='stylesheet'][@title='Default']",
             '',
             True,
         ),
@@ -338,8 +336,7 @@ def test_html_raw_directive(app):
             True,
         ),
         (
-            ".//link[@href='_static/more_alternate2.css']"
-            "[@rel='alternate stylesheet']",
+            ".//link[@href='_static/more_alternate2.css'][@rel='alternate stylesheet']",
             '',
             True,
         ),

View File

@@ -162,8 +162,7 @@ def tail_check(check: str) -> Callable[[Iterable[Element]], Literal[True]]:
     ),
     (
         'markup.html',
-        ".//a[@href='#with']"
-        "[@class='reference internal']/code/span[@class='pre']",
+        ".//a[@href='#with'][@class='reference internal']/code/span[@class='pre']",
         '^with$',
     ),
     (

View File

@@ -389,8 +389,7 @@ def test_numref(app):
     print(app.status.getvalue())
     print(app.warning.getvalue())
     assert (
-        '\\hyperref[\\detokenize{index:fig1}]'
-        '{Fig.\\@ \\ref{\\detokenize{index:fig1}}}'
+        '\\hyperref[\\detokenize{index:fig1}]{Fig.\\@ \\ref{\\detokenize{index:fig1}}}'
     ) in result
     assert (
         '\\hyperref[\\detokenize{baz:fig22}]{Figure\\ref{\\detokenize{baz:fig22}}}'
@@ -400,8 +399,7 @@
         '{Table \\ref{\\detokenize{index:table-1}}}'
     ) in result
     assert (
-        '\\hyperref[\\detokenize{baz:table22}]'
-        '{Table:\\ref{\\detokenize{baz:table22}}}'
+        '\\hyperref[\\detokenize{baz:table22}]{Table:\\ref{\\detokenize{baz:table22}}}'
     ) in result
     assert (
         '\\hyperref[\\detokenize{index:code-1}]'
@@ -462,8 +460,7 @@ def test_numref_with_prefix1(app):
     assert '\\ref{\\detokenize{index:code-1}}' in result
     assert '\\ref{\\detokenize{baz:code22}}' in result
     assert (
-        '\\hyperref[\\detokenize{index:fig1}]'
-        '{Figure:\\ref{\\detokenize{index:fig1}}}'
+        '\\hyperref[\\detokenize{index:fig1}]{Figure:\\ref{\\detokenize{index:fig1}}}'
     ) in result
     assert (
         '\\hyperref[\\detokenize{baz:fig22}]{Figure\\ref{\\detokenize{baz:fig22}}}'
@@ -473,8 +470,7 @@
         '{Tab\\_\\ref{\\detokenize{index:table-1}}}'
     ) in result
     assert (
-        '\\hyperref[\\detokenize{baz:table22}]'
-        '{Table:\\ref{\\detokenize{baz:table22}}}'
+        '\\hyperref[\\detokenize{baz:table22}]{Table:\\ref{\\detokenize{baz:table22}}}'
     ) in result
     assert (
         '\\hyperref[\\detokenize{index:code-1}]'
@@ -540,8 +536,7 @@ def test_numref_with_prefix2(app):
         '{Tab\\_\\ref{\\detokenize{index:table-1}}:}'
     ) in result
     assert (
-        '\\hyperref[\\detokenize{baz:table22}]'
-        '{Table:\\ref{\\detokenize{baz:table22}}}'
+        '\\hyperref[\\detokenize{baz:table22}]{Table:\\ref{\\detokenize{baz:table22}}}'
     ) in result
     assert (
         '\\hyperref[\\detokenize{index:code-1}]{Code\\sphinxhyphen{}\\ref{\\detokenize{index:code-1}} '
@@ -552,8 +547,7 @@
         '{Code\\sphinxhyphen{}\\ref{\\detokenize{baz:code22}}}'
     ) in result
     assert (
-        '\\hyperref[\\detokenize{foo:foo}]'
-        '{SECTION\\_\\ref{\\detokenize{foo:foo}}\\_}'
+        '\\hyperref[\\detokenize{foo:foo}]{SECTION\\_\\ref{\\detokenize{foo:foo}}\\_}'
     ) in result
     assert (
         '\\hyperref[\\detokenize{bar:bar-a}]'
@@ -590,8 +584,7 @@ def test_numref_with_language_ja(app):
     print(app.status.getvalue())
     print(app.warning.getvalue())
     assert (
-        '\\hyperref[\\detokenize{index:fig1}]'
-        '{\u56f3 \\ref{\\detokenize{index:fig1}}}'
+        '\\hyperref[\\detokenize{index:fig1}]{\u56f3 \\ref{\\detokenize{index:fig1}}}'
     ) in result
     assert (
         '\\hyperref[\\detokenize{baz:fig22}]{Figure\\ref{\\detokenize{baz:fig22}}}'
@@ -601,8 +594,7 @@
         '{\u8868 \\ref{\\detokenize{index:table-1}}}'
     ) in result
     assert (
-        '\\hyperref[\\detokenize{baz:table22}]'
-        '{Table:\\ref{\\detokenize{baz:table22}}}'
+        '\\hyperref[\\detokenize{baz:table22}]{Table:\\ref{\\detokenize{baz:table22}}}'
     ) in result
     assert (
         '\\hyperref[\\detokenize{index:code-1}]'
@@ -937,8 +929,7 @@ def test_footnote(app):
         'numbered\n%\n\\end{footnote}'
     ) in result
     assert (
-        '\\begin{footnote}[2]\\sphinxAtStartFootnote\nauto numbered\n%\n'
-        '\\end{footnote}'
+        '\\begin{footnote}[2]\\sphinxAtStartFootnote\nauto numbered\n%\n\\end{footnote}'
     ) in result
     assert (
         '\\begin{footnote}[3]\\sphinxAtStartFootnote\nnamed\n%\n\\end{footnote}'
@@ -1880,8 +1871,7 @@ def test_latex_nested_enumerated_list(app):
     result = (app.outdir / 'projectnamenotset.tex').read_text(encoding='utf8')
     assert (
-        '\\sphinxsetlistlabels{\\arabic}{enumi}{enumii}{}{.}%\n'
-        '\\setcounter{enumi}{4}\n'
+        '\\sphinxsetlistlabels{\\arabic}{enumi}{enumii}{}{.}%\n\\setcounter{enumi}{4}\n'
     ) in result
     assert (
         '\\sphinxsetlistlabels{\\alph}{enumii}{enumiii}{}{.}%\n'
@@ -2201,9 +2191,9 @@ def test_duplicated_labels_before_module(app):
     ):
         tex_label_name = 'index:' + rst_label_name.replace('_', '-')
         tex_label_code = r'\phantomsection\label{\detokenize{%s}}' % tex_label_name
-        assert (
-            content.count(tex_label_code) == 1
-        ), f'duplicated label: {tex_label_name!r}'
+        assert content.count(tex_label_code) == 1, (
+            f'duplicated label: {tex_label_name!r}'
+        )
         tested_labels.add(tex_label_code)

     # ensure that we did not forget any label to check

View File

@@ -44,14 +44,7 @@ def test_lineblock(app):
     # regression test for #1109: need empty line after line block
     app.build()
     result = (app.outdir / 'lineblock.txt').read_text(encoding='utf8')
-    expect = (
-        '* one\n'
-        '\n'
-        ' line-block 1\n'
-        ' line-block 2\n'
-        '\n'
-        'followed paragraph.\n'
-    )
+    expect = '* one\n\n line-block 1\n line-block 2\n\nfollowed paragraph.\n'
     assert result == expect


@@ -265,16 +258,5 @@ def test_secnums(app):
     assert lines[5] == ''
     assert lines[6] == ' * Sub Bb'
     doc2 = (app.outdir / 'doc2.txt').read_text(encoding='utf8')
-    expect = (
-        'Section B\n'
-        '*********\n'
-        '\n'
-        '\n'
-        'Sub Ba\n'
-        '======\n'
-        '\n'
-        '\n'
-        'Sub Bb\n'
-        '======\n'
-    )
+    expect = 'Section B\n*********\n\n\nSub Ba\n======\n\n\nSub Bb\n======\n'
     assert doc2 == expect

View File

@@ -463,16 +463,15 @@ def test_config_eol(logger, tmp_path):
 )
 def test_builtin_conf(app):
     warnings = app.warning.getvalue()
-    assert (
-        'root_doc'
-    ) in warnings, 'override on builtin "root_doc" should raise a type warning'
+    assert 'root_doc' in warnings, (
+        'override on builtin "root_doc" should raise a type warning'
+    )
     assert 'language' not in warnings, (
         'explicitly permitted override on builtin "language" should NOT raise '
         'a type warning'
     )
     assert 'primary_domain' not in warnings, (
-        'override to None on builtin "primary_domain" should NOT raise a type '
-        'warning'
+        'override to None on builtin "primary_domain" should NOT raise a type warning'
     )

View File

@@ -536,12 +536,7 @@ def test_literalinclude_pydecorators(app):
     assert actual == expect

     actual = literal_include[2].text
-    expect = (
-        '@function_decorator\n'
-        '@other_decorator()\n'
-        'def the_function():\n'
-        ' pass\n'
-    )
+    expect = '@function_decorator\n@other_decorator()\ndef the_function():\n pass\n'
     assert actual == expect

View File

@@ -28,9 +28,9 @@ def test_sectioning(app):
         assert prefix == parent_num, f'Section out of place: {title!r}'
         for i, subsect in enumerate(sects[1]):
             num = subsect[0].split()[0]
-            assert re.match(
-                '[0-9]+[.0-9]*[.]', num
-            ), f'Unnumbered section: {subsect[0]!r}'
+            assert re.match('[0-9]+[.0-9]*[.]', num), (
+                f'Unnumbered section: {subsect[0]!r}'
+            )
             testsects(prefix + str(i + 1) + '.', subsect, indent + 4)

     app.build(filenames=[app.srcdir / 'only.rst'])
@@ -41,6 +41,6 @@ def test_sectioning(app):
     for i, s in enumerate(parts):
         testsects(str(i + 1) + '.', s, 4)
     actual_headings = '\n'.join(p[0] for p in parts)
-    assert (
-        len(parts) == 4
-    ), f'Expected 4 document level headings, got:\n{actual_headings}'
+    assert len(parts) == 4, (
+        f'Expected 4 document level headings, got:\n{actual_headings}'
+    )

View File

@@ -923,11 +923,7 @@ def test_domain_c_parse_cvar(app):

 @pytest.mark.sphinx('html', testroot='root')
 def test_domain_c_parse_no_index_entry(app):
-    text = (
-        '.. c:function:: void f()\n'
-        '.. c:function:: void g()\n'
-        ' :no-index-entry:\n'
-    )
+    text = '.. c:function:: void f()\n.. c:function:: void g()\n :no-index-entry:\n'
     doctree = restructuredtext.parse(app, text)
     assert_node(doctree, (addnodes.index, desc, addnodes.index, desc))
     assert_node(

View File

@@ -159,7 +159,7 @@ def test_domain_cpp_ast_fundamental_types(type_, id_v2):
     id1 = make_id_v1()
     id2 = make_id_v2()

-    input = f"void f({type_.replace(' ', '  ')} arg)"
+    input = f'void f({type_.replace(" ", "  ")} arg)'
     output = f'void f({type_} arg)'
     check('function', input, {1: id1, 2: id2}, output=output)

@@ -167,7 +167,7 @@ def test_domain_cpp_ast_fundamental_types(type_, id_v2):
     # try permutations of all components
     tcs = type_.split()
     for p in itertools.permutations(tcs):
-        input = f"void f({' '.join(p)} arg)"
+        input = f'void f({" ".join(p)} arg)'
         check('function', input, {1: id1, 2: id2})

@@ -1347,8 +1347,7 @@ def test_domain_cpp_ast_template_args():
     # from breathe#218
     check(
         'function',
-        'template<typename F> '
-        'void allow(F *f, typename func<F, B, G != 1>::type tt)',
+        'template<typename F> void allow(F *f, typename func<F, B, G != 1>::type tt)',
         {
             2: 'I0E5allowP1FN4funcI1F1BXG != 1EE4typeE',
             3: 'I0E5allowP1FN4funcI1F1BXne1GL1EEE4typeE',
@@ -1906,9 +1905,7 @@ _var cpp:member 1 index.html#_CPPv44$ -
 @pytest.mark.sphinx('html', testroot='root')
 def test_domain_cpp_parse_no_index_entry(app):
     text = (
-        '.. cpp:function:: void f()\n'
-        '.. cpp:function:: void g()\n'
-        ' :no-index-entry:\n'
+        '.. cpp:function:: void f()\n.. cpp:function:: void g()\n :no-index-entry:\n'
     )
     doctree = restructuredtext.parse(app, text)
     assert_node(doctree, (addnodes.index, desc, addnodes.index, desc))

View File

@@ -165,10 +165,7 @@ def test_pydata_with_union_type_operator(app):
 @pytest.mark.sphinx('html', testroot='root')
 def test_pyobject_prefix(app):
     text = (
-        '.. py:class:: Foo\n'
-        '\n'
-        ' .. py:method:: Foo.say\n'
-        ' .. py:method:: FooBar.say'
+        '.. py:class:: Foo\n\n .. py:method:: Foo.say\n .. py:method:: FooBar.say'
     )
     doctree = restructuredtext.parse(app, text)
     assert_node(

View File

@@ -372,9 +372,9 @@ def test_toc_all_references_should_exist_pep420_enabled(apidoc):
             missing_files.append(filename)

     all_missing = ', '.join(missing_files)
-    assert (
-        len(missing_files) == 0
-    ), f'File(s) referenced in TOC not found: {all_missing}\nTOC:\n{toc}'
+    assert len(missing_files) == 0, (
+        f'File(s) referenced in TOC not found: {all_missing}\nTOC:\n{toc}'
+    )


 @pytest.mark.apidoc(
@@ -403,9 +403,9 @@ def test_toc_all_references_should_exist_pep420_disabled(apidoc):
             missing_files.append(filename)

     all_missing = ', '.join(missing_files)
-    assert (
-        len(missing_files) == 0
-    ), f'File(s) referenced in TOC not found: {all_missing}\nTOC:\n{toc}'
+    assert len(missing_files) == 0, (
+        f'File(s) referenced in TOC not found: {all_missing}\nTOC:\n{toc}'
+    )


 def extract_toc(path):
def extract_toc(path):

View File

@@ -1558,9 +1558,9 @@ class _EnumFormatter:
     def preamble_lookup(
         self, doc: str, *, indent: int = 0, **options: Any
     ) -> list[str]:
-        assert (
-            doc
-        ), f'enumeration class {self.target!r} should have an explicit docstring'
+        assert doc, (
+            f'enumeration class {self.target!r} should have an explicit docstring'
+        )

         args = self._preamble_args(functional_constructor=False)
         return self._preamble(doc=doc, args=args, indent=indent, **options)
@@ -1568,9 +1568,9 @@ class _EnumFormatter:
     def preamble_constructor(
         self, doc: str, *, indent: int = 0, **options: Any
     ) -> list[str]:
-        assert (
-            doc
-        ), f'enumeration class {self.target!r} should have an explicit docstring'
+        assert doc, (
+            f'enumeration class {self.target!r} should have an explicit docstring'
+        )

        args = self._preamble_args(functional_constructor=True)
        return self._preamble(doc=doc, args=args, indent=indent, **options)

View File

@@ -192,9 +192,9 @@ def test_get_items_summary(make_app, app_params):
         'C.C2': 'This is a nested inner class docstring',
     }
     for key, expected in expected_values.items():
-        assert (
-            autosummary_items[key][2] == expected
-        ), f'Summary for {key} was {autosummary_items[key]!r} - expected {expected!r}'
+        assert autosummary_items[key][2] == expected, (
+            f'Summary for {key} was {autosummary_items[key]!r} - expected {expected!r}'
+        )

     # check an item in detail
     assert 'func' in autosummary_items
@@ -566,11 +566,7 @@ def test_autosummary_generate(app):
     Foo = path.read_text(encoding='utf8')
     assert '.. automethod:: __init__' in Foo
     assert (
-        ' .. autosummary::\n'
-        ' \n'
-        ' ~Foo.__init__\n'
-        ' ~Foo.bar\n'
-        ' \n'
+        ' .. autosummary::\n \n ~Foo.__init__\n ~Foo.bar\n \n'
     ) in Foo
     assert (
         ' .. autosummary::\n'
@@ -591,9 +587,7 @@ def test_autosummary_generate(app):
     path = app.srcdir / 'generated' / 'autosummary_dummy_module.Foo.value.rst'
     Foo_value = path.read_text(encoding='utf8')
     assert (
-        '.. currentmodule:: autosummary_dummy_module\n'
-        '\n'
-        '.. autoattribute:: Foo.value'
+        '.. currentmodule:: autosummary_dummy_module\n\n.. autoattribute:: Foo.value'
     ) in Foo_value

     path = app.srcdir / 'generated' / 'autosummary_dummy_module.qux.rst'
@@ -820,17 +814,10 @@ def test_autosummary_module_all(app):
         ).read_text(encoding='utf8')
         assert ' .. autosummary::\n \n PublicBar\n \n' in module
         assert (
-            ' .. autosummary::\n'
-            ' \n'
-            ' public_foo\n'
-            ' public_baz\n'
-            ' \n'
+            ' .. autosummary::\n \n public_foo\n public_baz\n \n'
         ) in module
         assert (
-            '.. autosummary::\n'
-            ' :toctree:\n'
-            ' :recursive:\n\n'
-            ' extra_dummy_module\n'
+            '.. autosummary::\n :toctree:\n :recursive:\n\n extra_dummy_module\n'
        ) in module
     finally:
         sys.modules.pop('autosummary_dummy_package_all', None)

View File

@@ -168,12 +168,7 @@ def test_graphviz_parse_mapfile():
     assert cmap.generate_clickable_map() == ''

     # normal graph
-    code = (
-        'digraph {\n'
-        ' foo [href="https://www.google.com/"];\n'
-        ' foo -> bar;\n'
-        '}\n'
-    )
+    code = 'digraph {\n foo [href="https://www.google.com/"];\n foo -> bar;\n}\n'
     content = (
         '<map id="%3" name="%3">\n'
         '<area shape="poly" id="node1" href="https://www.google.com/" title="foo" alt=""'

View File

@@ -443,9 +443,7 @@ class TestGoogleDocstring:
             assert str(actual) == expect

             # Single line
-            actual = GoogleDocstring(
-                f'{section}:\n' ' this is a single line\n', config
-            )
+            actual = GoogleDocstring(f'{section}:\n this is a single line\n', config)
             expect = f'.. {admonition}:: this is a single line\n'
             assert str(actual) == expect

@@ -1599,7 +1597,7 @@ class TestNumpyDocstring:
             # Single line
             actual = NumpyDocstring(
-                f"{section}\n{'-' * len(section)}\n this is a single line\n",
+                f'{section}\n{"-" * len(section)}\n this is a single line\n',
                 config,
             )
             expect = f'.. {admonition}:: this is a single line\n'

View File

@@ -100,9 +100,9 @@ def test_text_emit_warnings(app):
         '.*/warnings.txt:4:<translated>:1: '
         'WARNING: Inline literal start-string without end-string. \\[docutils\\]\n'
     )
-    assert re.search(
-        warning_expr, warnings
-    ), f'{warning_expr!r} did not match {warnings!r}'
+    assert re.search(warning_expr, warnings), (
+        f'{warning_expr!r} did not match {warnings!r}'
+    )


 @sphinx_intl
@@ -196,21 +196,21 @@ def test_text_inconsistency_warnings(app):
             'translated': "\\['`I18N WITH REFS INCONSISTENCY`_'\\]",
         }
     )
-    assert re.search(
-        expected_warning_expr, warnings
-    ), f'{expected_warning_expr!r} did not match {warnings!r}'
+    assert re.search(expected_warning_expr, warnings), (
+        f'{expected_warning_expr!r} did not match {warnings!r}'
+    )

     expected_citation_ref_warning_expr = '.*/refs_inconsistency.txt:\\d+: WARNING: Citation \\[ref2\\] is not referenced.'
-    assert re.search(
-        expected_citation_ref_warning_expr, warnings
-    ), f'{expected_citation_ref_warning_expr!r} did not match {warnings!r}'
+    assert re.search(expected_citation_ref_warning_expr, warnings), (
+        f'{expected_citation_ref_warning_expr!r} did not match {warnings!r}'
+    )

     expected_citation_warning_expr = (
         '.*/refs_inconsistency.txt:\\d+: WARNING: citation not found: ref3'
     )
-    assert re.search(
-        expected_citation_warning_expr, warnings
-    ), f'{expected_citation_warning_expr!r} did not match {warnings!r}'
+    assert re.search(expected_citation_warning_expr, warnings), (
+        f'{expected_citation_warning_expr!r} did not match {warnings!r}'
+    )


 @sphinx_intl
@@ -261,9 +261,9 @@ def test_text_literalblock_warnings(app):
     expected_warning_expr = (
         '.*/literalblock.txt:\\d+: WARNING: Literal block expected; none found.'
     )
-    assert re.search(
-        expected_warning_expr, warnings
-    ), f'{expected_warning_expr!r} did not match {warnings!r}'
+    assert re.search(expected_warning_expr, warnings), (
+        f'{expected_warning_expr!r} did not match {warnings!r}'
+    )


 @sphinx_intl
@@ -347,16 +347,16 @@ def test_text_glossary_term_inconsistencies(app):
         " original: \\[':term:`Some term`', ':term:`Some other term`'\\],"
         " translated: \\[':term:`SOME NEW TERM`'\\] \\[i18n.inconsistent_references\\]\n"
     )
-    assert re.search(
-        expected_warning_expr, warnings
-    ), f'{expected_warning_expr!r} did not match {warnings!r}'
+    assert re.search(expected_warning_expr, warnings), (
+        f'{expected_warning_expr!r} did not match {warnings!r}'
+    )

     expected_warning_expr = (
         '.*/glossary_terms_inconsistency.txt:\\d+:<translated>:1: '
         "WARNING: term not in glossary: 'TERM NOT IN GLOSSARY'"
     )
-    assert re.search(
-        expected_warning_expr, warnings
-    ), f'{expected_warning_expr!r} did not match {warnings!r}'
+    assert re.search(expected_warning_expr, warnings), (
+        f'{expected_warning_expr!r} did not match {warnings!r}'
+    )


 @sphinx_intl
@@ -1051,9 +1051,9 @@ def test_html_index_entries(app):
         wrap_nest('li', 'ul', 'SEE'),
     ]
     for expr in expected_exprs:
-        assert re.search(
-            expr, result, re.MULTILINE
-        ), f'{expr!r} did not match {result!r}'
+        assert re.search(expr, result, re.MULTILINE), (
+            f'{expr!r} did not match {result!r}'
+        )


 @sphinx_intl
@@ -1185,9 +1185,9 @@ def test_xml_footnotes(app):
     warnings = getwarning(app.warning)
     warning_expr = '.*/footnote.xml:\\d*: SEVERE: Duplicate ID: ".*".\n'
-    assert not re.search(
-        warning_expr, warnings
-    ), f'{warning_expr!r} did match {warnings!r}'
+    assert not re.search(warning_expr, warnings), (
+        f'{warning_expr!r} did match {warnings!r}'
+    )


 @sphinx_intl

View File

@@ -403,10 +403,7 @@ def get_verifier(verify, verify_re):
         'verify',
         ':kbd:`-`',
         '<p><kbd class="kbd docutils literal notranslate">-</kbd></p>',
-        (
-            '\\sphinxAtStartPar\n'
-            '\\sphinxkeyboard{\\sphinxupquote{\\sphinxhyphen{}}}'
-        ),
+        '\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{\\sphinxhyphen{}}}',
     ),
     (
         # kbd role

View File

@@ -31,10 +31,7 @@ def test_comment_picker_basic():
 def test_comment_picker_location():
     # multiple "before" comments
     source = (
-        '#: comment before assignment1\n'
-        '#:\n'
-        '#: comment before assignment2\n'
-        'a = 1 + 1\n'
+        '#: comment before assignment1\n#:\n#: comment before assignment2\na = 1 + 1\n'
     )
     parser = Parser(source)
     parser.parse()

View File

@@ -454,9 +454,9 @@ def assert_is_sorted(
     elif isinstance(item, list):
         if not is_title_tuple_type(item) and path not in lists_not_to_sort:
             # sort nulls last; http://stackoverflow.com/questions/19868767/
-            assert item == sorted(
-                item, key=lambda x: (x is None, x)
-            ), f'{err_path} is not sorted'
+            assert item == sorted(item, key=lambda x: (x is None, x)), (
+                f'{err_path} is not sorted'
+            )

         for i, child in enumerate(item):
             assert_is_sorted(child, f'{path}[{i}]')

View File

@@ -10,14 +10,7 @@ from sphinx.testing.util import assert_node

 @pytest.mark.sphinx('html', testroot='root')
 def test_transforms_reorder_consecutive_target_and_index_nodes_preserve_order(app):
-    text = (
-        '.. index:: abc\n'
-        '.. index:: def\n'
-        '.. index:: ghi\n'
-        '.. index:: jkl\n'
-        '\n'
-        'text\n'
-    )
+    text = '.. index:: abc\n.. index:: def\n.. index:: ghi\n.. index:: jkl\n\ntext\n'
     doctree = restructuredtext.parse(app, text)
     assert_node(
         doctree,