[lint] add RUF005 lint (#12068)

Co-authored-by: daniel.eades <daniel.eades@seebyte.com>
Co-authored-by: Bénédikt Tran <10796600+picnixz@users.noreply.github.com>
danieleades 2024-03-14 10:26:30 +00:00 committed by GitHub
parent d4c739cdd1
commit 92380e60d1
25 changed files with 79 additions and 61 deletions
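
RUF005 flags concatenation that combines a collection literal with another sequence and suggests writing a single literal with iterable unpacking instead, which is the rewrite applied throughout the hunks below. A minimal illustration of the pattern (the names and values are invented for the example, not taken from this commit):

    extra = ['nav.xhtml', 'content.opf']

    # Flagged by RUF005: a list literal concatenated with another sequence.
    ignored = ['.buildinfo', 'mimetype'] + extra

    # Preferred: a single literal with iterable unpacking.
    ignored = ['.buildinfo', 'mimetype', *extra]

    # The same rewrite applies to tuple and set literals.
    suffixes = ('.py', '.pyx', *extra)
    docnames = {'index', *extra}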


@@ -287,7 +287,7 @@ select = [
 #   "RUF001",  # String contains ambiguous {}. Did you mean {}?
     "RUF002",  # Docstring contains ambiguous {}. Did you mean {}?
 #   "RUF003",  # Comment contains ambiguous {}. Did you mean {}?
-#   "RUF005",  # Consider `{expression}` instead of concatenation
+    "RUF005",  # Consider `{expression}` instead of concatenation
     "RUF006",  # Store a reference to the return value of `asyncio.{method}`
     "RUF007",  # Prefer `itertools.pairwise()` over `zip()` when iterating over successive pairs
     "RUF008",  # Do not use mutable default values for dataclass attributes


@@ -510,11 +510,19 @@ class EpubBuilder(StandaloneHTMLBuilder):
         # files
         self.files: list[str] = []
-        self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf',
-                              'toc.ncx', 'META-INF/container.xml',
-                              'Thumbs.db', 'ehthumbs.db', '.DS_Store',
-                              'nav.xhtml', self.config.epub_basename + '.epub'] + \
-            self.config.epub_exclude_files
+        self.ignored_files = [
+            '.buildinfo',
+            'mimetype',
+            'content.opf',
+            'toc.ncx',
+            'META-INF/container.xml',
+            'Thumbs.db',
+            'ehthumbs.db',
+            '.DS_Store',
+            'nav.xhtml',
+            self.config.epub_basename + '.epub',
+            *self.config.epub_exclude_files,
+        ]
         if not self.use_index:
             self.ignored_files.append('genindex' + self.out_suffix)
         for root, dirs, files in os.walk(self.outdir):
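
Besides satisfying the lint, the unpacked form is slightly more permissive: `+` requires both operands to be lists, whereas `*` accepts any iterable. A toy sketch of that difference (values are illustrative, not from Sphinx):

    base = ['nav.xhtml']
    excluded = ('draft.xhtml',)   # e.g. a tuple coming from configuration

    # base + excluded would raise TypeError: can only concatenate list (not "tuple") to list
    combined = [*base, *excluded]
    assert combined == ['nav.xhtml', 'draft.xhtml']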


@@ -827,7 +827,7 @@ class StandaloneHTMLBuilder(Builder):
             logger.warning(__('Failed to copy a file in html_static_file: %s: %r'),
                            filename, error)

-        excluded = Matcher(self.config.exclude_patterns + ["**/.*"])
+        excluded = Matcher([*self.config.exclude_patterns, '**/.*'])
         for entry in self.config.html_static_path:
             copy_asset(path.join(self.confdir, entry),
                        path.join(self.outdir, '_static'),


@@ -342,7 +342,7 @@ class LaTeXBuilder(Builder):
     def assemble_doctree(
         self, indexfile: str, toctree_only: bool, appendices: list[str],
     ) -> nodes.document:
-        self.docnames = set([indexfile] + appendices)
+        self.docnames = {indexfile, *appendices}
         logger.info(darkgreen(indexfile) + " ", nonl=True)
         tree = self.env.get_doctree(indexfile)
         tree['docname'] = indexfile
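
`set([indexfile] + appendices)` builds an intermediate list and then converts it; the set display with unpacking builds the set directly. A small equivalence check with placeholder names:

    indexfile = 'index'
    appendices = ['appendix-a', 'appendix-b']

    old_style = set([indexfile] + appendices)
    new_style = {indexfile, *appendices}
    assert old_style == new_style == {'index', 'appendix-a', 'appendix-b'}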


@@ -134,7 +134,7 @@ class TexinfoBuilder(Builder):
     def assemble_doctree(
         self, indexfile: str, toctree_only: bool, appendices: list[str],
     ) -> nodes.document:
-        self.docnames = set([indexfile] + appendices)
+        self.docnames = {indexfile, *appendices}
         logger.info(darkgreen(indexfile) + " ", nonl=True)
         tree = self.env.get_doctree(indexfile)
         tree['docname'] = indexfile


@@ -652,7 +652,7 @@ def check_confval_types(app: Sphinx | None, config: Config) -> None:
             if type_value in valid_types:  # check explicitly listed types
                 continue

-            common_bases = (set(type_value.__bases__ + (type_value,))
+            common_bases = ({*type_value.__bases__, type_value}
                             & set(type_default.__bases__))
             common_bases.discard(object)
             if common_bases:


@@ -318,7 +318,7 @@ class CPPObject(ObjectDescription[ASTDeclaration]):
         if config.toc_object_entries_show_parents == 'hide':
             return name + parens
         if config.toc_object_entries_show_parents == 'all':
-            return '::'.join(parents + [name + parens])
+            return '::'.join([*parents, name + parens])
         return ''

@@ -337,18 +337,28 @@ class CPPMemberObject(CPPObject):

 class CPPFunctionObject(CPPObject):
     object_type = 'function'
-    doc_field_types = CPPObject.doc_field_types + [
-        GroupedField('parameter', label=_('Parameters'),
-                     names=('param', 'parameter', 'arg', 'argument'),
-                     can_collapse=True),
-        GroupedField('exceptions', label=_('Throws'), rolename='expr',
-                     names=('throws', 'throw', 'exception'),
-                     can_collapse=True),
-        GroupedField('retval', label=_('Return values'),
-                     names=('retvals', 'retval'),
-                     can_collapse=True),
-        Field('returnvalue', label=_('Returns'), has_arg=False,
-              names=('returns', 'return')),
+    doc_field_types = [
+        *CPPObject.doc_field_types,
+        GroupedField(
+            "parameter",
+            label=_("Parameters"),
+            names=("param", "parameter", "arg", "argument"),
+            can_collapse=True,
+        ),
+        GroupedField(
+            "exceptions",
+            label=_("Throws"),
+            rolename="expr",
+            names=("throws", "throw", "exception"),
+            can_collapse=True,
+        ),
+        GroupedField(
+            "retval",
+            label=_("Return values"),
+            names=("retvals", "retval"),
+            can_collapse=True,
+        ),
+        Field("returnvalue", label=_("Returns"), has_arg=False, names=("returns", "return")),
     ]
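
The same unpacking form also replaces `+` when a subclass extends a class attribute defined on its parent, as `doc_field_types` does above. A generic sketch of that pattern (toy classes, unrelated to Sphinx's Field types):

    class Base:
        doc_field_types = ['parameters', 'returns']


    class Derived(Base):
        # Extends the inherited list in a new class attribute; Base is untouched.
        doc_field_types = [*Base.doc_field_types, 'throws']


    assert Base.doc_field_types == ['parameters', 'returns']
    assert Derived.doc_field_types == ['parameters', 'returns', 'throws']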


@@ -242,7 +242,7 @@ class JSObject(ObjectDescription[tuple[str, str]]):
         if config.toc_object_entries_show_parents == 'hide':
             return name + parens
         if config.toc_object_entries_show_parents == 'all':
-            return '.'.join(parents + [name + parens])
+            return '.'.join([*parents, name + parens])
         return ''


@@ -422,5 +422,5 @@ class PyObject(ObjectDescription[tuple[str, str]]):
         if config.toc_object_entries_show_parents == 'hide':
             return name + parens
         if config.toc_object_entries_show_parents == 'all':
-            return '.'.join(parents + [name + parens])
+            return '.'.join([*parents, name + parens])
         return ''


@@ -262,7 +262,7 @@ class OptionXRefRole(XRefRole):

 def split_term_classifiers(line: str) -> list[str | None]:
     # split line into a term and classifiers. if no classifier, None is used..
-    parts: list[str | None] = re.split(' +: +', line) + [None]
+    parts: list[str | None] = [*re.split(' +: +', line), None]
     return parts

@@ -408,7 +408,7 @@ class Glossary(SphinxDirective):
         dlist = nodes.definition_list('', *items)
         dlist['classes'].append('glossary')
         node += dlist
-        return messages + [node]
+        return [*messages, node]


 def token_xrefs(text: str, productionGroup: str = '') -> list[Node]:


@@ -251,7 +251,7 @@ def _entries_from_toctree(
                         included,
                         excluded,
                         sub_toc_node,
-                        [refdoc] + parents,
+                        [refdoc, *parents],
                         subtree=True,
                     ),
                     start=sub_toc_node.parent.index(sub_toc_node) + 1,


@@ -283,7 +283,7 @@ class TocTreeCollector(EnvironmentCollector):
             secnum = secnum[:env.config.numfig_secnum_depth]
             counter[secnum] = counter.get(secnum, 0) + 1
-            return secnum + (counter[secnum],)
+            return (*secnum, counter[secnum])

         def register_fignumber(docname: str, secnum: tuple[int, ...],
                                figtype: str, fignode: Element) -> None:
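
For tuples, the rule rewrites `old + (item,)` as a tuple display with unpacking, which avoids the single-element-tuple syntax. A toy example mirroring the section-number case above (names are illustrative):

    secnum = (2, 1)          # a section number, e.g. "2.1"
    counter = {secnum: 3}    # figures already numbered in that section

    old_style = secnum + (counter[secnum],)
    new_style = (*secnum, counter[secnum])
    assert old_style == new_style == (2, 1, 3)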


@@ -47,7 +47,7 @@ else:
     'show-inheritance',
 ]

-PY_SUFFIXES = ('.py', '.pyx') + tuple(EXTENSION_SUFFIXES)
+PY_SUFFIXES = ('.py', '.pyx', *tuple(EXTENSION_SUFFIXES))

 template_dir = path.join(package_dir, 'templates', 'apidoc')


@@ -1138,10 +1138,10 @@ class ModuleLevelDocumenter(Documenter):
     def resolve_name(self, modname: str | None, parents: Any, path: str, base: str,
                      ) -> tuple[str | None, list[str]]:
         if modname is not None:
-            return modname, parents + [base]
+            return modname, [*parents, base]
         if path:
             modname = path.rstrip('.')
-            return modname, parents + [base]
+            return modname, [*parents, base]

         # if documenting a toplevel object without explicit module,
         # it can be contained in another auto directive ...

@@ -1150,7 +1150,7 @@ class ModuleLevelDocumenter(Documenter):
         if not modname:
             modname = self.env.ref_context.get('py:module')
         # ... else, it stays None, which means invalid
-        return modname, parents + [base]
+        return modname, [*parents, base]


 class ClassLevelDocumenter(Documenter):

@@ -1162,7 +1162,7 @@ class ClassLevelDocumenter(Documenter):
     def resolve_name(self, modname: str | None, parents: Any, path: str, base: str,
                      ) -> tuple[str | None, list[str]]:
         if modname is not None:
-            return modname, parents + [base]
+            return modname, [*parents, base]

         if path:
             mod_cls = path.rstrip('.')

@@ -1186,7 +1186,7 @@ class ClassLevelDocumenter(Documenter):
         if not modname:
             modname = self.env.ref_context.get('py:module')
         # ... else, it stays None, which means invalid
-        return modname, parents + [base]
+        return modname, [*parents, base]


 class DocstringSignatureMixin:


@@ -286,9 +286,9 @@ class Autosummary(SphinxDirective):
                     return import_ivar_by_name(name, prefixes)
                 except ImportError as exc2:
                     if exc2.__cause__:
-                        errors: list[BaseException] = exc.exceptions + [exc2.__cause__]
+                        errors: list[BaseException] = [*exc.exceptions, exc2.__cause__]
                     else:
-                        errors = exc.exceptions + [exc2]
+                        errors = [*exc.exceptions, exc2]

                     raise ImportExceptionGroup(exc.args[0], errors) from None

@@ -593,7 +593,7 @@ def limited_join(sep: str, items: list[str], max_chars: int = 30,
         else:
             break

-    return sep.join(list(items[:n_items]) + [overflow_marker])
+    return sep.join([*list(items[:n_items]), overflow_marker])


 # -- Importing items -----------------------------------------------------------


@@ -509,9 +509,9 @@ def generate_autosummary_docs(sources: list[str],
                 qualname = name.replace(modname + ".", "")
             except ImportError as exc2:
                 if exc2.__cause__:
-                    exceptions: list[BaseException] = exc.exceptions + [exc2.__cause__]
+                    exceptions: list[BaseException] = [*exc.exceptions, exc2.__cause__]
                 else:
-                    exceptions = exc.exceptions + [exc2]
+                    exceptions = [*exc.exceptions, exc2]

                 errors = list({f"* {type(e).__name__}: {e}" for e in exceptions})
                 logger.warning(__('[autosummary] failed to import %s.\nPossible hints:\n%s'),


@@ -333,7 +333,7 @@ def render_dot_html(self: HTML5Translator, node: graphviz, code: str, options: d
         logger.warning(__('dot code %r: %s'), code, exc)
         raise nodes.SkipNode from exc

-    classes = [imgcls, 'graphviz'] + node.get('classes', [])
+    classes = [imgcls, 'graphviz', *node.get('classes', [])]
     imgcls = ' '.join(filter(None, classes))

     if fname is None:


@@ -56,9 +56,9 @@ class ImagemagickConverter(ImageConverter):
             # (or first page) of image (ex. Animation GIF, PDF)
             _from += '[0]'

-            args = ([self.config.image_converter] +
-                    self.config.image_converter_args +
-                    [_from, _to])
+            args = ([
+                self.config.image_converter, *self.config.image_converter_args, _from, _to,
+            ])
             logger.debug('Invoking %r ...', args)
             subprocess.run(args, capture_output=True, check=True)
             return True
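
Building a subprocess argument vector is a common place for this pattern: the executable, configured extra flags, and positional paths can be combined in one list literal. A hedged sketch of the same shape, using `echo` as a stand-in for the ImageMagick converter so it runs on any POSIX system:

    import subprocess

    converter = 'echo'                 # stand-in for the configured image converter
    converter_args = ['-n']            # stand-in for image_converter_args
    src, dst = 'in.gif[0]', 'out.png'

    # One list literal instead of chaining three lists with ``+``.
    args = [converter, *converter_args, src, dst]
    subprocess.run(args, capture_output=True, check=True)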


@@ -306,7 +306,7 @@ class GoogleDocstring:
         _type = _convert_type_spec(_type, self._config.napoleon_type_aliases or {})

         indent = self._get_indent(line) + 1
-        _descs = [_desc] + self._dedent(self._consume_indented_block(indent))
+        _descs = [_desc, *self._dedent(self._consume_indented_block(indent))]
         _descs = self.__class__(_descs, self._config).lines()
         return _name, _type, _descs

@@ -328,7 +328,7 @@ class GoogleDocstring:
         if not colon or not _desc:
             _type, _desc = _desc, _type
             _desc += colon
-        _descs = [_desc] + self._dedent(self._consume_to_end())
+        _descs = [_desc, *self._dedent(self._consume_to_end())]
         _descs = self.__class__(_descs, self._config).lines()
         return _type, _descs

@@ -400,15 +400,15 @@ class GoogleDocstring:
     def _fix_field_desc(self, desc: list[str]) -> list[str]:
         if self._is_list(desc):
-            desc = [''] + desc
+            desc = ['', *desc]
         elif desc[0].endswith('::'):
             desc_block = desc[1:]
             indent = self._get_indent(desc[0])
             block_indent = self._get_initial_indent(desc_block)
             if block_indent > indent:
-                desc = [''] + desc
+                desc = ['', *desc]
             else:
-                desc = ['', desc[0]] + self._indent(desc_block, 4)
+                desc = ['', desc[0], *self._indent(desc_block, 4)]

         return desc

     def _format_admonition(self, admonition: str, lines: list[str]) -> list[str]:
@@ -417,7 +417,7 @@ class GoogleDocstring:
             return [f'.. {admonition}:: {lines[0].strip()}', '']
         elif lines:
             lines = self._indent(self._dedent(lines), 3)
-            return ['.. %s::' % admonition, ''] + lines + ['']
+            return ['.. %s::' % admonition, '', *lines, '']
         else:
             return ['.. %s::' % admonition, '']

@@ -454,7 +454,7 @@ class GoogleDocstring:
             if _type:
                 lines.append(f':{type_role} {_name}: {_type}')

-        return lines + ['']
+        return [*lines, '']

     def _format_field(self, _name: str, _type: str, _desc: list[str]) -> list[str]:
         _desc = self._strip_empty(_desc)

@@ -481,7 +481,7 @@ class GoogleDocstring:
             if _desc[0]:
                 return [field + _desc[0]] + _desc[1:]
             else:
-                return [field] + _desc
+                return [field, *_desc]
         else:
             return [field]

@@ -624,7 +624,7 @@ class GoogleDocstring:
             self._is_in_section = True
             self._section_indent = self._get_current_indent()
             if _directive_regex.match(section):
-                lines = [section] + self._consume_to_next_section()
+                lines = [section, *self._consume_to_next_section()]
             else:
                 lines = self._sections[section.lower()](section)
         finally:

@@ -712,7 +712,7 @@ class GoogleDocstring:
         else:
             header = '.. rubric:: %s' % section
         if lines:
-            return [header, ''] + lines + ['']
+            return [header, '', *lines, '']
         else:
             return [header, '']

@@ -734,7 +734,7 @@ class GoogleDocstring:
         if 'no-index' in self._opt or 'noindex' in self._opt:
             lines.append('   :no-index:')
         if _desc:
-            lines.extend([''] + self._indent(_desc, 3))
+            lines.extend(['', *self._indent(_desc, 3)])
         lines.append('')
         return lines
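
Most of the Napoleon changes pad lists of reStructuredText lines with empty strings around a body, e.g. `['.. note::', '', *lines, '']`; the unpacked form makes the shape of the emitted block easier to read. A reduced sketch of that construction (a standalone toy helper, not Napoleon's actual code):

    def format_admonition(name: str, body: list[str]) -> list[str]:
        # A directive needs a blank line after the marker and after its body;
        # the unpacked literal makes that framing explicit.
        indented = ['   ' + line if line else '' for line in body]
        return [f'.. {name}::', '', *indented, '']


    assert format_admonition('note', ['first line', 'second line']) == [
        '.. note::', '', '   first line', '   second line', '',
    ]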


@@ -55,7 +55,7 @@ class Project:
         for filename in get_matching_files(
             self.srcdir,
             include_paths,
-            [*exclude_paths] + EXCLUDE_PATHS,
+            [*exclude_paths, *EXCLUDE_PATHS],
         ):
             if docname := self.path2doc(filename):
                 if docname in self.docnames:


@@ -128,7 +128,7 @@ class ModuleAnalyzer:
             self.attr_docs = {}
             for (scope, comment) in parser.comments.items():
                 if comment:
-                    self.attr_docs[scope] = comment.splitlines() + ['']
+                    self.attr_docs[scope] = [*comment.splitlines(), '']
                 else:
                     self.attr_docs[scope] = ['']


@@ -245,7 +245,7 @@ class VariableCommentPicker(ast.NodeVisitor):
             else:
                 return None
         else:
-            return self.context + [name]
+            return [*self.context, name]

     def add_entry(self, name: str) -> None:
         qualname = self.get_qualname_for(name)


@@ -251,7 +251,7 @@ class AutoIndexUpgrader(SphinxTransform):
                 logger.warning(msg, location=node)
                 for i, entry in enumerate(node['entries']):
                     if len(entry) == 4:
-                        node['entries'][i] = entry + (None,)
+                        node['entries'][i] = (*entry, None)


 class ExtraTranslatableNodes(SphinxTransform):


@@ -31,7 +31,7 @@ STYLEFILES = ['article.cls', 'fancyhdr.sty', 'titlesec.sty', 'amsmath.sty',
 # only run latex if all needed packages are there
 def kpsetest(*filenames):
     try:
-        subprocess.run(['kpsewhich'] + list(filenames), capture_output=True, check=True)
+        subprocess.run(['kpsewhich', *list(filenames)], capture_output=True, check=True)
         return True
     except (OSError, CalledProcessError):
         return False  # command not found or exit with non-zero


@@ -15,7 +15,7 @@ def apidoc(rootdir, tmp_path, apidoc_params):
     coderoot = rootdir / kwargs.get('coderoot', 'test-root')
     outdir = tmp_path / 'out'
     excludes = [str(coderoot / e) for e in kwargs.get('excludes', [])]
-    args = ['-o', str(outdir), '-F', str(coderoot)] + excludes + kwargs.get('options', [])
+    args = ['-o', str(outdir), '-F', str(coderoot), *excludes, *kwargs.get('options', [])]
     apidoc_main(args)
     return namedtuple('apidoc', 'coderoot,outdir')(coderoot, outdir)