mirror of https://github.com/sphinx-doc/sphinx.git
synced 2025-02-25 18:55:22 -06:00

Enable automatic formatting for `sphinx/util/` (#12957)

parent be52db2bb3
commit 7ece6fc1e7
@@ -483,6 +483,5 @@ exclude = [
    "sphinx/search/*",
    "sphinx/testing/*",
    "sphinx/transforms/*",
    "sphinx/util/*",
    "sphinx/writers/*",
]
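Dropping "sphinx/util/*" from this exclude list is the substantive change: it opts the whole package into the auto-formatter, and every hunk below is a mechanical reformat rather than a behaviour change. As a rough illustration of the dominant rewrites (assumed Ruff-style rules with a single-quote preference; this snippet is not from the commit):

def fn(a: int, b: int, c: int) -> int:
    return a + b + c

x = 'value'  # double-quoted strings are normalised to single quotes
total = fn(
    1,
    2,
    3,
)  # a trailing comma forces one argument per line (the "magic trailing comma")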
@@ -42,6 +42,7 @@ url_re: re.Pattern[str] = re.compile(r'(?P<schema>.+)://.*')

# High-level utility functions.


def docname_join(basedocname: str, docname: str) -> str:
    return posixpath.normpath(posixpath.join('/' + basedocname, '..', docname))[1:]

@@ -82,16 +83,23 @@ class UnicodeDecodeErrorHandler:
        if lineend == -1:
            lineend = len(error.object)
        lineno = error.object.count(b'\n', 0, error.start) + 1
        logger.warning(__('undecodable source characters, replacing with "?": %r'),
                       (error.object[linestart + 1:error.start] + b'>>>' +
                        error.object[error.start:error.end] + b'<<<' +
                        error.object[error.end:lineend]),
                       location=(self.docname, lineno))
        logger.warning(
            __('undecodable source characters, replacing with "?": %r'),
            (
                error.object[linestart + 1 : error.start]
                + b'>>>'
                + error.object[error.start : error.end]
                + b'<<<'
                + error.object[error.end : lineend]
            ),
            location=(self.docname, lineno),
        )
        return ('?', error.end)


# Low-level utility functions and classes.


def parselinenos(spec: str, total: int) -> list[int]:
    """Parse a line number spec (such as "1,2,4-6") and return a list of
    wanted line numbers.

@@ -136,12 +144,16 @@ def isurl(url: str) -> bool:

# deprecated name -> (object to return, canonical path or empty string)
_DEPRECATED_OBJECTS: dict[str, tuple[Any, str, tuple[int, int]]] = {
    'split_index_msg': (_index_entries.split_index_msg,
                        'sphinx.util.index_entries.split_index_msg',
                        (9, 0)),
    'split_into': (_index_entries.split_index_msg,
                   'sphinx.util.index_entries.split_into',
                   (9, 0)),
    'split_index_msg': (
        _index_entries.split_index_msg,
        'sphinx.util.index_entries.split_index_msg',
        (9, 0),
    ),
    'split_into': (
        _index_entries.split_index_msg,
        'sphinx.util.index_entries.split_into',
        (9, 0),
    ),
    'md5': (_md5, '', (9, 0)),
    'sha1': (_sha1, '', (9, 0)),
    'import_object': (_importer.import_object, '', (10, 0)),
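The _DEPRECATED_OBJECTS table reformatted above feeds a module-level deprecation shim. A minimal sketch of the PEP 562 pattern such a table supports (names and the message here are illustrative, not Sphinx's actual implementation):

import warnings
from typing import Any

_DEPRECATED: dict[str, tuple[Any, str]] = {
    # hypothetical entry: replacement object and its canonical path
    'old_name': (len, 'builtins.len'),
}

def __getattr__(name: str) -> Any:
    # Called only when normal module attribute lookup fails (PEP 562).
    if name in _DEPRECATED:
        obj, canonical = _DEPRECATED[name]
        warnings.warn(f'{name} is deprecated, use {canonical} instead',
                      DeprecationWarning, stacklevel=2)
        return obj
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')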
@@ -36,7 +36,9 @@ class FilenameUniqDict(dict[str, tuple[set[str], str]]):
                del self[filename]
                self._existing.discard(unique)

    def merge_other(self, docnames: set[str], other: dict[str, tuple[set[str], Any]]) -> None:
    def merge_other(
        self, docnames: set[str], other: dict[str, tuple[set[str], Any]]
    ) -> None:
        for filename, (docs, _unique) in other.items():
            for doc in docs & set(docnames):
                self.add_file(doc, filename)

@@ -70,7 +72,9 @@ class DownloadFiles(dict[str, tuple[set[str], str]]):
            if not docs:
                del self[filename]

    def merge_other(self, docnames: set[str], other: dict[str, tuple[set[str], Any]]) -> None:
    def merge_other(
        self, docnames: set[str], other: dict[str, tuple[set[str], Any]]
    ) -> None:
        for filename, (docs, _dest) in other.items():
            for docname in docs & set(docnames):
                self.add_file(docname, filename)
@@ -8,8 +8,7 @@ if TYPE_CHECKING:
    from typing import Protocol

    class SupportsWrite(Protocol):
        def write(self, text: str, /) -> int | None:
            ...
        def write(self, text: str, /) -> int | None: ...  # NoQA: E704


class TeeStripANSI:
@@ -33,9 +33,10 @@ _MSG = (
# https://docs.python.org/3/library/stdtypes.html#string-methods

if sys.platform == 'win32':

    class _StrPath(WindowsPath):
        def replace(  # type: ignore[override]
            self, old: str, new: str, count: int = -1, /,
            self, old: str, new: str, count: int = -1, /
        ) -> str:
            # replace exists in both Path and str;
            # in Path it makes filesystem changes, so we use the safer str version

@@ -81,10 +82,12 @@ if sys.platform == 'win32':
        def __len__(self) -> int:
            warnings.warn(_MSG, RemovedInSphinx90Warning, stacklevel=2)
            return len(self.__str__())

else:

    class _StrPath(PosixPath):
        def replace(  # type: ignore[override]
            self, old: str, new: str, count: int = -1, /,
            self, old: str, new: str, count: int = -1, /
        ) -> str:
            # replace exists in both Path and str;
            # in Path it makes filesystem changes, so we use the safer str version
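Both branches define the same _StrPath because, before Python 3.12, pathlib.Path could not be subclassed directly; str-compatibility shims like this derive from WindowsPath or PosixPath per platform. A minimal sketch of the pattern (hypothetical class name, illustrative only):

import sys
from pathlib import PosixPath, WindowsPath

_Base = WindowsPath if sys.platform == 'win32' else PosixPath

class StrCompatPath(_Base):
    def replace(self, old: str, new: str, count: int = -1, /) -> str:  # type: ignore[override]
        # Prefer str semantics: Path.replace() would rename a file on disk.
        return self.__str__().replace(old, new, count)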
@@ -9,4 +9,5 @@ def _format_rfc3339_microseconds(timestamp: int, /) -> str:
    :param timestamp: The timestamp to format, in microseconds.
    """
    seconds, fraction = divmod(timestamp, 10**6)
    return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(seconds)) + f'.{fraction // 1_000}'
    time_tuple = time.gmtime(seconds)
    return time.strftime('%Y-%m-%d %H:%M:%S', time_tuple) + f'.{fraction // 1_000}'
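The helper splits a microsecond timestamp with divmod and truncates to millisecond precision. For example:

import time

timestamp = 1_700_000_000_123_456  # microseconds since the epoch
seconds, fraction = divmod(timestamp, 10**6)  # -> (1700000000, 123456)
stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(seconds))
print(f'{stamp}.{fraction // 1_000}')  # 123456 µs truncated to 123 ms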
@@ -25,19 +25,23 @@ logger = logging.getLogger(__name__)

_whitespace_re = re.compile(r'\s+')
anon_identifier_re = re.compile(r'(@[a-zA-Z0-9_])[a-zA-Z0-9_]*\b')
identifier_re = re.compile(r'''
identifier_re = re.compile(
    r"""
    (   # This 'extends' _anon_identifier_re with the ordinary identifiers,
        # make sure they are in sync.
        (~?\b[a-zA-Z_])  # ordinary identifiers
    |   (@[a-zA-Z0-9_])  # our extension for names of anonymous entities
    )
    [a-zA-Z0-9_]*\b
''', flags=re.VERBOSE)
    """,
    flags=re.VERBOSE,
)
integer_literal_re = re.compile(r'[1-9][0-9]*(\'[0-9]+)*')
octal_literal_re = re.compile(r'0[0-7]*(\'[0-7]+)*')
hex_literal_re = re.compile(r'0[xX][0-9a-fA-F]+(\'[0-9a-fA-F]+)*')
binary_literal_re = re.compile(r'0[bB][01]+(\'[01]+)*')
integers_literal_suffix_re = re.compile(r'''
integers_literal_suffix_re = re.compile(
    r"""
    # unsigned and/or (long) long, in any order, but at least one of them
    (
        ([uU]    ([lL]  |  (ll)  |  (LL))?)
@@ -46,8 +50,11 @@ integers_literal_suffix_re = re.compile(r'''
    )\b
    # the ending word boundary is important for distinguishing
    # between suffixes and UDLs in C++
''', flags=re.VERBOSE)
float_literal_re = re.compile(r'''
    """,
    flags=re.VERBOSE,
)
float_literal_re = re.compile(
    r"""
    [+-]?(
    # decimal
      ([0-9]+(\'[0-9]+)*[eE][+-]?[0-9]+(\'[0-9]+)*)
@@ -59,10 +66,13 @@ float_literal_re = re.compile(r'''
      [0-9a-fA-F]+(\'[0-9a-fA-F]+)*([pP][+-]?[0-9a-fA-F]+(\'[0-9a-fA-F]+)*)?)
    | (0[xX][0-9a-fA-F]+(\'[0-9a-fA-F]+)*\.([pP][+-]?[0-9a-fA-F]+(\'[0-9a-fA-F]+)*)?)
    )
''', flags=re.VERBOSE)
    """,
    flags=re.VERBOSE,
)
float_literal_suffix_re = re.compile(r'[fFlL]\b')
# the ending word boundary is important for distinguishing between suffixes and UDLs in C++
char_literal_re = re.compile(r'''
char_literal_re = re.compile(
    r"""
    ((?:u8)|u|U|L)?
    '(
      (?:[^\\'])
@@ -74,7 +84,9 @@ char_literal_re = re.compile(r'''
    | (?:U[0-9a-fA-F]{8})
    ))
    )'
''', flags=re.VERBOSE)
    """,
    flags=re.VERBOSE,
)


def verify_description_mode(mode: str) -> None:
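These literal regexes rely on re.VERBOSE, which ignores unescaped whitespace and # comments inside the pattern. A small usage sketch with the integer-literal pattern above (illustrative, matching a C++ literal with digit separators):

import re

integer_literal_re = re.compile(r"[1-9][0-9]*('[0-9]+)*")
m = integer_literal_re.match("1'000'000ull")
print(m.group(0) if m else None)  # -> "1'000'000"; the suffix is matched separately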
@@ -116,6 +128,7 @@ class ASTBaseBase:
# Attributes
################################################################################


class ASTAttribute(ASTBaseBase):
    def describe_signature(self, signode: TextElement) -> None:
        raise NotImplementedError(repr(self))
@@ -134,7 +147,7 @@ class ASTCPPAttribute(ASTAttribute):
        return hash(self.arg)

    def _stringify(self, transform: StringifyTransform) -> str:
        return f"[[{self.arg}]]"
        return f'[[{self.arg}]]'

    def describe_signature(self, signode: TextElement) -> None:
        signode.append(addnodes.desc_sig_punctuation('[[', '[['))
@@ -258,12 +271,14 @@ class ASTAttributeList(ASTBaseBase):

################################################################################


class ASTBaseParenExprList(ASTBaseBase):
    pass


################################################################################


class UnsupportedMultiCharacterCharLiteral(Exception):
    pass
@@ -273,9 +288,13 @@ class DefinitionError(Exception):


class BaseParser:
    def __init__(self, definition: str, *,
                 location: nodes.Node | tuple[str, int] | str,
                 config: Config) -> None:
    def __init__(
        self,
        definition: str,
        *,
        location: nodes.Node | tuple[str, int] | str,
        config: Config,
    ) -> None:
        self.definition = definition.strip()
        self.location = location  # for warnings
        self.config = config
@@ -315,16 +334,19 @@ class BaseParser:
    def status(self, msg: str) -> None:
        # for debugging
        indicator = '-' * self.pos + '^'
        logger.debug(f"{msg}\n{self.definition}\n{indicator}")  # NoQA: G004
        logger.debug(f'{msg}\n{self.definition}\n{indicator}')  # NoQA: G004

    def fail(self, msg: str) -> None:
        errors = []
        indicator = '-' * self.pos + '^'
        exMain = DefinitionError(
            'Invalid %s declaration: %s [error at %d]\n  %s\n  %s' %
            (self.language, msg, self.pos, self.definition, indicator))
        errors.append((exMain, "Main error"))
        errors.extend((err, "Potential other error") for err in self.otherErrors)
        msg = (
            f'Invalid {self.language} declaration: {msg} [error at {self.pos}]\n'
            f'  {self.definition}\n'
            f'  {indicator}'
        )
        exc_main = DefinitionError(msg)
        errors.append((exc_main, 'Main error'))
        errors.extend((err, 'Potential other error') for err in self.otherErrors)
        self.otherErrors = []
        raise self._make_multi_error(errors, '')
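The fail() rewrite swaps one %-formatted string for stacked f-strings; the rendered message is identical. A quick check with illustrative values:

language, msg, pos = 'C++', 'Expected identifier', 7
definition, indicator = 'void foo(', '-------^'
old = ('Invalid %s declaration: %s [error at %d]\n  %s\n  %s'
       % (language, msg, pos, definition, indicator))
new = (
    f'Invalid {language} declaration: {msg} [error at {pos}]\n'
    f'  {definition}\n'
    f'  {indicator}'
)
assert old == new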
@@ -342,7 +364,7 @@ class BaseParser:

    def skip_string(self, string: str) -> bool:
        strlen = len(string)
        if self.definition[self.pos:self.pos + strlen] == string:
        if self.definition[self.pos : self.pos + strlen] == string:
            self.pos += strlen
            return True
        return False
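The added spaces inside slices follow PEP 8: when a slice bound is an expression rather than a simple name, the colon gets a space on each side. Both spellings are equivalent:

definition = 'example'
pos, strlen = 2, 3
assert definition[pos:pos + strlen] == definition[pos : pos + strlen]  # 'amp'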
@@ -383,14 +405,14 @@ class BaseParser:
        return ''

    def read_rest(self) -> str:
        rv = self.definition[self.pos:]
        rv = self.definition[self.pos :]
        self.pos = self.end
        return rv

    def assert_end(self, *, allowSemicolon: bool = False) -> None:
        self.skip_ws()
        if allowSemicolon:
            if not self.eof and self.definition[self.pos:] != ';':
            if not self.eof and self.definition[self.pos :] != ';':
                self.fail('Expected end of definition or ;.')
        else:
            if not self.eof:
@@ -418,13 +440,14 @@ class BaseParser:
                symbols.append(brackets[self.current_char])
            elif len(symbols) > 0 and self.current_char == symbols[-1]:
                symbols.pop()
            elif self.current_char in ")]}":
            elif self.current_char in ')]}':
                self.fail("Unexpected '%s' in balanced-token-seq." % self.current_char)
            self.pos += 1
        if self.eof:
            self.fail("Could not find end of balanced-token-seq starting at %d."
                      % startPos)
        return self.definition[startPos:self.pos]
            self.fail(
                f'Could not find end of balanced-token-seq starting at {startPos}.'
            )
        return self.definition[startPos : self.pos]

    def _parse_attribute(self) -> ASTAttribute | None:
        self.skip_ws()
@@ -41,6 +41,7 @@ if TYPE_CHECKING:
try:
    # check if colorama is installed to support color on Windows
    import colorama

    COLORAMA_AVAILABLE = True
except ImportError:
    COLORAMA_AVAILABLE = False
@@ -78,7 +78,7 @@ class progress_message:
        val: BaseException | None,
        tb: TracebackType | None,
    ) -> bool:
        prefix = "" if self.nonl else bold(self.message + ': ')
        prefix = '' if self.nonl else bold(self.message + ': ')
        if isinstance(val, SkipProgressMessage):
            logger.info(prefix + __('skipped'))
            if val.args:
@@ -3,6 +3,7 @@
"Doc fields" are reST field lists in object descriptions that will
be domain-specifically transformed to a more appealing presentation.
"""

from __future__ import annotations

import contextlib
@@ -70,10 +71,17 @@ class Field:
        self.rolename = rolename
        self.bodyrolename = bodyrolename

    def make_xref(self, rolename: str, domain: str, target: str,
                  innernode: type[TextlikeNode] = addnodes.literal_emphasis,
                  contnode: Node | None = None, env: BuildEnvironment | None = None,
                  inliner: Inliner | None = None, location: Element | None = None) -> Node:
    def make_xref(
        self,
        rolename: str,
        domain: str,
        target: str,
        innernode: type[TextlikeNode] = addnodes.literal_emphasis,
        contnode: Node | None = None,
        env: BuildEnvironment | None = None,
        inliner: Inliner | None = None,
        location: Element | None = None,
    ) -> Node:
        # note: for backwards compatibility env is last, but not optional
        assert env is not None
        assert (inliner is None) == (location is None), (inliner, location)
@@ -84,11 +92,18 @@ class Field:
        role = env.get_domain(domain).role(rolename)
        if role is None or inliner is None:
            if role is None and inliner is not None:
                msg = __("Problem in %s domain: field is supposed "
                         "to use role '%s', but that role is not in the domain.")
                msg = __(
                    'Problem in %s domain: field is supposed '
                    "to use role '%s', but that role is not in the domain."
                )
                logger.warning(__(msg), domain, rolename, location=location)
            refnode = addnodes.pending_xref('', refdomain=domain, refexplicit=False,
                                            reftype=rolename, reftarget=target)
            refnode = addnodes.pending_xref(
                '',
                refdomain=domain,
                refexplicit=False,
                reftype=rolename,
                reftarget=target,
            )
            refnode += contnode or innernode(target, target)  # type: ignore[call-arg]
            env.get_domain(domain).process_field_xref(refnode)
            return refnode
@@ -99,13 +114,22 @@ class Field:
        ns, messages = role(rolename, target, target, lineno, inliner, {}, [])
        return nodes.inline(target, '', *ns)

    def make_xrefs(self, rolename: str, domain: str, target: str,
                   innernode: type[TextlikeNode] = addnodes.literal_emphasis,
                   contnode: Node | None = None, env: BuildEnvironment | None = None,
                   inliner: Inliner | None = None, location: Element | None = None,
                   ) -> list[Node]:
        return [self.make_xref(rolename, domain, target, innernode, contnode,
                               env, inliner, location)]
    def make_xrefs(
        self,
        rolename: str,
        domain: str,
        target: str,
        innernode: type[TextlikeNode] = addnodes.literal_emphasis,
        contnode: Node | None = None,
        env: BuildEnvironment | None = None,
        inliner: Inliner | None = None,
        location: Element | None = None,
    ) -> list[Node]:
        return [
            self.make_xref(
                rolename, domain, target, innernode, contnode, env, inliner, location
            )
        ]

    def make_entry(self, fieldarg: str, content: list[Node]) -> tuple[str, list[Node]]:
        return (fieldarg, content)
@@ -123,17 +147,35 @@ class Field:
        fieldname = nodes.field_name('', self.label)
        if fieldarg:
            fieldname += nodes.Text(' ')
            fieldname.extend(self.make_xrefs(self.rolename, domain,
                                             fieldarg, nodes.Text,
                                             env=env, inliner=inliner, location=location))
            fieldname.extend(
                self.make_xrefs(
                    self.rolename,
                    domain,
                    fieldarg,
                    nodes.Text,
                    env=env,
                    inliner=inliner,
                    location=location,
                )
            )

        if len(content) == 1 and (
            isinstance(content[0], nodes.Text) or
            (isinstance(content[0], nodes.inline) and len(content[0]) == 1 and
             isinstance(content[0][0], nodes.Text))):
            content = self.make_xrefs(self.bodyrolename, domain,
                                      content[0].astext(), contnode=content[0],
                                      env=env, inliner=inliner, location=location)
            isinstance(content[0], nodes.Text)
            or (
                isinstance(content[0], nodes.inline)
                and len(content[0]) == 1
                and isinstance(content[0][0], nodes.Text)
            )
        ):
            content = self.make_xrefs(
                self.bodyrolename,
                domain,
                content[0].astext(),
                contnode=content[0],
                env=env,
                inliner=inliner,
                location=location,
            )
        fieldbody = nodes.field_body('', nodes.paragraph('', '', *content))
        return nodes.field('', fieldname, fieldbody)
@@ -155,8 +197,14 @@ class GroupedField(Field):
    is_grouped = True
    list_type = nodes.bullet_list

    def __init__(self, name: str, names: tuple[str, ...] = (), label: str = '',
                 rolename: str = '', can_collapse: bool = False) -> None:
    def __init__(
        self,
        name: str,
        names: tuple[str, ...] = (),
        label: str = '',
        rolename: str = '',
        can_collapse: bool = False,
    ) -> None:
        super().__init__(name, names, label, True, rolename)
        self.can_collapse = can_collapse
@@ -173,9 +221,17 @@ class GroupedField(Field):
        listnode = self.list_type()
        for fieldarg, content in items:
            par = nodes.paragraph()
            par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
                                       addnodes.literal_strong,
                                       env=env, inliner=inliner, location=location))
            par.extend(
                self.make_xrefs(
                    self.rolename,
                    domain,
                    fieldarg,
                    addnodes.literal_strong,
                    env=env,
                    inliner=inliner,
                    location=location,
                )
            )
            par += nodes.Text(' -- ')
            par += content
            listnode += nodes.list_item('', par)
@@ -236,8 +292,11 @@ class TypedField(GroupedField):
    ) -> nodes.field:
        def handle_item(fieldarg: str, content: list[Node]) -> nodes.paragraph:
            par = nodes.paragraph()
            par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
                                       addnodes.literal_strong, env=env))
            par.extend(
                self.make_xrefs(
                    self.rolename, domain, fieldarg, addnodes.literal_strong, env=env
                )
            )
            if fieldarg in types:
                par += nodes.Text(' (')
                # NOTE: using .pop() here to prevent a single type node to be
@@ -246,9 +305,17 @@ class TypedField(GroupedField):
                fieldtype = types.pop(fieldarg)
                if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                    typename = fieldtype[0].astext()
                    par.extend(self.make_xrefs(self.typerolename, domain, typename,
                                               addnodes.literal_emphasis, env=env,
                                               inliner=inliner, location=location))
                    par.extend(
                        self.make_xrefs(
                            self.typerolename,
                            domain,
                            typename,
                            addnodes.literal_emphasis,
                            env=env,
                            inliner=inliner,
                            location=location,
                        )
                    )
                else:
                    par += fieldtype
                par += nodes.Text(')')
@@ -329,8 +396,13 @@ class DocFieldTransformer:
                entries.append(field)

                # but if this has a type then we can at least link it
                if (typedesc and is_typefield and content and
                        len(content) == 1 and isinstance(content[0], nodes.Text)):
                if (
                    typedesc
                    and is_typefield
                    and content
                    and len(content) == 1
                    and isinstance(content[0], nodes.Text)
                ):
                    typed_field = cast(TypedField, typedesc)
                    target = content[0].astext()
                    xrefs = typed_field.make_xrefs(
@@ -356,7 +428,9 @@ class DocFieldTransformer:
                if is_typefield:
                    # filter out only inline nodes; others will result in invalid
                    # markup being written out
                    content = [n for n in content if isinstance(n, nodes.Inline | nodes.Text)]
                    content = [
                        n for n in content if isinstance(n, nodes.Inline | nodes.Text)
                    ]
                    if content:
                        types.setdefault(typename, {})[fieldarg] = content
                    continue
@@ -368,12 +442,10 @@ class DocFieldTransformer:
                except ValueError:
                    pass
                else:
                    types.setdefault(typename, {})[argname] = \
                        [nodes.Text(argtype)]
                    types.setdefault(typename, {})[argname] = [nodes.Text(argtype)]
                    fieldarg = argname

            translatable_content = nodes.inline(field_body.rawsource,
                                                translatable=True)
            translatable_content = nodes.inline(field_body.rawsource, translatable=True)
            translatable_content.document = field_body.parent.document
            translatable_content.source = field_body.parent.source
            translatable_content.line = field_body.parent.line
@@ -383,7 +455,9 @@ class DocFieldTransformer:
            # get one entry per field
            if typedesc.is_grouped:
                if typename in groupindices:
                    group = cast(tuple[Field, list, Node], entries[groupindices[typename]])
                    group = cast(
                        tuple[Field, list, Node], entries[groupindices[typename]]
                    )
                else:
                    groupindices[typename] = len(entries)
                    group = (typedesc, [], field)
@@ -406,7 +480,13 @@ class DocFieldTransformer:
                env = self.directive.state.document.settings.env
                inliner = self.directive.state.inliner
                domain = self.directive.domain or ''
                new_list += fieldtype.make_field(fieldtypes, domain, items,
                                                 env=env, inliner=inliner, location=location)
                new_list += fieldtype.make_field(
                    fieldtypes,
                    domain,
                    items,
                    env=env,
                    inliner=inliner,
                    location=location,
                )

        node.replace_self(new_list)
@@ -29,7 +29,7 @@ def separate_metadata(s: str | None) -> tuple[str | None, dict[str, str]]:
            field_name = matched.group()[1:].split(':', 1)[0]
            if field_name.startswith('meta '):
                name = field_name[5:].strip()
                metadata[name] = line[matched.end():].strip()
                metadata[name] = line[matched.end() :].strip()
            else:
                lines.append(line)
        else:
@@ -24,7 +24,9 @@ from sphinx.util import logging
from sphinx.util.parsing import nested_parse_to_nodes

logger = logging.getLogger(__name__)
report_re = re.compile('^(.+?:(?:\\d+)?): \\((DEBUG|INFO|WARNING|ERROR|SEVERE)/(\\d+)?\\) ')
report_re = re.compile(
    '^(.+?:(?:\\d+)?): \\((DEBUG|INFO|WARNING|ERROR|SEVERE)/(\\d+)?\\) '
)

if TYPE_CHECKING:
    from collections.abc import Callable, Iterator  # NoQA: TCH003
@@ -114,8 +116,8 @@ def unregister_node(node: type[Element]) -> None:
    This is inverse of ``nodes._add_nodes_class_names()``.
    """
    if hasattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__):
        delattr(nodes.GenericNodeVisitor, "visit_" + node.__name__)
        delattr(nodes.GenericNodeVisitor, "depart_" + node.__name__)
        delattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__)
        delattr(nodes.GenericNodeVisitor, 'depart_' + node.__name__)
        delattr(nodes.SparseNodeVisitor, 'visit_' + node.__name__)
        delattr(nodes.SparseNodeVisitor, 'depart_' + node.__name__)
@@ -129,7 +131,9 @@ def patched_get_language() -> Iterator[None]:
    """
    from docutils.languages import get_language

    def patched_get_language(language_code: str, reporter: Reporter | None = None) -> Any:
    def patched_get_language(
        language_code: str, reporter: Reporter | None = None
    ) -> Any:
        return get_language(language_code)

    try:
@@ -153,7 +157,9 @@ def patched_rst_get_language() -> Iterator[None]:
    """
    from docutils.parsers.rst.languages import get_language

    def patched_get_language(language_code: str, reporter: Reporter | None = None) -> Any:
    def patched_get_language(
        language_code: str, reporter: Reporter | None = None
    ) -> Any:
        return get_language(language_code)

    try:
@@ -170,7 +176,9 @@ def using_user_docutils_conf(confdir: str | None) -> Iterator[None]:
    try:
        docutilsconfig = os.environ.get('DOCUTILSCONFIG', None)
        if confdir:
            os.environ['DOCUTILSCONFIG'] = path.join(path.abspath(confdir), 'docutils.conf')
            os.environ['DOCUTILSCONFIG'] = path.join(
                path.abspath(confdir), 'docutils.conf'
            )

        yield
    finally:
@@ -183,9 +191,11 @@ def using_user_docutils_conf(confdir: str | None) -> Iterator[None]:
@contextmanager
def patch_docutils(confdir: str | None = None) -> Iterator[None]:
    """Patch to docutils temporarily."""
    with patched_get_language(), \
         patched_rst_get_language(), \
         using_user_docutils_conf(confdir):
    with (
        patched_get_language(),
        patched_rst_get_language(),
        using_user_docutils_conf(confdir),
    ):
        yield
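The rewritten with statement replaces backslash continuations with parenthesised context managers, accepted since Python 3.10. A minimal runnable sketch:

from collections.abc import Iterator
from contextlib import contextmanager

@contextmanager
def tag(name: str) -> Iterator[None]:
    print(f'enter {name}')
    yield
    print(f'exit {name}')

with (
    tag('a'),
    tag('b'),
):
    pass  # prints enter a, enter b, then exit b, exit a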
@@ -204,7 +214,7 @@ class CustomReSTDispatcher:
        self.enable()

    def __exit__(
        self, exc_type: type[Exception], exc_value: Exception, traceback: Any,
        self, exc_type: type[Exception], exc_value: Exception, traceback: Any
    ) -> None:
        self.disable()
@@ -219,16 +229,27 @@ class CustomReSTDispatcher:
        directives.directive = self.directive_func
        roles.role = self.role_func

    def directive(self,
                  directive_name: str, language_module: ModuleType, document: nodes.document,
                  ) -> tuple[type[Directive] | None, list[system_message]]:
    def directive(
        self,
        directive_name: str,
        language_module: ModuleType,
        document: nodes.document,
    ) -> tuple[type[Directive] | None, list[system_message]]:
        return self.directive_func(directive_name, language_module, document)

    def role(
        self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter,
        self,
        role_name: str,
        language_module: ModuleType,
        lineno: int,
        reporter: Reporter,
    ) -> tuple[RoleFunction, list[system_message]]:
        return self.role_func(role_name, language_module,  # type: ignore[return-value]
                              lineno, reporter)
        return self.role_func(
            role_name,
            language_module,  # type: ignore[return-value]
            lineno,
            reporter,
        )


class ElementLookupError(Exception):
@@ -258,7 +279,9 @@ class sphinx_domains(CustomReSTDispatcher):
            if element is not None:
                return element, []
            else:
                logger.warning(_('unknown directive or role name: %s:%s'), domain_name, name)
                logger.warning(
                    _('unknown directive or role name: %s:%s'), domain_name, name
                )
        # else look in the default domain
        else:
            def_domain = self.env.temp_data.get('default_domain')
@@ -274,16 +297,23 @@ class sphinx_domains(CustomReSTDispatcher):

        raise ElementLookupError

    def directive(self,
                  directive_name: str, language_module: ModuleType, document: nodes.document,
                  ) -> tuple[type[Directive] | None, list[system_message]]:
    def directive(
        self,
        directive_name: str,
        language_module: ModuleType,
        document: nodes.document,
    ) -> tuple[type[Directive] | None, list[system_message]]:
        try:
            return self.lookup_domain_element('directive', directive_name)
        except ElementLookupError:
            return super().directive(directive_name, language_module, document)

    def role(
        self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter,
        self,
        role_name: str,
        language_module: ModuleType,
        lineno: int,
        reporter: Reporter,
    ) -> tuple[RoleFunction, list[system_message]]:
        try:
            return self.lookup_domain_element('role', role_name)
@@ -295,26 +325,39 @@ class WarningStream:
    def write(self, text: str) -> None:
        matched = report_re.search(text)
        if not matched:
            logger.warning(text.rstrip("\r\n"), type="docutils")
            logger.warning(text.rstrip('\r\n'), type='docutils')
        else:
            location, type, level = matched.groups()
            message = report_re.sub('', text).rstrip()
            logger.log(type, message, location=location, type="docutils")
            logger.log(type, message, location=location, type='docutils')


class LoggingReporter(Reporter):
    @classmethod
    def from_reporter(cls: type[LoggingReporter], reporter: Reporter) -> LoggingReporter:
    def from_reporter(
        cls: type[LoggingReporter], reporter: Reporter
    ) -> LoggingReporter:
        """Create an instance of LoggingReporter from other reporter object."""
        return cls(reporter.source, reporter.report_level, reporter.halt_level,
                   reporter.debug_flag, reporter.error_handler)
        return cls(
            reporter.source,
            reporter.report_level,
            reporter.halt_level,
            reporter.debug_flag,
            reporter.error_handler,
        )

    def __init__(self, source: str, report_level: int = Reporter.WARNING_LEVEL,
                 halt_level: int = Reporter.SEVERE_LEVEL, debug: bool = False,
                 error_handler: str = 'backslashreplace') -> None:
    def __init__(
        self,
        source: str,
        report_level: int = Reporter.WARNING_LEVEL,
        halt_level: int = Reporter.SEVERE_LEVEL,
        debug: bool = False,
        error_handler: str = 'backslashreplace',
    ) -> None:
        stream = cast(IO, WarningStream())
        super().__init__(source, report_level, halt_level,
                         stream, debug, error_handler=error_handler)
        super().__init__(
            source, report_level, halt_level, stream, debug, error_handler=error_handler
        )


class NullReporter(Reporter):
@@ -351,8 +394,13 @@ class SphinxFileOutput(FileOutput):
        super().__init__(**kwargs)

    def write(self, data: str) -> str:
        if (self.destination_path and self.autoclose and 'b' not in self.mode and
                self.overwrite_if_changed and os.path.exists(self.destination_path)):
        if (
            self.destination_path
            and self.autoclose
            and 'b' not in self.mode
            and self.overwrite_if_changed
            and os.path.exists(self.destination_path)
        ):
            with open(self.destination_path, encoding=self.encoding) as f:
                # skip writing: content not changed
                if f.read() == data:
@@ -416,7 +464,9 @@ class SphinxDirective(Directive):
            return f'<unknown>:{line}'
        return ''

    def parse_content_to_nodes(self, allow_section_headings: bool = False) -> list[Node]:
    def parse_content_to_nodes(
        self, allow_section_headings: bool = False
    ) -> list[Node]:
        """Parse the directive's content into nodes.

        :param allow_section_headings:
@@ -437,7 +487,12 @@ class SphinxDirective(Directive):
        )

    def parse_text_to_nodes(
        self, text: str = '', /, *, offset: int = -1, allow_section_headings: bool = False,
        self,
        text: str = '',
        /,
        *,
        offset: int = -1,
        allow_section_headings: bool = False,
    ) -> list[Node]:
        """Parse *text* into nodes.

@@ -465,7 +520,7 @@ class SphinxDirective(Directive):
        )

    def parse_inline(
        self, text: str, *, lineno: int = -1,
        self, text: str, *, lineno: int = -1
    ) -> tuple[list[Node], list[system_message]]:
        """Parse *text* as inline elements.

@@ -496,6 +551,7 @@ class SphinxRole:
    This class is strongly coupled with Sphinx.
    """

    # fmt: off
    name: str  #: The role name actually used in the document.
    rawtext: str  #: A string containing the entire interpreted text input.
    text: str  #: The interpreted text content.
@@ -507,10 +563,18 @@ class SphinxRole:
    #: A list of strings, the directive content for customisation
    #: (from the "role" directive).
    content: Sequence[str]
    # fmt: on

    def __call__(self, name: str, rawtext: str, text: str, lineno: int,
                 inliner: Inliner, options: dict | None = None, content: Sequence[str] = (),
                 ) -> tuple[list[Node], list[system_message]]:
    def __call__(
        self,
        name: str,
        rawtext: str,
        text: str,
        lineno: int,
        inliner: Inliner,
        options: dict | None = None,
        content: Sequence[str] = (),
    ) -> tuple[list[Node], list[system_message]]:
        self.rawtext = rawtext
        self.text = unescape(text)
        self.lineno = lineno
@@ -585,17 +649,26 @@ class ReferenceRole(SphinxRole):
    .. versionadded:: 2.0
    """

    # fmt: off
    has_explicit_title: bool  #: A boolean indicates the role has explicit title or not.
    disabled: bool  #: A boolean indicates the reference is disabled.
    title: str  #: The link title for the interpreted text.
    target: str  #: The link target for the interpreted text.
    # fmt: on

    # \x00 means the "<" was backslash-escaped
    explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)

    def __call__(self, name: str, rawtext: str, text: str, lineno: int,
                 inliner: Inliner, options: dict | None = None, content: Sequence[str] = (),
                 ) -> tuple[list[Node], list[system_message]]:
    def __call__(
        self,
        name: str,
        rawtext: str,
        text: str,
        lineno: int,
        inliner: Inliner,
        options: dict | None = None,
        content: Sequence[str] = (),
    ) -> tuple[list[Node], list[system_message]]:
        if options is None:
            options = {}

@@ -698,6 +771,7 @@ def new_document(source_path: str, settings: Any = None) -> nodes.document:

    # Create a new instance of nodes.document using cached reporter
    from sphinx import addnodes

    document = addnodes.document(settings, reporter, source=source_path)
    document.note_source(source_path, -1)
    return document
@@ -31,12 +31,18 @@ def save_traceback(app: Sphinx | None, exc: BaseException) -> str:
        last_msgs = exts_list = ''
    else:
        extensions = app.extensions.values()
        last_msgs = '\n'.join(f'# {strip_escape_sequences(s).strip()}'
                              for s in app.messagelog)
        exts_list = '\n'.join(f'# {ext.name} ({ext.version})' for ext in extensions
                              if ext.version != 'builtin')
        last_msgs = '\n'.join(
            f'# {strip_escape_sequences(s).strip()}' for s in app.messagelog
        )
        exts_list = '\n'.join(
            f'# {ext.name} ({ext.version})'
            for ext in extensions
            if ext.version != 'builtin'
        )

    with NamedTemporaryFile('w', suffix='.log', prefix='sphinx-err-', delete=False) as f:
    with NamedTemporaryFile(
        'w', suffix='.log', prefix='sphinx-err-', delete=False
    ) as f:
        f.write(f"""\
# Platform: {sys.platform}; ({platform.platform()})
# Sphinx version: {sphinx.__display_version__}
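save_traceback writes the crash log through NamedTemporaryFile(delete=False), so the file outlives the handle and its path can be shown to the user. A sketch of the same pattern:

import platform
import sys
from tempfile import NamedTemporaryFile

with NamedTemporaryFile('w', suffix='.log', prefix='sphinx-err-', delete=False) as f:
    f.write(f'# Platform: {sys.platform}; ({platform.platform()})\n')
    path = f.name  # the file persists at this path after the block exits
print(path)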
@@ -35,11 +35,14 @@ def _template_basename(filename: str | os.PathLike[str]) -> str | None:
    return None


def copy_asset_file(source: str | os.PathLike[str], destination: str | os.PathLike[str],
                    context: dict[str, Any] | None = None,
                    renderer: BaseRenderer | None = None,
                    *,
                    force: bool = False) -> None:
def copy_asset_file(
    source: str | os.PathLike[str],
    destination: str | os.PathLike[str],
    context: dict[str, Any] | None = None,
    renderer: BaseRenderer | None = None,
    *,
    force: bool = False,
) -> None:
    """Copy an asset file to destination.

    On copying, it expands the template variables if context argument is given and
@@ -62,38 +65,51 @@ def copy_asset_file(source: str | os.PathLike[str], destination: str | os.PathLi
    if _template_basename(source) and context is not None:
        if renderer is None:
            from sphinx.util.template import SphinxRenderer

            renderer = SphinxRenderer()

        with open(source, encoding='utf-8') as fsrc:
            template_content = fsrc.read()
        rendered_template = renderer.render_string(template_content, context)

        if (
            not force
            and destination.exists()
            and template_content != rendered_template
        ):
            msg = __('Aborted attempted copy from rendered template %s to %s '
                     '(the destination path has existing data).')
            logger.warning(msg, os.fsdecode(source), os.fsdecode(destination),
                           type='misc', subtype='copy_overwrite')
        if not force and destination.exists() and template_content != rendered_template:
            msg = __(
                'Aborted attempted copy from rendered template %s to %s '
                '(the destination path has existing data).'
            )
            logger.warning(
                msg,
                os.fsdecode(source),
                os.fsdecode(destination),
                type='misc',
                subtype='copy_overwrite',
            )
            return

        destination = _template_basename(destination) or destination
        with open(destination, 'w', encoding='utf-8') as fdst:
            msg = __('Writing evaluated template result to %s')
            logger.info(msg, os.fsdecode(destination), type='misc',
                        subtype='template_evaluation')
            logger.info(
                msg,
                os.fsdecode(destination),
                type='misc',
                subtype='template_evaluation',
            )
            fdst.write(rendered_template)
    else:
        copyfile(source, destination, force=force)


def copy_asset(source: str | os.PathLike[str], destination: str | os.PathLike[str],
               excluded: PathMatcher = lambda path: False,
               context: dict[str, Any] | None = None, renderer: BaseRenderer | None = None,
               onerror: Callable[[str, Exception], None] | None = None,
               *, force: bool = False) -> None:
def copy_asset(
    source: str | os.PathLike[str],
    destination: str | os.PathLike[str],
    excluded: PathMatcher = lambda path: False,
    context: dict[str, Any] | None = None,
    renderer: BaseRenderer | None = None,
    onerror: Callable[[str, Exception], None] | None = None,
    *,
    force: bool = False,
) -> None:
    """Copy asset files to destination recursively.

    On copying, it expands the template variables if context argument is given and
@@ -114,14 +130,14 @@ def copy_asset(source: str | os.PathLike[str], destination: str | os.PathLike[st

    if renderer is None:
        from sphinx.util.template import SphinxRenderer

        renderer = SphinxRenderer()

    ensuredir(destination)
    if os.path.isfile(source):
        copy_asset_file(source, destination,
                        context=context,
                        renderer=renderer,
                        force=force)
        copy_asset_file(
            source, destination, context=context, renderer=renderer, force=force
        )
        return

    for root, dirs, files in os.walk(source, followlinks=True):
@@ -135,11 +151,13 @@ def copy_asset(source: str | os.PathLike[str], destination: str | os.PathLike[st
        for filename in files:
            if not excluded(posixpath.join(reldir, filename)):
                try:
                    copy_asset_file(posixpath.join(root, filename),
                                    posixpath.join(destination, reldir),
                                    context=context,
                                    renderer=renderer,
                                    force=force)
                    copy_asset_file(
                        posixpath.join(root, filename),
                        posixpath.join(destination, reldir),
                        context=context,
                        renderer=renderer,
                        force=force,
                    )
                except Exception as exc:
                    if onerror:
                        onerror(posixpath.join(root, filename), exc)
@@ -12,7 +12,7 @@ from sphinx.deprecation import RemovedInSphinx90Warning
_WEEKDAY_NAME = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
_MONTH_NAME = ('',  # Placeholder for indexing purposes
               'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
               'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
               'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')  # fmt: skip
_GMT_OFFSET = float(time.localtime().tm_gmtoff)


@@ -29,18 +29,20 @@ def rfc1123_to_epoch(rfc1123: str) -> float:
    t = parsedate_tz(rfc1123)
    if t is None:
        raise ValueError
    if not rfc1123.endswith(" GMT"):
    if not rfc1123.endswith(' GMT'):
        warnings.warn(
            "HTTP-date string does not meet RFC 7231 requirements "
            'HTTP-date string does not meet RFC 7231 requirements '
            f"(must end with 'GMT'): {rfc1123!r}",
            RemovedInSphinx90Warning, stacklevel=3,
            RemovedInSphinx90Warning,
            stacklevel=3,
        )
    epoch_secs = time.mktime(time.struct_time(t[:9])) + _GMT_OFFSET
    if (gmt_offset := t[9]) != 0:
        warnings.warn(
            "HTTP-date string does not meet RFC 7231 requirements "
            f"(must be GMT time): {rfc1123!r}",
            RemovedInSphinx90Warning, stacklevel=3,
            'HTTP-date string does not meet RFC 7231 requirements '
            f'(must be GMT time): {rfc1123!r}',
            RemovedInSphinx90Warning,
            stacklevel=3,
        )
        return epoch_secs - (gmt_offset or 0)
    return epoch_secs
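`# fmt: skip` asks the formatter to leave a single hand-aligned statement untouched (here the _MONTH_NAME table), while `# fmt: off` / `# fmt: on` bracket longer regions. An illustrative sketch:

_MONTH_NAME = ('',  # placeholder so that 'Jan' lands at index 1
               'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
               'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')  # fmt: skip

# fmt: off
IDENTITY = [
    1, 0,
    0, 1,
]
# fmt: on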
@@ -75,7 +75,6 @@ class LocaleFileInfoBase(NamedTuple):


class CatalogInfo(LocaleFileInfoBase):

    @property
    def po_file(self) -> str:
        return self.domain + '.po'
@@ -94,8 +93,9 @@ class CatalogInfo(LocaleFileInfoBase):

    def is_outdated(self) -> bool:
        return (
            not path.exists(self.mo_path) or
            _last_modified_time(self.mo_path) < _last_modified_time(self.po_path))
            not path.exists(self.mo_path)
            or _last_modified_time(self.mo_path) < _last_modified_time(self.po_path)
        )  # fmt: skip

    def write_mo(self, locale: str, use_fuzzy: bool = False) -> None:
        with open(self.po_path, encoding=self.charset) as file_po:
@@ -115,8 +115,13 @@ class CatalogInfo(LocaleFileInfoBase):
class CatalogRepository:
    """A repository for message catalogs."""

    def __init__(self, basedir: str | os.PathLike[str], locale_dirs: list[str],
                 language: str, encoding: str) -> None:
    def __init__(
        self,
        basedir: str | os.PathLike[str],
        locale_dirs: list[str],
        language: str,
        encoding: str,
    ) -> None:
        self.basedir = basedir
        self._locale_dirs = locale_dirs
        self.language = language
@@ -205,13 +210,17 @@ date_format_mappings = {
    '%z': 'ZZZ',  # UTC offset in the form ±HHMM[SS[.ffffff]]
                  # (empty string if the object is naive).
    '%%': '%',
}
}  # fmt: skip

date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))


def babel_format_date(date: datetime, format: str, locale: str,
                      formatter: Formatter = babel.dates.format_date) -> str:
def babel_format_date(
    date: datetime,
    format: str,
    locale: str,
    formatter: Formatter = babel.dates.format_date,
) -> str:
    # Check if we have the tzinfo attribute. If not we cannot do any time
    # related formats.
    if not hasattr(date, 'tzinfo'):
@@ -223,8 +232,13 @@ def babel_format_date(date: datetime, format: str, locale: str,
        # fallback to English
        return formatter(date, format, locale='en')
    except AttributeError:
        logger.warning(__('Invalid date format. Quote the string by single quote '
                          'if you want to output it directly: %s'), format)
        logger.warning(
            __(
                'Invalid date format. Quote the string by single quote '
                'if you want to output it directly: %s'
            ),
            format,
        )
        return format

@@ -267,12 +281,15 @@ def format_date(
            else:
                function = babel.dates.format_datetime

            result.append(babel_format_date(date, babel_format, locale=language,
                                            formatter=function))
            result.append(
                babel_format_date(
                    date, babel_format, locale=language, formatter=function
                )
            )
        else:
            result.append(token)

    return "".join(result)
    return ''.join(result)


def get_image_filename_for_language(
@@ -13,6 +13,7 @@ if TYPE_CHECKING:

try:
    from PIL import Image

    PILLOW_AVAILABLE = True
except ImportError:
    PILLOW_AVAILABLE = False

@@ -54,13 +55,13 @@ def get_image_size(filename: str) -> tuple[int, int] | None:


@overload
def guess_mimetype(filename: PathLike[str] | str, default: str) -> str:
    ...
def guess_mimetype(filename: PathLike[str] | str, default: str) -> str: ...  # NoQA: E704


@overload
def guess_mimetype(filename: PathLike[str] | str, default: None = None) -> str | None:
    ...
def guess_mimetype(  # NoQA: E704
    filename: PathLike[str] | str, default: None = None
) -> str | None: ...


def guess_mimetype(

@@ -121,12 +122,12 @@ def _image_type_from_file(filename: PathLike[str] | str) -> str:

    # JPEG data
    # https://en.wikipedia.org/wiki/JPEG_File_Interchange_Format#File_format_structure
    if header.startswith(b'\xFF\xD8'):
    if header.startswith(b'\xff\xd8'):
        return 'jpeg'

    # Portable Network Graphics
    # https://en.wikipedia.org/wiki/PNG#File_header
    if header.startswith(b'\x89PNG\r\n\x1A\n'):
    if header.startswith(b'\x89PNG\r\n\x1a\n'):
        return 'png'

    # Scalable Vector Graphics
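The lower-cased hex escapes are purely cosmetic: b'\xFF\xD8' and b'\xff\xd8' are the same two bytes (the JPEG SOI marker; PNG files start with b'\x89PNG\r\n\x1a\n'). A tiny sniffing sketch:

def image_type(header: bytes) -> str | None:
    # The first bytes of a file identify its format ('magic numbers').
    if header.startswith(b'\xff\xd8'):
        return 'jpeg'
    if header.startswith(b'\x89PNG\r\n\x1a\n'):
        return 'png'
    return None

assert image_type(b'\xFF\xD8\xFF\xE0') == 'jpeg'  # escape case does not matter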
@@ -52,11 +52,7 @@ if TYPE_CHECKING:
        | types.MethodDescriptorType
        | types.ClassMethodDescriptorType
    )
    _SignatureType: TypeAlias = (
        Callable[..., Any]
        | staticmethod
        | classmethod
    )
    _SignatureType: TypeAlias = Callable[..., Any] | staticmethod | classmethod

logger = logging.getLogger(__name__)

@@ -266,7 +262,8 @@ def isstaticmethod(
def isdescriptor(x: Any) -> TypeIs[_SupportsGet | _SupportsSet | _SupportsDelete]:
    """Check if the object is a :external+python:term:`descriptor`."""
    return any(
        callable(safe_getattr(x, item, None)) for item in ('__get__', '__set__', '__delete__')
        callable(safe_getattr(x, item, None))
        for item in ('__get__', '__set__', '__delete__')
    )

@@ -429,7 +426,10 @@ def object_description(obj: Any, *, _seen: frozenset[int] = frozenset()) -> str:
        sorted_keys = sorted(obj, key=lambda k: object_description(k, _seen=seen))

        items = (
            (object_description(key, _seen=seen), object_description(obj[key], _seen=seen))
            (
                object_description(key, _seen=seen),
                object_description(obj[key], _seen=seen),
            )
            for key in sorted_keys
        )
        return '{%s}' % ', '.join(f'{key}: {value}' for (key, value) in items)

@@ -442,7 +442,9 @@ def object_description(obj: Any, *, _seen: frozenset[int] = frozenset()) -> str:
        except TypeError:
            # Cannot sort set values, fall back to using descriptions as a sort key
            sorted_values = sorted(obj, key=lambda x: object_description(x, _seen=seen))
        return '{%s}' % ', '.join(object_description(x, _seen=seen) for x in sorted_values)
        return '{%s}' % ', '.join(
            object_description(x, _seen=seen) for x in sorted_values
        )
    elif isinstance(obj, frozenset):
        if id(obj) in seen:
            return 'frozenset(...)'

@@ -760,7 +762,10 @@ def stringify_signature(
    args = []
    last_kind = None
    for param in sig.parameters.values():
        if param.kind != Parameter.POSITIONAL_ONLY and last_kind == Parameter.POSITIONAL_ONLY:
        if (
            param.kind != Parameter.POSITIONAL_ONLY
            and last_kind == Parameter.POSITIONAL_ONLY
        ):
            # PEP-570: Separator for Positional Only Parameter: /
            args.append('/')
        if param.kind == Parameter.KEYWORD_ONLY and last_kind in (

@@ -797,7 +802,11 @@ def stringify_signature(
        args.append('/')

    concatenated_args = ', '.join(args)
    if sig.return_annotation is EMPTY or not show_annotation or not show_return_annotation:
    if (
        sig.return_annotation is EMPTY
        or not show_annotation
        or not show_return_annotation
    ):
        return f'({concatenated_args})'
    else:
        retann = stringify_annotation(sig.return_annotation, mode)  # type: ignore[arg-type]

@@ -842,11 +851,15 @@ def signature_from_ast(node: ast.FunctionDef, code: str = '') -> Signature:

    # normal arguments
    for arg, defexpr in zip(args.args, defaults[pos_only_offset:], strict=False):
        params.append(_define(Parameter.POSITIONAL_OR_KEYWORD, arg, code, defexpr=defexpr))
        params.append(
            _define(Parameter.POSITIONAL_OR_KEYWORD, arg, code, defexpr=defexpr)
        )

    # variadic positional argument (no possible default expression)
    if args.vararg:
        params.append(_define(Parameter.VAR_POSITIONAL, args.vararg, code, defexpr=None))
        params.append(
            _define(Parameter.VAR_POSITIONAL, args.vararg, code, defexpr=None)
        )

    # keyword-only arguments
    for arg, defexpr in zip(args.kwonlyargs, args.kw_defaults, strict=False):
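isdescriptor just probes for any of the three descriptor slots. An equivalent standalone check:

def is_descriptor(x: object) -> bool:
    return any(
        callable(getattr(x, item, None))
        for item in ('__get__', '__set__', '__delete__')
    )

assert is_descriptor(property(lambda self: None))
assert not is_descriptor(42)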
@@ -1,4 +1,5 @@
"""Inventory utility functions for Sphinx."""

from __future__ import annotations

import os

@@ -41,7 +42,7 @@ class InventoryFileReader:
        pos = self.buffer.find(b'\n')
        if pos != -1:
            line = self.buffer[:pos].decode()
            self.buffer = self.buffer[pos + 1:]
            self.buffer = self.buffer[pos + 1 :]
        elif self.eof:
            line = self.buffer.decode()
            self.buffer = b''

@@ -72,7 +73,7 @@ class InventoryFileReader:
            pos = buf.find(b'\n')
            while pos != -1:
                yield buf[:pos].decode()
                buf = buf[pos + 1:]
                buf = buf[pos + 1 :]
                pos = buf.find(b'\n')


@@ -135,8 +136,11 @@ class InventoryFile:

        for line in stream.read_compressed_lines():
            # be careful to handle names with embedded spaces correctly
            m = re.match(r'(.+?)\s+(\S+)\s+(-?\d+)\s+?(\S*)\s+(.*)',
                         line.rstrip(), flags=re.VERBOSE)
            m = re.match(
                r'(.+?)\s+(\S+)\s+(-?\d+)\s+?(\S*)\s+(.*)',
                line.rstrip(),
                flags=re.VERBOSE,
            )
            if not m:
                continue
            name, type, prio, location, dispname = m.groups()

@@ -155,15 +159,20 @@ class InventoryFile:
            # Some types require case insensitive matches:
            # * 'term': https://github.com/sphinx-doc/sphinx/issues/9291
            # * 'label': https://github.com/sphinx-doc/sphinx/issues/12008
            definition = f"{type}:{name}"
            definition = f'{type}:{name}'
            content = prio, location, dispname
            lowercase_definition = definition.lower()
            if lowercase_definition in potential_ambiguities:
                if potential_ambiguities[lowercase_definition] != content:
                    actual_ambiguities.add(definition)
                else:
                    logger.debug(__("inventory <%s> contains duplicate definitions of %s"),
                                 uri, definition, type='intersphinx', subtype='external')
                    logger.debug(
                        __('inventory <%s> contains duplicate definitions of %s'),
                        uri,
                        definition,
                        type='intersphinx',
                        subtype='external',
                    )
            else:
                potential_ambiguities[lowercase_definition] = content
            if location.endswith('$'):

@@ -172,25 +181,35 @@ class InventoryFile:
            inv_item: InventoryItem = projname, version, location, dispname
            invdata.setdefault(type, {})[name] = inv_item
        for ambiguity in actual_ambiguities:
            logger.info(__("inventory <%s> contains multiple definitions for %s"),
                        uri, ambiguity, type='intersphinx', subtype='external')
            logger.info(
                __('inventory <%s> contains multiple definitions for %s'),
                uri,
                ambiguity,
                type='intersphinx',
                subtype='external',
            )
        return invdata

    @classmethod
    def dump(
        cls: type[InventoryFile], filename: str, env: BuildEnvironment, builder: Builder,
        cls: type[InventoryFile],
        filename: str,
        env: BuildEnvironment,
        builder: Builder,
    ) -> None:
        def escape(string: str) -> str:
            return re.sub("\\s+", " ", string)
            return re.sub('\\s+', ' ', string)

        with open(os.path.join(filename), 'wb') as f:
            # header
            f.write(('# Sphinx inventory version 2\n'
                     '# Project: %s\n'
                     '# Version: %s\n'
                     '# The remainder of this file is compressed using zlib.\n' %
                     (escape(env.config.project),
                      escape(env.config.version))).encode())
            f.write(
                (
                    '# Sphinx inventory version 2\n'
                    f'# Project: {escape(env.config.project)}\n'
                    f'# Version: {escape(env.config.version)}\n'
                    '# The remainder of this file is compressed using zlib.\n'
                ).encode()
            )

            # body
            compressor = zlib.compressobj(9)

@@ -205,7 +224,6 @@ class InventoryFile:
                    uri += '#' + anchor
                if dispname == fullname:
                    dispname = '-'
                entry = ('%s %s:%s %s %s %s\n' %
                         (fullname, domain.name, type, prio, uri, dispname))
                entry = f'{fullname} {domain.name}:{type} {prio} {uri} {dispname}\n'
                f.write(compressor.compress(entry.encode()))
            f.write(compressor.flush())
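dump writes a plain-text header and then streams the body through a single zlib.compressobj(9), flushing once at the end. The same streaming pattern in isolation:

import zlib

compressor = zlib.compressobj(9)  # level 9: best compression
chunks = [compressor.compress(line.encode()) for line in ('a 1\n', 'b 2\n')]
chunks.append(compressor.flush())  # emit whatever is still buffered
assert zlib.decompress(b''.join(chunks)) == b'a 1\nb 2\n'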
@ -27,27 +27,36 @@ if TYPE_CHECKING:
|
||||
NAMESPACE = 'sphinx'
|
||||
VERBOSE = 15
|
||||
|
||||
LEVEL_NAMES: defaultdict[str, int] = defaultdict(lambda: logging.WARNING, {
|
||||
'CRITICAL': logging.CRITICAL,
|
||||
'SEVERE': logging.CRITICAL,
|
||||
'ERROR': logging.ERROR,
|
||||
'WARNING': logging.WARNING,
|
||||
'INFO': logging.INFO,
|
||||
'VERBOSE': VERBOSE,
|
||||
'DEBUG': logging.DEBUG,
|
||||
})
|
||||
LEVEL_NAMES: defaultdict[str, int] = defaultdict(
|
||||
lambda: logging.WARNING,
|
||||
{
|
||||
'CRITICAL': logging.CRITICAL,
|
||||
'SEVERE': logging.CRITICAL,
|
||||
'ERROR': logging.ERROR,
|
||||
'WARNING': logging.WARNING,
|
||||
'INFO': logging.INFO,
|
||||
'VERBOSE': VERBOSE,
|
||||
'DEBUG': logging.DEBUG,
|
||||
},
|
||||
)
|
||||
|
||||
VERBOSITY_MAP: defaultdict[int, int] = defaultdict(lambda: logging.NOTSET, {
|
||||
0: logging.INFO,
|
||||
1: VERBOSE,
|
||||
2: logging.DEBUG,
|
||||
})
|
||||
VERBOSITY_MAP: defaultdict[int, int] = defaultdict(
|
||||
lambda: logging.NOTSET,
|
||||
{
|
||||
0: logging.INFO,
|
||||
1: VERBOSE,
|
||||
2: logging.DEBUG,
|
||||
},
|
||||
)
|
||||
|
||||
COLOR_MAP: defaultdict[int, str] = defaultdict(lambda: 'blue', {
|
||||
logging.ERROR: 'darkred',
|
||||
logging.WARNING: 'red',
|
||||
logging.DEBUG: 'darkgray',
|
||||
})
|
||||
COLOR_MAP: defaultdict[int, str] = defaultdict(
|
||||
lambda: 'blue',
|
||||
{
|
||||
logging.ERROR: 'darkred',
|
||||
logging.WARNING: 'red',
|
||||
logging.DEBUG: 'darkgray',
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def getLogger(name: str) -> SphinxLoggerAdapter:
@ -126,7 +135,7 @@ class SphinxLoggerAdapter(logging.LoggerAdapter):
KEYWORDS = ['type', 'subtype', 'location', 'nonl', 'color', 'once']

def log( # type: ignore[override]
self, level: int | str, msg: str, *args: Any, **kwargs: Any,
self, level: int | str, msg: str, *args: Any, **kwargs: Any
) -> None:
if isinstance(level, int):
super().log(level, msg, *args, **kwargs)
@ -400,14 +409,14 @@ class _RaiseOnWarningFilter(logging.Filter):
except (TypeError, ValueError):
message = record.msg # use record.msg itself
if location := getattr(record, 'location', ''):
message = f"{location}:{message}"
message = f'{location}:{message}'
if record.exc_info is not None:
raise SphinxWarning(message) from record.exc_info[1]
raise SphinxWarning(message)


def is_suppressed_warning(
warning_type: str, sub_type: str, suppress_warnings: Set[str] | Sequence[str],
warning_type: str, sub_type: str, suppress_warnings: Set[str] | Sequence[str]
) -> bool:
"""Check whether the warning is suppressed or not."""
if warning_type is None or len(suppress_warnings) == 0:
@ -546,11 +555,11 @@ class WarningLogRecordTranslator(SphinxLogRecordTranslator):
def get_node_location(node: Node) -> str | None:
source, line = get_source_line(node)
if source and line:
return f"{abspath(source)}:{line}"
return f'{abspath(source)}:{line}'
if source:
return f"{abspath(source)}:"
return f'{abspath(source)}:'
if line:
return f"<unknown>:{line}"
return f'<unknown>:{line}'
return None


@ -580,7 +589,9 @@ class SafeEncodingWriter:
except UnicodeEncodeError:
# stream accept only str, not bytes. So, we encode and replace
# non-encodable characters, then decode them.
self.stream.write(data.encode(self.encoding, 'replace').decode(self.encoding))
self.stream.write(
data.encode(self.encoding, 'replace').decode(self.encoding)
)

def flush(self) -> None:
if hasattr(self.stream, 'flush'):

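The encode/replace/decode round trip in SafeEncodingWriter is the whole trick: characters the target stream cannot represent degrade to '?' instead of raising UnicodeEncodeError. Stand-alone:

data = 'naïve output'
print(data.encode('ascii', 'replace').decode('ascii'))  # 'na?ve output'
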
@ -112,7 +112,7 @@ def patfilter(names: Iterable[str], pat: str) -> list[str]:

def get_matching_files(
dirname: str | os.PathLike[str],
include_patterns: Iterable[str] = ("**",),
include_patterns: Iterable[str] = ('**',),
exclude_patterns: Iterable[str] = (),
) -> Iterator[str]:
"""Get all file names in a directory, recursively.
@ -132,8 +132,8 @@ def get_matching_files(

for root, dirs, files in os.walk(dirname, followlinks=True):
relative_root = os.path.relpath(root, dirname)
if relative_root == ".":
relative_root = "" # suppress dirname for files on the target dir
if relative_root == '.':
relative_root = '' # suppress dirname for files on the target dir

# Filter files
included_files = []

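A simplified stand-alone version of the walk-and-filter pattern above, assuming fnmatch-style patterns (the real helper compiles patterns once and also prunes excluded directories):

import os
from fnmatch import fnmatch

def matching_files(dirname, include=('**',), exclude=()):
    for root, _dirs, files in os.walk(dirname, followlinks=True):
        relative_root = os.path.relpath(root, dirname)
        if relative_root == '.':
            relative_root = ''  # suppress dirname for files on the target dir
        for name in files:
            path = os.path.join(relative_root, name)
            if any(fnmatch(path, pat) for pat in include) and not any(
                fnmatch(path, pat) for pat in exclude
            ):
                yield path
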
@ -14,7 +14,7 @@ def get_node_equation_number(writer: HTML5Translator, node: nodes.math_block) ->
if writer.builder.config.math_numfig and writer.builder.config.numfig:
figtype = 'displaymath'
if writer.builder.name == 'singlehtml':
key = f"{writer.docnames[-1]}/{figtype}" # type: ignore[has-type]
key = f'{writer.docnames[-1]}/{figtype}' # type: ignore[has-type]
else:
key = figtype

@ -35,7 +35,7 @@ explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<([^<]*?)>$', re.DOTALL)
caption_ref_re = explicit_title_re # b/w compat alias


N = TypeVar("N", bound=Node)
N = TypeVar('N', bound=Node)


class NodeMatcher(Generic[N]):
@ -135,8 +135,11 @@ def apply_source_workaround(node: Element) -> None:
# * rawsource of term node will have: ``term text : classifier1 : classifier2``
# * rawsource of classifier node will be None
if isinstance(node, nodes.classifier) and not node.rawsource:
logger.debug('[i18n] PATCH: %r to have source, line and rawsource: %s',
get_full_module_name(node), repr_domxml(node))
logger.debug(
'[i18n] PATCH: %r to have source, line and rawsource: %s',
get_full_module_name(node),
repr_domxml(node),
)
definition_list_item = node.parent
node.source = definition_list_item.source
node.line = definition_list_item.line - 1 # type: ignore[operator]
@ -145,24 +148,37 @@ def apply_source_workaround(node: Element) -> None:
# docutils-0.15 fills in rawsource attribute, but not in source.
node.source = node.parent.source
if isinstance(node, nodes.image) and node.source is None:
logger.debug('[i18n] PATCH: %r to have source, line: %s',
get_full_module_name(node), repr_domxml(node))
logger.debug(
'[i18n] PATCH: %r to have source, line: %s',
get_full_module_name(node),
repr_domxml(node),
)
node.source, node.line = node.parent.source, node.parent.line
if isinstance(node, nodes.title) and node.source is None:
logger.debug('[i18n] PATCH: %r to have source: %s',
get_full_module_name(node), repr_domxml(node))
logger.debug(
'[i18n] PATCH: %r to have source: %s',
get_full_module_name(node),
repr_domxml(node),
)
node.source, node.line = node.parent.source, node.parent.line
if isinstance(node, nodes.term):
logger.debug('[i18n] PATCH: %r to have rawsource: %s',
get_full_module_name(node), repr_domxml(node))
logger.debug(
'[i18n] PATCH: %r to have rawsource: %s',
get_full_module_name(node),
repr_domxml(node),
)
# strip classifier from rawsource of term
for classifier in reversed(list(node.parent.findall(nodes.classifier))):
node.rawsource = re.sub(r'\s*:\s*%s' % re.escape(classifier.astext()),
'', node.rawsource)
node.rawsource = re.sub(
r'\s*:\s*%s' % re.escape(classifier.astext()), '', node.rawsource
)
if isinstance(node, nodes.topic) and node.source is None:
# docutils-0.18 does not fill the source attribute of topic
logger.debug('[i18n] PATCH: %r to have source, line: %s',
get_full_module_name(node), repr_domxml(node))
logger.debug(
'[i18n] PATCH: %r to have source, line: %s',
get_full_module_name(node),
repr_domxml(node),
)
node.source, node.line = node.parent.source, node.parent.line

# workaround: literal_block under bullet list (#4913)
@ -178,14 +194,20 @@ def apply_source_workaround(node: Element) -> None:
return

# workaround: some docutils nodes doesn't have source, line.
if isinstance(node, (
nodes.rubric # #1305 rubric directive
| nodes.line # #1477 line node
| nodes.image # #3093 image directive in substitution
| nodes.field_name # #3335 field list syntax
)):
logger.debug('[i18n] PATCH: %r to have source and line: %s',
get_full_module_name(node), repr_domxml(node))
if isinstance(
node,
(
nodes.rubric # #1305 rubric directive
| nodes.line # #1477 line node
| nodes.image # #3093 image directive in substitution
| nodes.field_name # #3335 field list syntax
),
):
logger.debug(
'[i18n] PATCH: %r to have source and line: %s',
get_full_module_name(node),
repr_domxml(node),
)
try:
node.source = get_node_source(node)
except ValueError:
@ -217,24 +239,36 @@ def is_translatable(node: Node) -> bool:

if isinstance(node, nodes.TextElement):
if not node.source:
logger.debug('[i18n] SKIP %r because no node.source: %s',
get_full_module_name(node), repr_domxml(node))
logger.debug(
'[i18n] SKIP %r because no node.source: %s',
get_full_module_name(node),
repr_domxml(node),
)
return False # built-in message
if isinstance(node, IGNORED_NODES) and 'translatable' not in node:
logger.debug("[i18n] SKIP %r because node is in IGNORED_NODES "
"and no node['translatable']: %s",
get_full_module_name(node), repr_domxml(node))
logger.debug(
'[i18n] SKIP %r because node is in IGNORED_NODES '
"and no node['translatable']: %s",
get_full_module_name(node),
repr_domxml(node),
)
return False
if not node.get('translatable', True):
# not(node['translatable'] == True or node['translatable'] is None)
logger.debug("[i18n] SKIP %r because not node['translatable']: %s",
get_full_module_name(node), repr_domxml(node))
logger.debug(
"[i18n] SKIP %r because not node['translatable']: %s",
get_full_module_name(node),
repr_domxml(node),
)
return False
# <field_name>orphan</field_name>
# XXX ignore all metadata (== docinfo)
if isinstance(node, nodes.field_name) and (node.children[0] == 'orphan'):
logger.debug('[i18n] SKIP %r because orphan node: %s',
get_full_module_name(node), repr_domxml(node))
logger.debug(
'[i18n] SKIP %r because orphan node: %s',
get_full_module_name(node),
repr_domxml(node),
)
return False
return True

@ -249,7 +283,7 @@ LITERAL_TYPE_NODES = (
)
IMAGE_TYPE_NODES = (
nodes.image,
)
) # fmt: skip


def extract_messages(doctree: Element) -> Iterable[tuple[Element, str]]:
@ -272,7 +306,7 @@ def extract_messages(doctree: Element) -> Iterable[tuple[Element, str]]:
else:
msg = ''
elif isinstance(node, nodes.meta):
msg = node["content"]
msg = node['content']
else:
msg = node.rawsource.replace('\n', ' ').strip() # type: ignore[attr-defined]

@ -325,8 +359,9 @@ def traverse_translatable_index(
yield node, entries


def nested_parse_with_titles(state: RSTState, content: StringList, node: Node,
content_offset: int = 0) -> str:
def nested_parse_with_titles(
state: RSTState, content: StringList, node: Node, content_offset: int = 0
) -> str:
"""Version of state.nested_parse() that allows titles and does not require
titles to have the same decoration as the calling document.

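A typical call site for nested_parse_with_titles, sketched as a minimal directive (illustrative; a real extension would also register it via app.add_directive):

from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx.util.nodes import nested_parse_with_titles

class SketchDirective(Directive):
    has_content = True

    def run(self):
        node = nodes.container()
        # parse the directive body, allowing section titles inside it
        nested_parse_with_titles(self.state, self.content, node, self.content_offset)
        return [node]
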
@ -359,13 +394,13 @@ def split_explicit_title(text: str) -> tuple[bool, str, str]:
return False, text, text


indextypes = [
'single', 'pair', 'double', 'triple', 'see', 'seealso',
]
indextypes = ['single', 'pair', 'double', 'triple', 'see', 'seealso']


def process_index_entry(entry: str, targetid: str,
) -> list[tuple[str, str, str, str, str | None]]:
def process_index_entry(
entry: str,
targetid: str,
) -> list[tuple[str, str, str, str, str | None]]:
from sphinx.domains.python import pairindextypes

indexentries: list[tuple[str, str, str, str, str | None]] = []
@ -377,18 +412,25 @@ def process_index_entry(entry: str, targetid: str,
entry = entry[1:].lstrip()
for index_type in pairindextypes:
if entry.startswith(f'{index_type}:'):
value = entry[len(index_type) + 1:].strip()
value = entry[len(index_type) + 1 :].strip()
value = f'{pairindextypes[index_type]}; {value}'
# xref RemovedInSphinx90Warning
logger.warning(__('%r is deprecated for index entries (from entry %r). '
"Use 'pair: %s' instead."),
index_type, entry, value, type='index')
logger.warning(
__(
'%r is deprecated for index entries (from entry %r). '
"Use 'pair: %s' instead."
),
index_type,
entry,
value,
type='index',
)
indexentries.append(('pair', value, targetid, main, None))
break
else:
for index_type in indextypes:
if entry.startswith(f'{index_type}:'):
value = entry[len(index_type) + 1:].strip()
value = entry[len(index_type) + 1 :].strip()
if index_type == 'double':
index_type = 'pair'
indexentries.append((index_type, value, targetid, main, None))
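The prefixes handled above amount to a small grammar for index entries; a sketch of the expected result shape (tuples of (type, value, target id, main, category key); exact values assume current Sphinx behaviour):

from sphinx.util.nodes import process_index_entry

print(process_index_entry('pair: module; whatsit', 'index-0'))
# [('pair', 'module; whatsit', 'index-0', '', None)]
print(process_index_entry('!single: spam', 'index-1'))  # '!' marks a main entry
# [('single', 'spam', 'index-1', 'main', None)]
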
@ -430,13 +472,22 @@ def inline_all_toctrees(
try:
traversed.append(includefile)
logger.info(indent + colorfunc(includefile))
subtree = inline_all_toctrees(builder, docnameset, includefile,
builder.env.get_doctree(includefile),
colorfunc, traversed, indent)
subtree = inline_all_toctrees(
builder,
docnameset,
includefile,
builder.env.get_doctree(includefile),
colorfunc,
traversed,
indent,
)
docnameset.add(includefile)
except Exception:
logger.warning(__('toctree contains ref to nonexisting file %r'),
includefile, location=docname)
logger.warning(
__('toctree contains ref to nonexisting file %r'),
includefile,
location=docname,
)
else:
sof = addnodes.start_of_file(docname=includefile)
sof.children = subtree.children
@ -478,57 +529,61 @@ def _make_id(string: str) -> str:
_non_id_chars = re.compile('[^a-zA-Z0-9._]+')
_non_id_at_ends = re.compile('^[-0-9._]+|-+$')
_non_id_translate = {
0x00f8: 'o', # o with stroke
0x0111: 'd', # d with stroke
0x0127: 'h', # h with stroke
0x0131: 'i', # dotless i
0x0142: 'l', # l with stroke
0x0167: 't', # t with stroke
0x0180: 'b', # b with stroke
0x0183: 'b', # b with topbar
0x0188: 'c', # c with hook
0x018c: 'd', # d with topbar
0x0192: 'f', # f with hook
0x0199: 'k', # k with hook
0x019a: 'l', # l with bar
0x019e: 'n', # n with long right leg
0x01a5: 'p', # p with hook
0x01ab: 't', # t with palatal hook
0x01ad: 't', # t with hook
0x01b4: 'y', # y with hook
0x01b6: 'z', # z with stroke
0x01e5: 'g', # g with stroke
0x0225: 'z', # z with hook
0x0234: 'l', # l with curl
0x0235: 'n', # n with curl
0x0236: 't', # t with curl
0x0237: 'j', # dotless j
0x023c: 'c', # c with stroke
0x023f: 's', # s with swash tail
0x0240: 'z', # z with swash tail
0x0247: 'e', # e with stroke
0x0249: 'j', # j with stroke
0x024b: 'q', # q with hook tail
0x024d: 'r', # r with stroke
0x024f: 'y', # y with stroke
0x00F8: 'o', # o with stroke
0x0111: 'd', # d with stroke
0x0127: 'h', # h with stroke
0x0131: 'i', # dotless i
0x0142: 'l', # l with stroke
0x0167: 't', # t with stroke
0x0180: 'b', # b with stroke
0x0183: 'b', # b with topbar
0x0188: 'c', # c with hook
0x018C: 'd', # d with topbar
0x0192: 'f', # f with hook
0x0199: 'k', # k with hook
0x019A: 'l', # l with bar
0x019E: 'n', # n with long right leg
0x01A5: 'p', # p with hook
0x01AB: 't', # t with palatal hook
0x01AD: 't', # t with hook
0x01B4: 'y', # y with hook
0x01B6: 'z', # z with stroke
0x01E5: 'g', # g with stroke
0x0225: 'z', # z with hook
0x0234: 'l', # l with curl
0x0235: 'n', # n with curl
0x0236: 't', # t with curl
0x0237: 'j', # dotless j
0x023C: 'c', # c with stroke
0x023F: 's', # s with swash tail
0x0240: 'z', # z with swash tail
0x0247: 'e', # e with stroke
0x0249: 'j', # j with stroke
0x024B: 'q', # q with hook tail
0x024D: 'r', # r with stroke
0x024F: 'y', # y with stroke
}
_non_id_translate_digraphs = {
0x00df: 'sz', # ligature sz
0x00e6: 'ae', # ae
0x0153: 'oe', # ligature oe
0x0238: 'db', # db digraph
0x0239: 'qp', # qp digraph
0x00DF: 'sz', # ligature sz
0x00E6: 'ae', # ae
0x0153: 'oe', # ligature oe
0x0238: 'db', # db digraph
0x0239: 'qp', # qp digraph
}

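Only the hex case of the keys changes in this hunk; both tables are ordinary str.translate maps. How they act on a string:

_translate = {0x00F8: 'o', 0x0142: 'l'}   # excerpt of _non_id_translate
_digraphs = {0x00DF: 'sz', 0x00E6: 'ae'}  # excerpt of the digraph table
print('søren-straße'.translate({**_translate, **_digraphs}))  # 'soren-strasze'
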
def make_id(env: BuildEnvironment, document: nodes.document,
prefix: str = '', term: str | None = None) -> str:
def make_id(
env: BuildEnvironment,
document: nodes.document,
prefix: str = '',
term: str | None = None,
) -> str:
"""Generate an appropriate node_id for given *prefix* and *term*."""
node_id = None
if prefix:
idformat = prefix + "-%s"
idformat = prefix + '-%s'
else:
idformat = (document.settings.id_prefix or "id") + "%s"
idformat = (document.settings.id_prefix or 'id') + '%s'

# try to generate node_id by *term*
if prefix and term:
@ -547,27 +602,36 @@ def make_id(env: BuildEnvironment, document: nodes.document,
return node_id


def find_pending_xref_condition(node: addnodes.pending_xref, condition: str,
) -> Element | None:
def find_pending_xref_condition(
node: addnodes.pending_xref, condition: str
) -> Element | None:
"""Pick matched pending_xref_condition node up from the pending_xref."""
for subnode in node:
if (isinstance(subnode, addnodes.pending_xref_condition) and
subnode.get('condition') == condition):
if (
isinstance(subnode, addnodes.pending_xref_condition)
and subnode.get('condition') == condition
):
return subnode
return None


def make_refnode(builder: Builder, fromdocname: str, todocname: str, targetid: str | None,
child: Node | list[Node], title: str | None = None,
) -> nodes.reference:
def make_refnode(
builder: Builder,
fromdocname: str,
todocname: str,
targetid: str | None,
child: Node | list[Node],
title: str | None = None,
) -> nodes.reference:
"""Shortcut to create a reference node."""
node = nodes.reference('', '', internal=True)
if fromdocname == todocname and targetid:
node['refid'] = targetid
else:
if targetid:
node['refuri'] = (builder.get_relative_uri(fromdocname, todocname) +
'#' + targetid)
node['refuri'] = (
builder.get_relative_uri(fromdocname, todocname) + '#' + targetid
)
else:
node['refuri'] = builder.get_relative_uri(fromdocname, todocname)
if title:
@ -577,8 +641,9 @@ def make_refnode(builder: Builder, fromdocname: str, todocname: str, targetid: s


def set_source_info(directive: Directive, node: Node) -> None:
node.source, node.line = \
directive.state_machine.get_source_and_line(directive.lineno)
node.source, node.line = directive.state_machine.get_source_and_line(
directive.lineno
)


def set_role_source_info(inliner: Inliner, lineno: int, node: Node) -> None:
@ -635,7 +700,8 @@ def _only_node_keep_children(node: addnodes.only, tags: Tags) -> bool:
logger.warning(
__('exception while evaluating only directive expression: %s'),
err,
location=node)
location=node,
)
return True


@ -651,10 +717,10 @@ def _copy_except__document(el: Element) -> Element:
newnode.rawsource = el.rawsource
newnode.tagname = el.tagname
# copied in Element.copy()
newnode.attributes = {k: (v
if k not in {'ids', 'classes', 'names', 'dupnames', 'backrefs'}
else v[:])
for k, v in el.attributes.items()}
newnode.attributes = {
k: (v if k not in {'ids', 'classes', 'names', 'dupnames', 'backrefs'} else v[:])
for k, v in el.attributes.items()
}
newnode.line = el.line
newnode.source = el.source
return newnode

@ -25,7 +25,7 @@ if TYPE_CHECKING:
# Define SEP as a manifest constant, not so much because we expect it to change
# in the future as to avoid the suspicion that a stray "/" in the code is a
# hangover from more *nix-oriented origins.
SEP = "/"
SEP = '/'


def os_path(canonical_path: str, /) -> str:
@ -115,21 +115,23 @@ def copyfile(
raise FileNotFoundError(msg)

if (
not (dest_exists := dest.exists()) or
not (dest_exists := dest.exists())
# comparison must be done using shallow=False since
# two different files might have the same size
not filecmp.cmp(source, dest, shallow=False)
or not filecmp.cmp(source, dest, shallow=False)
):
if not force and dest_exists:
# sphinx.util.logging imports sphinx.util.osutil,
# so use a local import to avoid circular imports
from sphinx.util import logging

logger = logging.getLogger(__name__)

msg = __('Aborted attempted copy from %s to %s '
'(the destination path has existing data).')
logger.warning(msg, source, dest,
type='misc', subtype='copy_overwrite')
msg = __(
'Aborted attempted copy from %s to %s '
'(the destination path has existing data).'
)
logger.warning(msg, source, dest, type='misc', subtype='copy_overwrite')
return

shutil.copyfile(source, dest)
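The shallow=False comment above matters: with the default shallow=True, filecmp.cmp may report equality from os.stat metadata (size, mtime, mode) without reading the files. A self-contained demonstration:

import filecmp
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    a, b = os.path.join(tmp, 'a'), os.path.join(tmp, 'b')
    with open(a, 'w') as f:
        f.write('aaaa')
    with open(b, 'w') as f:
        f.write('bbbb')  # same size, different content
    st = os.stat(a)
    os.utime(b, (st.st_atime, st.st_mtime))  # align mtimes as well
    print(filecmp.cmp(a, b))                 # True: stat signatures match
    print(filecmp.cmp(a, b, shallow=False))  # False: contents compared
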
@ -149,8 +151,9 @@ def make_filename_from_project(project: str) -> str:
return make_filename(project.removesuffix(' Documentation')).lower()


def relpath(path: str | os.PathLike[str],
start: str | os.PathLike[str] | None = os.curdir) -> str:
def relpath(
path: str | os.PathLike[str], start: str | os.PathLike[str] | None = os.curdir
) -> str:
"""Return a relative filepath to *path* either from the current directory or
from an optional *start* directory.

@ -241,7 +244,7 @@ class FileAvoidWrite:
return self

def __exit__(
self, exc_type: type[Exception], exc_value: Exception, traceback: Any,
self, exc_type: type[Exception], exc_value: Exception, traceback: Any
) -> bool:
self.close()
return True

@ -10,6 +10,7 @@ from typing import TYPE_CHECKING, Any

try:
import multiprocessing

HAS_MULTIPROCESSING = True
except ImportError:
HAS_MULTIPROCESSING = False
@ -33,7 +34,7 @@ class SerialTasks:
pass

def add_task(
self, task_func: Callable, arg: Any = None, result_func: Callable | None = None,
self, task_func: Callable, arg: Any = None, result_func: Callable | None = None
) -> None:
if arg is not None:
res = task_func(arg)
@ -83,7 +84,7 @@ class ParallelTasks:
pipe.send((failed, collector.logs, ret))

def add_task(
self, task_func: Callable, arg: Any = None, result_func: Callable | None = None,
self, task_func: Callable, arg: Any = None, result_func: Callable | None = None
) -> None:
tid = self._taskid
self._taskid += 1
@ -156,4 +157,4 @@ def make_chunks(arguments: Sequence[str], nproc: int, maxbatch: int = 10) -> lis
if rest:
nchunks += 1
# partition documents in "chunks" that will be written by one Process
return [arguments[i * chunksize:(i + 1) * chunksize] for i in range(nchunks)]
return [arguments[i * chunksize : (i + 1) * chunksize] for i in range(nchunks)]

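The slice in the last line is the whole partitioning step. A worked example with an illustrative chunksize (the real helper derives it from nproc and maxbatch):

arguments = [f'doc{i}' for i in range(7)]
chunksize = 2
nchunks, rest = divmod(len(arguments), chunksize)
if rest:
    nchunks += 1
print([arguments[i * chunksize : (i + 1) * chunksize] for i in range(nchunks)])
# [['doc0', 'doc1'], ['doc2', 'doc3'], ['doc4', 'doc5'], ['doc6']]
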
@ -53,7 +53,7 @@ def nested_parse_to_nodes(
"""
document = state.document
content = _text_to_string_list(
text, source=source, tab_width=document.settings.tab_width,
text, source=source, tab_width=document.settings.tab_width
)
node = Element() # Anonymous container for parsing
node.document = document
@ -62,7 +62,9 @@ def nested_parse_to_nodes(
state.nested_parse(content, offset, node, match_titles=allow_section_headings)
else:
with _fresh_title_style_context(state):
state.nested_parse(content, offset, node, match_titles=allow_section_headings)
state.nested_parse(
content, offset, node, match_titles=allow_section_headings
)
return node.children


@ -84,7 +86,7 @@ def _fresh_title_style_context(state: RSTState) -> Iterator[None]:


def _text_to_string_list(
text: str | StringList, /, *, source: str, tab_width: int,
text: str | StringList, /, *, source: str, tab_width: int
) -> StringList:
# Doesn't really belong in this module, but avoids circular imports.
if isinstance(text, StringList):

@ -10,13 +10,13 @@ LEN_DEPTH = 22

DEPTH_CHUNK_LEN = struct.pack('!i', 10)
DEPTH_CHUNK_START = b'tEXtDepth\x00'
IEND_CHUNK = b'\x00\x00\x00\x00IEND\xAE\x42\x60\x82'
IEND_CHUNK = b'\x00\x00\x00\x00IEND\xae\x42\x60\x82'


def read_png_depth(filename: str) -> int | None:
"""Read the special tEXt chunk indicating the depth from a PNG file."""
with open(filename, 'rb') as f:
f.seek(- (LEN_IEND + LEN_DEPTH), 2)
f.seek(-(LEN_IEND + LEN_DEPTH), 2)
depthchunk = f.read(LEN_DEPTH)
if not depthchunk.startswith(DEPTH_CHUNK_LEN + DEPTH_CHUNK_START):
# either not a PNG file or not containing the depth chunk
@ -37,7 +37,7 @@ def write_png_depth(filename: str, depth: int) -> None:
# overwrite it with the depth chunk
f.write(DEPTH_CHUNK_LEN + DEPTH_CHUNK_START + data)
# calculate the checksum over chunk name and data
crc = binascii.crc32(DEPTH_CHUNK_START + data) & 0xffffffff
crc = binascii.crc32(DEPTH_CHUNK_START + data) & 0xFFFFFFFF
f.write(struct.pack('!I', crc))
# replace the IEND chunk
f.write(IEND_CHUNK)
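The constants above hand-assemble one PNG chunk: a big-endian length, the 'tEXt' type with the 'Depth\x00' keyword, the payload, and a CRC-32 over type plus data. A sketch of the same layout (the 4-byte packed payload is an assumption; the hunk does not show how data is built):

import binascii
import struct

data = struct.pack('!i', 32)         # assumed 4-byte depth payload
type_and_keyword = b'tEXtDepth\x00'  # chunk type + keyword
length = struct.pack('!i', 10)       # keyword (6) + payload (4) bytes
crc = struct.pack('!I', binascii.crc32(type_and_keyword + data) & 0xFFFFFFFF)
chunk = length + type_and_keyword + data + crc
print(len(chunk))  # 22, matching LEN_DEPTH above
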
@ -11,8 +11,10 @@ from urllib3.exceptions import InsecureRequestWarning

import sphinx

_USER_AGENT = (f'Mozilla/5.0 (X11; Linux x86_64; rv:100.0) Gecko/20100101 Firefox/100.0 '
f'Sphinx/{sphinx.__version__}')
_USER_AGENT = (
f'Mozilla/5.0 (X11; Linux x86_64; rv:100.0) Gecko/20100101 Firefox/100.0 '
f'Sphinx/{sphinx.__version__}'
)


def _get_tls_cacert(url: str, certs: str | dict[str, str] | None) -> str | bool:
@ -49,7 +51,9 @@ def head(url: str, **kwargs: Any) -> requests.Response:

class _Session(requests.Session):
def request( # type: ignore[override]
self, method: str, url: str,
self,
method: str,
url: str,
_user_agent: str = '',
_tls_info: tuple[bool, str | dict[str, str] | None] = (), # type: ignore[assignment]
**kwargs: Any,
@ -72,5 +76,5 @@ class _Session(requests.Session):

with warnings.catch_warnings():
# ignore InsecureRequestWarning if verify=False
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
warnings.filterwarnings('ignore', category=InsecureRequestWarning)
return super().request(method, url, **kwargs)
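warnings.catch_warnings scopes the filter to the single request instead of mutating process-wide state. The same pattern stand-alone:

import warnings

with warnings.catch_warnings():
    warnings.filterwarnings('ignore', category=UserWarning)
    warnings.warn('suppressed inside the block', UserWarning)
warnings.warn('visible again outside the block', UserWarning)
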
@ -29,8 +29,8 @@ symbols_re = re.compile(r'([!-\-/:-@\[-`{-~])') # symbols without dot(0x2e)
SECTIONING_CHARS = ['=', '-', '~']

# width of characters
WIDECHARS: dict[str, str] = defaultdict(lambda: "WF") # WF: Wide + Full-width
WIDECHARS["ja"] = "WFA" # In Japanese, Ambiguous characters also have double width
WIDECHARS: dict[str, str] = defaultdict(lambda: 'WF') # WF: Wide + Full-width
WIDECHARS['ja'] = 'WFA' # In Japanese, Ambiguous characters also have double width


def escape(text: str) -> str:
@ -41,6 +41,7 @@ def escape(text: str) -> str:

def textwidth(text: str, widechars: str = 'WF') -> int:
"""Get width of text."""

def charwidth(char: str, widechars: str) -> int:
if east_asian_width(char) in widechars:
return 2
@ -103,7 +104,8 @@ def append_epilog(content: StringList, epilog: str) -> None:
if epilog:
if len(content) > 0:
source, lineno = content.info(-1)
lineno = cast(int, lineno) # lineno will never be None, since len(content) > 0
# lineno will never be None, since len(content) > 0
lineno = cast(int, lineno)
else:
source = '<generated>'
lineno = 0
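charwidth above is just unicodedata.east_asian_width checked against the configured category string; 'WFA' (the Japanese setting) additionally counts Ambiguous characters as wide:

from unicodedata import east_asian_width

print(east_asian_width('a'))   # 'Na': narrow, width 1
print(east_asian_width('あ'))  # 'W': wide, width 2
print(east_asian_width('±'))   # 'A': ambiguous, width 2 only under 'WFA'
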
@ -69,8 +69,11 @@ class Tags:

@property
def tags(self) -> dict[str, Literal[True]]:
warnings.warn('Tags.tags is deprecated, use methods on Tags.',
RemovedInSphinx90Warning, stacklevel=2)
warnings.warn(
'Tags.tags is deprecated, use methods on Tags.',
RemovedInSphinx90Warning,
stacklevel=2,
)
return dict.fromkeys(self._tags, True)

def eval_condition(self, condition: str) -> bool:

@ -49,7 +49,7 @@ class FileRenderer(BaseRenderer):

@classmethod
def render_from_file(
cls: type[FileRenderer], filename: str, context: dict[str, Any],
cls: type[FileRenderer], filename: str, context: dict[str, Any]
) -> str:
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
@ -57,21 +57,26 @@ class FileRenderer(BaseRenderer):


class SphinxRenderer(FileRenderer):
def __init__(self, template_path: Sequence[str | os.PathLike[str]] | None = None) -> None:
def __init__(
self, template_path: Sequence[str | os.PathLike[str]] | None = None
) -> None:
if template_path is None:
template_path = os.path.join(package_dir, 'templates')
super().__init__(template_path)

@classmethod
def render_from_file(
cls: type[FileRenderer], filename: str, context: dict[str, Any],
cls: type[FileRenderer], filename: str, context: dict[str, Any]
) -> str:
return FileRenderer.render_from_file(filename, context)


class LaTeXRenderer(SphinxRenderer):
def __init__(self, template_path: Sequence[str | os.PathLike[str]] | None = None,
latex_engine: str | None = None) -> None:
def __init__(
self,
template_path: Sequence[str | os.PathLike[str]] | None = None,
latex_engine: str | None = None,
) -> None:
if template_path is None:
template_path = [os.path.join(package_dir, 'templates', 'latex')]
super().__init__(template_path)
@ -93,8 +98,11 @@ class LaTeXRenderer(SphinxRenderer):


class ReSTRenderer(SphinxRenderer):
def __init__(self, template_path: Sequence[str | os.PathLike[str]] | None = None,
language: str | None = None) -> None:
def __init__(
self,
template_path: Sequence[str | os.PathLike[str]] | None = None,
language: str | None = None,
) -> None:
super().__init__(template_path)

# add language to environment
@ -109,9 +117,12 @@ class ReSTRenderer(SphinxRenderer):
class SphinxTemplateLoader(BaseLoader):
"""A loader supporting template inheritance"""

def __init__(self, confdir: str | os.PathLike[str],
templates_paths: Sequence[str | os.PathLike[str]],
system_templates_paths: Sequence[str | os.PathLike[str]]) -> None:
def __init__(
self,
confdir: str | os.PathLike[str],
templates_paths: Sequence[str | os.PathLike[str]],
system_templates_paths: Sequence[str | os.PathLike[str]],
) -> None:
self.loaders = []
self.sysloaders = []

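render_from_file splits the path into a search directory and a template name, as the dirname/basename lines above show. A rough stand-alone equivalent, assuming BaseRenderer wraps a plain Jinja2 environment:

import os
from jinja2 import Environment, FileSystemLoader

def render_from_file(filename, context):
    dirname = os.path.dirname(filename)
    basename = os.path.basename(filename)
    env = Environment(loader=FileSystemLoader(dirname))
    return env.get_template(basename).render(context)
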
@ -47,8 +47,8 @@ ascii_tex_replacements = [
# complications (whether by {}, or a macro) and is not done
# the next two require textcomp package
("'", r'\textquotesingle{}'), # else ' renders curly, and '' is a ligature
('`', r'\textasciigrave{}'), # else \` and \`\` render curly
('<', r'\textless{}'), # < is inv. exclam in OT1, << is a T1-ligature
('`', r'\textasciigrave{}'), # else \` and \`\` render curly
('<', r'\textless{}'), # < is inv. exclam in OT1, << is a T1-ligature
('>', r'\textgreater{}'), # > is inv. quest. mark in 0T1, >> a T1-ligature
]

@ -92,8 +92,9 @@ PathMatcher: TypeAlias = Callable[[str], bool]

# common role functions
if TYPE_CHECKING:

class RoleFunction(Protocol):
def __call__(
def __call__( # NoQA: E704
self,
name: str,
rawtext: str,
@ -103,8 +104,8 @@ if TYPE_CHECKING:
/,
options: dict[str, Any] | None = None,
content: Sequence[str] = (),
) -> tuple[list[nodes.Node], list[nodes.system_message]]:
...
) -> tuple[list[nodes.Node], list[nodes.system_message]]: ...

else:
RoleFunction: TypeAlias = Callable[
[str, str, str, int, Inliner, dict[str, Any], Sequence[str]],
@ -126,19 +127,17 @@ if TYPE_CHECKING:
_T_co = TypeVar('_T_co', str, bytes, covariant=True)

class _ReadableStream(Protocol[_T_co]):
def read(self, size: int = ...) -> _T_co:
...
def read(self, size: int = ...) -> _T_co: ... # NoQA: E704

def __enter__(self) -> Self:
...
def __enter__(self) -> Self: ... # NoQA: E704

def __exit__(
def __exit__( # NoQA: E704
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None
) -> None:
...
exc_tb: TracebackType | None,
) -> None: ...


# inventory data on memory
InventoryItem: TypeAlias = tuple[
@ -189,7 +188,9 @@ def get_type_hints(
from sphinx.util.inspect import safe_getattr # lazy loading

try:
return typing.get_type_hints(obj, globalns, localns, include_extras=include_extras)
return typing.get_type_hints(
obj, globalns, localns, include_extras=include_extras
)
except NameError:
# Failed to evaluate ForwardRef (maybe TYPE_CHECKING)
return safe_getattr(obj, '__annotations__', {})
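include_extras=True is what keeps Annotated metadata visible to callers of this wrapper; by default typing strips the extras:

import typing
from typing import Annotated

class Point:
    x: Annotated[int, 'unit: px']

print(typing.get_type_hints(Point))
# {'x': <class 'int'>}
print(typing.get_type_hints(Point, include_extras=True))
# {'x': typing.Annotated[int, 'unit: px']}
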
@ -212,7 +213,10 @@ def is_system_TypeVar(typ: Any) -> bool:

def _is_annotated_form(obj: Any) -> TypeIs[Annotated[Any, ...]]:
"""Check if *obj* is an annotated type."""
return typing.get_origin(obj) is Annotated or str(obj).startswith('typing.Annotated')
return (
typing.get_origin(obj) is Annotated
or str(obj).startswith('typing.Annotated')
) # fmt: skip


def _is_unpack_form(obj: Any) -> bool:
@ -286,18 +290,21 @@ def restify(cls: Any, mode: _RestifyMode = 'fully-qualified-except-typing') -> s
elif dataclasses.is_dataclass(m):
# use restify for the repr of field values rather than repr
d_fields = ', '.join([
fr"{f.name}=\ {restify(getattr(m, f.name), mode)}"
for f in dataclasses.fields(m) if f.repr
rf'{f.name}=\ {restify(getattr(m, f.name), mode)}'
for f in dataclasses.fields(m)
if f.repr
])
meta_args.append(fr'{restify(type(m), mode)}\ ({d_fields})')
meta_args.append(rf'{restify(type(m), mode)}\ ({d_fields})')
else:
meta_args.append(repr(m))
meta = ', '.join(meta_args)
if sys.version_info[:2] <= (3, 11):
# Hardcoded to fix errors on Python 3.11 and earlier.
return fr':py:class:`~typing.Annotated`\ [{args}, {meta}]'
return (f':py:class:`{module_prefix}{cls.__module__}.{cls.__name__}`'
fr'\ [{args}, {meta}]')
return rf':py:class:`~typing.Annotated`\ [{args}, {meta}]'
return (
f':py:class:`{module_prefix}{cls.__module__}.{cls.__name__}`'
rf'\ [{args}, {meta}]'
)
elif isinstance(cls, NewType):
return f':py:class:`{module_prefix}{cls.__module__}.{cls.__name__}`' # type: ignore[attr-defined]
elif isinstance(cls, types.UnionType):
@ -307,14 +314,14 @@ def restify(cls: Any, mode: _RestifyMode = 'fully-qualified-except-typing') -> s
elif cls.__module__ in ('__builtin__', 'builtins'):
if hasattr(cls, '__args__'):
if not cls.__args__: # Empty tuple, list, ...
return fr':py:class:`{cls.__name__}`\ [{cls.__args__!r}]'
return rf':py:class:`{cls.__name__}`\ [{cls.__args__!r}]'

concatenated_args = ', '.join(restify(arg, mode) for arg in cls.__args__)
return fr':py:class:`{cls.__name__}`\ [{concatenated_args}]'
concatenated_args = ', '.join(
restify(arg, mode) for arg in cls.__args__
)
return rf':py:class:`{cls.__name__}`\ [{concatenated_args}]'
return f':py:class:`{cls.__name__}`'
elif (isgenericalias(cls)
and cls_module_is_typing
and cls.__origin__ is Union):
elif isgenericalias(cls) and cls_module_is_typing and cls.__origin__ is Union:
# *cls* is defined in ``typing``, and thus ``__args__`` must exist
return ' | '.join(restify(a, mode) for a in cls.__args__)
elif isgenericalias(cls):
@ -338,19 +345,20 @@ def restify(cls: Any, mode: _RestifyMode = 'fully-qualified-except-typing') -> s
if (
(cls_module_is_typing and cls.__name__ == 'Callable')
or (cls.__module__ == 'collections.abc' and cls.__name__ == 'Callable')
):
): # fmt: skip
args = ', '.join(restify(a, mode) for a in __args__[:-1])
returns = restify(__args__[-1], mode)
return fr'{text}\ [[{args}], {returns}]'
return rf'{text}\ [[{args}], {returns}]'

if cls_module_is_typing and cls.__origin__.__name__ == 'Literal':
args = ', '.join(_format_literal_arg_restify(a, mode=mode)
for a in cls.__args__)
return fr'{text}\ [{args}]'
args = ', '.join(
_format_literal_arg_restify(a, mode=mode) for a in cls.__args__
)
return rf'{text}\ [{args}]'

# generic representation of the parameters
args = ', '.join(restify(a, mode) for a in __args__)
return fr'{text}\ [{args}]'
return rf'{text}\ [{args}]'
elif isinstance(cls, typing._SpecialForm):
return f':py:obj:`~{cls.__module__}.{cls.__name__}`' # type: ignore[attr-defined]
elif sys.version_info[:2] >= (3, 11) and cls is typing.Any:
@ -375,7 +383,9 @@ def _format_literal_arg_restify(arg: Any, /, *, mode: str) -> str:
enum_cls = arg.__class__
if mode == 'smart' or enum_cls.__module__ == 'typing':
# MyEnum.member
return f':py:attr:`~{enum_cls.__module__}.{enum_cls.__qualname__}.{arg.name}`'
return (
f':py:attr:`~{enum_cls.__module__}.{enum_cls.__qualname__}.{arg.name}`'
)
# module.MyEnum.member
return f':py:attr:`{enum_cls.__module__}.{enum_cls.__qualname__}.{arg.name}`'
return repr(arg)
@ -431,7 +441,10 @@ def stringify_annotation(
# Extract the annotation's base type by considering formattable cases
if isinstance(annotation, TypeVar) and not _is_unpack_form(annotation):
# typing_extensions.Unpack is incorrectly determined as a TypeVar
if annotation_module_is_typing and mode in {'fully-qualified-except-typing', 'smart'}:
if annotation_module_is_typing and mode in {
'fully-qualified-except-typing',
'smart',
}:
return annotation_name
return module_prefix + f'{annotation_module}.{annotation_name}'
elif isinstance(annotation, NewType):
@ -461,7 +474,9 @@ def stringify_annotation(

module_prefix = f'{annotation_module}.'
annotation_forward_arg: str | None = getattr(annotation, '__forward_arg__', None)
if annotation_qualname or (annotation_module_is_typing and not annotation_forward_arg):
if annotation_qualname or (
annotation_module_is_typing and not annotation_forward_arg
):
if mode == 'smart':
module_prefix = f'~{module_prefix}'
if annotation_module_is_typing and mode == 'fully-qualified-except-typing':
@ -484,7 +499,8 @@ def stringify_annotation(
# in this case, we know that the annotation is a member
# of ``typing`` and all of them define ``__origin__``
qualname = stringify_annotation(
annotation.__origin__, 'fully-qualified-except-typing',
annotation.__origin__,
'fully-qualified-except-typing',
).replace('typing.', '') # ex. Union
elif annotation_qualname:
qualname = annotation_qualname
@ -505,21 +521,25 @@ def stringify_annotation(
if (
qualname in {'Union', 'types.UnionType'}
and all(getattr(a, '__origin__', ...) is typing.Literal for a in annotation_args)
):
): # fmt: skip
# special case to flatten a Union of Literals into a literal
flattened_args = typing.Literal[annotation_args].__args__ # type: ignore[attr-defined]
args = ', '.join(_format_literal_arg_stringify(a, mode=mode)
for a in flattened_args)
args = ', '.join(
_format_literal_arg_stringify(a, mode=mode) for a in flattened_args
)
return f'{module_prefix}Literal[{args}]'
if qualname in {'Optional', 'Union', 'types.UnionType'}:
return ' | '.join(stringify_annotation(a, mode) for a in annotation_args)
elif qualname == 'Callable':
args = ', '.join(stringify_annotation(a, mode) for a in annotation_args[:-1])
args = ', '.join(
stringify_annotation(a, mode) for a in annotation_args[:-1]
)
returns = stringify_annotation(annotation_args[-1], mode)
return f'{module_prefix}Callable[[{args}], {returns}]'
elif qualname == 'Literal':
args = ', '.join(_format_literal_arg_stringify(a, mode=mode)
for a in annotation_args)
args = ', '.join(
_format_literal_arg_stringify(a, mode=mode) for a in annotation_args
)
return f'{module_prefix}Literal[{args}]'
elif _is_annotated_form(annotation): # for py310+
args = stringify_annotation(annotation_args[0], mode)
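The flattening special case above leans on typing itself: on recent Pythons, subscripting Literal with arguments that are themselves Literals splices their members into one tuple:

import typing

nested = (typing.Literal[1, 2], typing.Literal[3])
print(typing.Literal[nested].__args__)  # (1, 2, 3)
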
@ -530,10 +550,13 @@ def stringify_annotation(
elif dataclasses.is_dataclass(m):
# use stringify_annotation for the repr of field values rather than repr
d_fields = ', '.join([
f"{f.name}={stringify_annotation(getattr(m, f.name), mode)}"
for f in dataclasses.fields(m) if f.repr
f'{f.name}={stringify_annotation(getattr(m, f.name), mode)}'
for f in dataclasses.fields(m)
if f.repr
])
meta_args.append(f'{stringify_annotation(type(m), mode)}({d_fields})')
meta_args.append(
f'{stringify_annotation(type(m), mode)}({d_fields})'
)
else:
meta_args.append(repr(m))
meta = ', '.join(meta_args)
@ -568,7 +591,7 @@ def _format_literal_arg_stringify(arg: Any, /, *, mode: str) -> str:

# deprecated name -> (object to return, canonical path or empty string, removal version)
_DEPRECATED_OBJECTS: dict[str, tuple[Any, str, tuple[int, int]]] = {
}
} # fmt: skip


def __getattr__(name: str) -> Any: