Enable automatic formatting for `sphinx/util/` (#12957)

parent be52db2bb3, commit 7ece6fc1e7
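The first hunk below drops `sphinx/util/*` from the formatter's exclude list in `pyproject.toml`; every hunk after it is the mechanical reformatting that results — presumably produced by running the project's formatter (Ruff) over `sphinx/util/` — plus occasional `# fmt: skip` / `# fmt: off` markers to keep hand-aligned code intact. No behaviour change is intended.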
pyproject.toml

@@ -483,6 +483,5 @@ exclude = [
     "sphinx/search/*",
     "sphinx/testing/*",
     "sphinx/transforms/*",
-    "sphinx/util/*",
     "sphinx/writers/*",
 ]
sphinx/util/__init__.py

@@ -42,6 +42,7 @@ url_re: re.Pattern[str] = re.compile(r'(?P<schema>.+)://.*')
 
 # High-level utility functions.
 
+
 def docname_join(basedocname: str, docname: str) -> str:
     return posixpath.normpath(posixpath.join('/' + basedocname, '..', docname))[1:]
 

@@ -82,16 +83,23 @@ class UnicodeDecodeErrorHandler:
         if lineend == -1:
             lineend = len(error.object)
         lineno = error.object.count(b'\n', 0, error.start) + 1
-        logger.warning(__('undecodable source characters, replacing with "?": %r'),
-                       (error.object[linestart + 1:error.start] + b'>>>' +
-                        error.object[error.start:error.end] + b'<<<' +
-                        error.object[error.end:lineend]),
-                       location=(self.docname, lineno))
+        logger.warning(
+            __('undecodable source characters, replacing with "?": %r'),
+            (
+                error.object[linestart + 1 : error.start]
+                + b'>>>'
+                + error.object[error.start : error.end]
+                + b'<<<'
+                + error.object[error.end : lineend]
+            ),
+            location=(self.docname, lineno),
+        )
         return ('?', error.end)
 
 
 # Low-level utility functions and classes.
 
+
 def parselinenos(spec: str, total: int) -> list[int]:
     """Parse a line number spec (such as "1,2,4-6") and return a list of
     wanted line numbers.
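UnicodeDecodeErrorHandler in the hunk above is a codecs error handler: it logs the context of the undecodable bytes between >>>/<<< markers and substitutes '?'. A minimal sketch of the same protocol, using an illustrative handler name rather than the one Sphinx registers:

    import codecs

    def replace_with_question_mark(error: UnicodeError) -> tuple[str, int]:
        # Same contract as the handler above: emit '?', resume after the bad bytes.
        assert isinstance(error, UnicodeDecodeError)
        return ('?', error.end)

    codecs.register_error('example-handler', replace_with_question_mark)
    print(b'caf\xff'.decode('utf-8', errors='example-handler'))  # -> 'caf?'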
@@ -136,12 +144,16 @@ def isurl(url: str) -> bool:
 
 # deprecated name -> (object to return, canonical path or empty string)
 _DEPRECATED_OBJECTS: dict[str, tuple[Any, str, tuple[int, int]]] = {
-    'split_index_msg': (_index_entries.split_index_msg,
-                        'sphinx.util.index_entries.split_index_msg',
-                        (9, 0)),
-    'split_into': (_index_entries.split_index_msg,
-                   'sphinx.util.index_entries.split_into',
-                   (9, 0)),
+    'split_index_msg': (
+        _index_entries.split_index_msg,
+        'sphinx.util.index_entries.split_index_msg',
+        (9, 0),
+    ),
+    'split_into': (
+        _index_entries.split_index_msg,
+        'sphinx.util.index_entries.split_into',
+        (9, 0),
+    ),
     'md5': (_md5, '', (9, 0)),
     'sha1': (_sha1, '', (9, 0)),
     'import_object': (_importer.import_object, '', (10, 0)),
sphinx/util/_files.py

@@ -36,7 +36,9 @@ class FilenameUniqDict(dict[str, tuple[set[str], str]]):
             del self[filename]
             self._existing.discard(unique)
 
-    def merge_other(self, docnames: set[str], other: dict[str, tuple[set[str], Any]]) -> None:
+    def merge_other(
+        self, docnames: set[str], other: dict[str, tuple[set[str], Any]]
+    ) -> None:
         for filename, (docs, _unique) in other.items():
             for doc in docs & set(docnames):
                 self.add_file(doc, filename)

@@ -70,7 +72,9 @@ class DownloadFiles(dict[str, tuple[set[str], str]]):
         if not docs:
             del self[filename]
 
-    def merge_other(self, docnames: set[str], other: dict[str, tuple[set[str], Any]]) -> None:
+    def merge_other(
+        self, docnames: set[str], other: dict[str, tuple[set[str], Any]]
+    ) -> None:
         for filename, (docs, _dest) in other.items():
             for docname in docs & set(docnames):
                 self.add_file(docname, filename)
sphinx/util/_io.py

@@ -8,8 +8,7 @@ if TYPE_CHECKING:
     from typing import Protocol
 
     class SupportsWrite(Protocol):
-        def write(self, text: str, /) -> int | None:
-            ...
+        def write(self, text: str, /) -> int | None: ...  # NoQA: E704
 
 
 class TeeStripANSI:
sphinx/util/_pathlib.py

@@ -33,9 +33,10 @@ _MSG = (
 # https://docs.python.org/3/library/stdtypes.html#string-methods
 
 if sys.platform == 'win32':
+
     class _StrPath(WindowsPath):
         def replace(  # type: ignore[override]
-            self, old: str, new: str, count: int = -1, /,
+            self, old: str, new: str, count: int = -1, /
         ) -> str:
             # replace exists in both Path and str;
             # in Path it makes filesystem changes, so we use the safer str version

@@ -81,10 +82,12 @@ if sys.platform == 'win32':
         def __len__(self) -> int:
             warnings.warn(_MSG, RemovedInSphinx90Warning, stacklevel=2)
             return len(self.__str__())
+
 else:
+
     class _StrPath(PosixPath):
         def replace(  # type: ignore[override]
-            self, old: str, new: str, count: int = -1, /,
+            self, old: str, new: str, count: int = -1, /
         ) -> str:
             # replace exists in both Path and str;
             # in Path it makes filesystem changes, so we use the safer str version
sphinx/util/_timestamps.py

@@ -9,4 +9,5 @@ def _format_rfc3339_microseconds(timestamp: int, /) -> str:
     :param timestamp: The timestamp to format, in microseconds.
     """
     seconds, fraction = divmod(timestamp, 10**6)
-    return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(seconds)) + f'.{fraction // 1_000}'
+    time_tuple = time.gmtime(seconds)
+    return time.strftime('%Y-%m-%d %H:%M:%S', time_tuple) + f'.{fraction // 1_000}'
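A quick worked example of the helper above with an illustrative timestamp — the refactor only names the intermediate time_tuple, so the output is unchanged:

    import time

    ts = 1_700_000_000_123_456  # microseconds since the epoch (illustrative)
    seconds, fraction = divmod(ts, 10**6)  # -> 1_700_000_000, 123_456
    time_tuple = time.gmtime(seconds)
    print(time.strftime('%Y-%m-%d %H:%M:%S', time_tuple) + f'.{fraction // 1_000}')
    # -> '2023-11-14 22:13:20.123'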
sphinx/util/cfamily.py

@@ -25,19 +25,23 @@ logger = logging.getLogger(__name__)
 
 _whitespace_re = re.compile(r'\s+')
 anon_identifier_re = re.compile(r'(@[a-zA-Z0-9_])[a-zA-Z0-9_]*\b')
-identifier_re = re.compile(r'''
+identifier_re = re.compile(
+    r"""
     (  # This 'extends' _anon_identifier_re with the ordinary identifiers,
        # make sure they are in sync.
         (~?\b[a-zA-Z_])  # ordinary identifiers
       | (@[a-zA-Z0-9_])  # our extension for names of anonymous entities
     )
     [a-zA-Z0-9_]*\b
-''', flags=re.VERBOSE)
+""",
+    flags=re.VERBOSE,
+)
 integer_literal_re = re.compile(r'[1-9][0-9]*(\'[0-9]+)*')
 octal_literal_re = re.compile(r'0[0-7]*(\'[0-7]+)*')
 hex_literal_re = re.compile(r'0[xX][0-9a-fA-F]+(\'[0-9a-fA-F]+)*')
 binary_literal_re = re.compile(r'0[bB][01]+(\'[01]+)*')
-integers_literal_suffix_re = re.compile(r'''
+integers_literal_suffix_re = re.compile(
+    r"""
     # unsigned and/or (long) long, in any order, but at least one of them
     (
         ([uU] ([lL] | (ll) | (LL))?)

@@ -46,8 +50,11 @@ integers_literal_suffix_re = re.compile(r'''
     )\b
     # the ending word boundary is important for distinguishing
     # between suffixes and UDLs in C++
-''', flags=re.VERBOSE)
-float_literal_re = re.compile(r'''
+""",
+    flags=re.VERBOSE,
+)
+float_literal_re = re.compile(
+    r"""
     [+-]?(
     # decimal
       ([0-9]+(\'[0-9]+)*[eE][+-]?[0-9]+(\'[0-9]+)*)

@@ -59,10 +66,13 @@ float_literal_re = re.compile(r'''
       [0-9a-fA-F]+(\'[0-9a-fA-F]+)*([pP][+-]?[0-9a-fA-F]+(\'[0-9a-fA-F]+)*)?)
     | (0[xX][0-9a-fA-F]+(\'[0-9a-fA-F]+)*\.([pP][+-]?[0-9a-fA-F]+(\'[0-9a-fA-F]+)*)?)
     )
-''', flags=re.VERBOSE)
+""",
+    flags=re.VERBOSE,
+)
 float_literal_suffix_re = re.compile(r'[fFlL]\b')
 # the ending word boundary is important for distinguishing between suffixes and UDLs in C++
-char_literal_re = re.compile(r'''
+char_literal_re = re.compile(
+    r"""
     ((?:u8)|u|U|L)?
     '(
       (?:[^\\'])

@@ -74,7 +84,9 @@ char_literal_re = re.compile(r'''
     | (?:U[0-9a-fA-F]{8})
     ))
     )'
-''', flags=re.VERBOSE)
+""",
+    flags=re.VERBOSE,
+)
 
 
 def verify_description_mode(mode: str) -> None:
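The four changes above only move the pattern text and flags onto their own lines; re.VERBOSE is what lets these patterns carry comments and indentation without changing what they match. A small sketch reusing the hex_literal_re pattern shown above (the comments are added here for illustration):

    import re

    hex_literal_re = re.compile(
        r"""
        0[xX]              # prefix
        [0-9a-fA-F]+       # hex digits
        (\'[0-9a-fA-F]+)*  # optional C++14 digit-separator groups
        """,
        flags=re.VERBOSE,
    )
    print(bool(hex_literal_re.match("0x1F'AB")))  # -> True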
@@ -116,6 +128,7 @@ class ASTBaseBase:
 # Attributes
 ################################################################################
 
+
 class ASTAttribute(ASTBaseBase):
     def describe_signature(self, signode: TextElement) -> None:
         raise NotImplementedError(repr(self))

@@ -134,7 +147,7 @@ class ASTCPPAttribute(ASTAttribute):
         return hash(self.arg)
 
     def _stringify(self, transform: StringifyTransform) -> str:
-        return f"[[{self.arg}]]"
+        return f'[[{self.arg}]]'
 
     def describe_signature(self, signode: TextElement) -> None:
         signode.append(addnodes.desc_sig_punctuation('[[', '[['))

@@ -258,12 +271,14 @@ class ASTAttributeList(ASTBaseBase):
 
 ################################################################################
 
+
 class ASTBaseParenExprList(ASTBaseBase):
     pass
 
 
 ################################################################################
 
+
 class UnsupportedMultiCharacterCharLiteral(Exception):
     pass
 

@@ -273,9 +288,13 @@ class DefinitionError(Exception):
 
 
 class BaseParser:
-    def __init__(self, definition: str, *,
-                 location: nodes.Node | tuple[str, int] | str,
-                 config: Config) -> None:
+    def __init__(
+        self,
+        definition: str,
+        *,
+        location: nodes.Node | tuple[str, int] | str,
+        config: Config,
+    ) -> None:
         self.definition = definition.strip()
         self.location = location  # for warnings
         self.config = config
@@ -315,16 +334,19 @@ class BaseParser:
     def status(self, msg: str) -> None:
         # for debugging
         indicator = '-' * self.pos + '^'
-        logger.debug(f"{msg}\n{self.definition}\n{indicator}")  # NoQA: G004
+        logger.debug(f'{msg}\n{self.definition}\n{indicator}')  # NoQA: G004
 
     def fail(self, msg: str) -> None:
         errors = []
         indicator = '-' * self.pos + '^'
-        exMain = DefinitionError(
-            'Invalid %s declaration: %s [error at %d]\n  %s\n  %s' %
-            (self.language, msg, self.pos, self.definition, indicator))
-        errors.append((exMain, "Main error"))
-        errors.extend((err, "Potential other error") for err in self.otherErrors)
+        msg = (
+            f'Invalid {self.language} declaration: {msg} [error at {self.pos}]\n'
+            f'  {self.definition}\n'
+            f'  {indicator}'
+        )
+        exc_main = DefinitionError(msg)
+        errors.append((exc_main, 'Main error'))
+        errors.extend((err, 'Potential other error') for err in self.otherErrors)
         self.otherErrors = []
         raise self._make_multi_error(errors, '')
 
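The fail() rewrite swaps %-interpolation for f-strings without changing the resulting message; a quick equivalence check with illustrative values:

    language, msg, pos = 'C++', 'expected identifier', 7
    definition = 'void f(int'
    indicator = '-' * pos + '^'
    old_style = 'Invalid %s declaration: %s [error at %d]\n  %s\n  %s' % (
        language, msg, pos, definition, indicator,
    )
    new_style = (
        f'Invalid {language} declaration: {msg} [error at {pos}]\n'
        f'  {definition}\n'
        f'  {indicator}'
    )
    assert old_style == new_style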
@@ -418,12 +440,13 @@ class BaseParser:
                 symbols.append(brackets[self.current_char])
             elif len(symbols) > 0 and self.current_char == symbols[-1]:
                 symbols.pop()
-            elif self.current_char in ")]}":
+            elif self.current_char in ')]}':
                 self.fail("Unexpected '%s' in balanced-token-seq." % self.current_char)
             self.pos += 1
         if self.eof:
-            self.fail("Could not find end of balanced-token-seq starting at %d."
-                      % startPos)
+            self.fail(
+                f'Could not find end of balanced-token-seq starting at {startPos}.'
+            )
         return self.definition[startPos : self.pos]
 
     def _parse_attribute(self) -> ASTAttribute | None:
sphinx/util/console.py

@@ -41,6 +41,7 @@ if TYPE_CHECKING:
 try:
     # check if colorama is installed to support color on Windows
     import colorama
+
     COLORAMA_AVAILABLE = True
 except ImportError:
     COLORAMA_AVAILABLE = False
sphinx/util/display.py

@@ -78,7 +78,7 @@ class progress_message:
         val: BaseException | None,
         tb: TracebackType | None,
     ) -> bool:
-        prefix = "" if self.nonl else bold(self.message + ': ')
+        prefix = '' if self.nonl else bold(self.message + ': ')
         if isinstance(val, SkipProgressMessage):
             logger.info(prefix + __('skipped'))
             if val.args:
sphinx/util/docfields.py

@@ -3,6 +3,7 @@
 "Doc fields" are reST field lists in object descriptions that will
 be domain-specifically transformed to a more appealing presentation.
 """
+
 from __future__ import annotations
 
 import contextlib
@@ -70,10 +71,17 @@ class Field:
         self.rolename = rolename
         self.bodyrolename = bodyrolename
 
-    def make_xref(self, rolename: str, domain: str, target: str,
-                  innernode: type[TextlikeNode] = addnodes.literal_emphasis,
-                  contnode: Node | None = None, env: BuildEnvironment | None = None,
-                  inliner: Inliner | None = None, location: Element | None = None) -> Node:
+    def make_xref(
+        self,
+        rolename: str,
+        domain: str,
+        target: str,
+        innernode: type[TextlikeNode] = addnodes.literal_emphasis,
+        contnode: Node | None = None,
+        env: BuildEnvironment | None = None,
+        inliner: Inliner | None = None,
+        location: Element | None = None,
+    ) -> Node:
         # note: for backwards compatibility env is last, but not optional
         assert env is not None
         assert (inliner is None) == (location is None), (inliner, location)

@@ -84,11 +92,18 @@ class Field:
         role = env.get_domain(domain).role(rolename)
         if role is None or inliner is None:
             if role is None and inliner is not None:
-                msg = __("Problem in %s domain: field is supposed "
-                         "to use role '%s', but that role is not in the domain.")
+                msg = __(
+                    'Problem in %s domain: field is supposed '
+                    "to use role '%s', but that role is not in the domain."
+                )
                 logger.warning(__(msg), domain, rolename, location=location)
-            refnode = addnodes.pending_xref('', refdomain=domain, refexplicit=False,
-                                            reftype=rolename, reftarget=target)
+            refnode = addnodes.pending_xref(
+                '',
+                refdomain=domain,
+                refexplicit=False,
+                reftype=rolename,
+                reftarget=target,
+            )
             refnode += contnode or innernode(target, target)  # type: ignore[call-arg]
             env.get_domain(domain).process_field_xref(refnode)
             return refnode
@@ -99,13 +114,22 @@ class Field:
         ns, messages = role(rolename, target, target, lineno, inliner, {}, [])
         return nodes.inline(target, '', *ns)
 
-    def make_xrefs(self, rolename: str, domain: str, target: str,
-                   innernode: type[TextlikeNode] = addnodes.literal_emphasis,
-                   contnode: Node | None = None, env: BuildEnvironment | None = None,
-                   inliner: Inliner | None = None, location: Element | None = None,
-                   ) -> list[Node]:
-        return [self.make_xref(rolename, domain, target, innernode, contnode,
-                               env, inliner, location)]
+    def make_xrefs(
+        self,
+        rolename: str,
+        domain: str,
+        target: str,
+        innernode: type[TextlikeNode] = addnodes.literal_emphasis,
+        contnode: Node | None = None,
+        env: BuildEnvironment | None = None,
+        inliner: Inliner | None = None,
+        location: Element | None = None,
+    ) -> list[Node]:
+        return [
+            self.make_xref(
+                rolename, domain, target, innernode, contnode, env, inliner, location
+            )
+        ]
 
     def make_entry(self, fieldarg: str, content: list[Node]) -> tuple[str, list[Node]]:
         return (fieldarg, content)
@@ -123,17 +147,35 @@ class Field:
         fieldname = nodes.field_name('', self.label)
         if fieldarg:
             fieldname += nodes.Text(' ')
-            fieldname.extend(self.make_xrefs(self.rolename, domain,
-                                             fieldarg, nodes.Text,
-                                             env=env, inliner=inliner, location=location))
+            fieldname.extend(
+                self.make_xrefs(
+                    self.rolename,
+                    domain,
+                    fieldarg,
+                    nodes.Text,
+                    env=env,
+                    inliner=inliner,
+                    location=location,
+                )
+            )
 
         if len(content) == 1 and (
-                isinstance(content[0], nodes.Text) or
-                (isinstance(content[0], nodes.inline) and len(content[0]) == 1 and
-                 isinstance(content[0][0], nodes.Text))):
-            content = self.make_xrefs(self.bodyrolename, domain,
-                                      content[0].astext(), contnode=content[0],
-                                      env=env, inliner=inliner, location=location)
+            isinstance(content[0], nodes.Text)
+            or (
+                isinstance(content[0], nodes.inline)
+                and len(content[0]) == 1
+                and isinstance(content[0][0], nodes.Text)
+            )
+        ):
+            content = self.make_xrefs(
+                self.bodyrolename,
+                domain,
+                content[0].astext(),
+                contnode=content[0],
+                env=env,
+                inliner=inliner,
+                location=location,
+            )
         fieldbody = nodes.field_body('', nodes.paragraph('', '', *content))
         return nodes.field('', fieldname, fieldbody)
 
@@ -155,8 +197,14 @@ class GroupedField(Field):
     is_grouped = True
     list_type = nodes.bullet_list
 
-    def __init__(self, name: str, names: tuple[str, ...] = (), label: str = '',
-                 rolename: str = '', can_collapse: bool = False) -> None:
+    def __init__(
+        self,
+        name: str,
+        names: tuple[str, ...] = (),
+        label: str = '',
+        rolename: str = '',
+        can_collapse: bool = False,
+    ) -> None:
         super().__init__(name, names, label, True, rolename)
         self.can_collapse = can_collapse
 

@@ -173,9 +221,17 @@ class GroupedField(Field):
         listnode = self.list_type()
         for fieldarg, content in items:
             par = nodes.paragraph()
-            par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
-                                       addnodes.literal_strong,
-                                       env=env, inliner=inliner, location=location))
+            par.extend(
+                self.make_xrefs(
+                    self.rolename,
+                    domain,
+                    fieldarg,
+                    addnodes.literal_strong,
+                    env=env,
+                    inliner=inliner,
+                    location=location,
+                )
+            )
             par += nodes.Text(' -- ')
             par += content
             listnode += nodes.list_item('', par)
@@ -236,8 +292,11 @@ class TypedField(GroupedField):
     ) -> nodes.field:
         def handle_item(fieldarg: str, content: list[Node]) -> nodes.paragraph:
             par = nodes.paragraph()
-            par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
-                                       addnodes.literal_strong, env=env))
+            par.extend(
+                self.make_xrefs(
+                    self.rolename, domain, fieldarg, addnodes.literal_strong, env=env
+                )
+            )
             if fieldarg in types:
                 par += nodes.Text(' (')
                 # NOTE: using .pop() here to prevent a single type node to be

@@ -246,9 +305,17 @@ class TypedField(GroupedField):
                 fieldtype = types.pop(fieldarg)
                 if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                     typename = fieldtype[0].astext()
-                    par.extend(self.make_xrefs(self.typerolename, domain, typename,
-                                               addnodes.literal_emphasis, env=env,
-                                               inliner=inliner, location=location))
+                    par.extend(
+                        self.make_xrefs(
+                            self.typerolename,
+                            domain,
+                            typename,
+                            addnodes.literal_emphasis,
+                            env=env,
+                            inliner=inliner,
+                            location=location,
+                        )
+                    )
                 else:
                     par += fieldtype
                 par += nodes.Text(')')
@@ -329,8 +396,13 @@ class DocFieldTransformer:
                     entries.append(field)
 
                     # but if this has a type then we can at least link it
-                    if (typedesc and is_typefield and content and
-                            len(content) == 1 and isinstance(content[0], nodes.Text)):
+                    if (
+                        typedesc
+                        and is_typefield
+                        and content
+                        and len(content) == 1
+                        and isinstance(content[0], nodes.Text)
+                    ):
                         typed_field = cast(TypedField, typedesc)
                         target = content[0].astext()
                         xrefs = typed_field.make_xrefs(

@@ -356,7 +428,9 @@ class DocFieldTransformer:
                 if is_typefield:
                     # filter out only inline nodes; others will result in invalid
                     # markup being written out
-                    content = [n for n in content if isinstance(n, nodes.Inline | nodes.Text)]
+                    content = [
+                        n for n in content if isinstance(n, nodes.Inline | nodes.Text)
+                    ]
                     if content:
                         types.setdefault(typename, {})[fieldarg] = content
                     continue
@@ -368,12 +442,10 @@ class DocFieldTransformer:
                 except ValueError:
                     pass
                 else:
-                    types.setdefault(typename, {})[argname] = \
-                        [nodes.Text(argtype)]
+                    types.setdefault(typename, {})[argname] = [nodes.Text(argtype)]
                     fieldarg = argname
 
-            translatable_content = nodes.inline(field_body.rawsource,
-                                                translatable=True)
+            translatable_content = nodes.inline(field_body.rawsource, translatable=True)
             translatable_content.document = field_body.parent.document
             translatable_content.source = field_body.parent.source
             translatable_content.line = field_body.parent.line
@@ -383,7 +455,9 @@ class DocFieldTransformer:
             # get one entry per field
             if typedesc.is_grouped:
                 if typename in groupindices:
-                    group = cast(tuple[Field, list, Node], entries[groupindices[typename]])
+                    group = cast(
+                        tuple[Field, list, Node], entries[groupindices[typename]]
+                    )
                 else:
                     groupindices[typename] = len(entries)
                     group = (typedesc, [], field)
@@ -406,7 +480,13 @@ class DocFieldTransformer:
         env = self.directive.state.document.settings.env
         inliner = self.directive.state.inliner
         domain = self.directive.domain or ''
-        new_list += fieldtype.make_field(fieldtypes, domain, items,
-                                         env=env, inliner=inliner, location=location)
+        new_list += fieldtype.make_field(
+            fieldtypes,
+            domain,
+            items,
+            env=env,
+            inliner=inliner,
+            location=location,
+        )
 
         node.replace_self(new_list)
sphinx/util/docutils.py

@@ -24,7 +24,9 @@ from sphinx.util import logging
 from sphinx.util.parsing import nested_parse_to_nodes
 
 logger = logging.getLogger(__name__)
-report_re = re.compile('^(.+?:(?:\\d+)?): \\((DEBUG|INFO|WARNING|ERROR|SEVERE)/(\\d+)?\\) ')
+report_re = re.compile(
+    '^(.+?:(?:\\d+)?): \\((DEBUG|INFO|WARNING|ERROR|SEVERE)/(\\d+)?\\) '
+)
 
 if TYPE_CHECKING:
     from collections.abc import Callable, Iterator  # NoQA: TCH003

@@ -114,8 +116,8 @@ def unregister_node(node: type[Element]) -> None:
     This is inverse of ``nodes._add_nodes_class_names()``.
     """
     if hasattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__):
-        delattr(nodes.GenericNodeVisitor, "visit_" + node.__name__)
-        delattr(nodes.GenericNodeVisitor, "depart_" + node.__name__)
+        delattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__)
+        delattr(nodes.GenericNodeVisitor, 'depart_' + node.__name__)
         delattr(nodes.SparseNodeVisitor, 'visit_' + node.__name__)
         delattr(nodes.SparseNodeVisitor, 'depart_' + node.__name__)
 
@@ -129,7 +131,9 @@ def patched_get_language() -> Iterator[None]:
     """
     from docutils.languages import get_language
 
-    def patched_get_language(language_code: str, reporter: Reporter | None = None) -> Any:
+    def patched_get_language(
+        language_code: str, reporter: Reporter | None = None
+    ) -> Any:
         return get_language(language_code)
 
     try:

@@ -153,7 +157,9 @@ def patched_rst_get_language() -> Iterator[None]:
     """
    from docutils.parsers.rst.languages import get_language
 
-    def patched_get_language(language_code: str, reporter: Reporter | None = None) -> Any:
+    def patched_get_language(
+        language_code: str, reporter: Reporter | None = None
+    ) -> Any:
         return get_language(language_code)
 
     try:
@@ -170,7 +176,9 @@ def using_user_docutils_conf(confdir: str | None) -> Iterator[None]:
     try:
         docutilsconfig = os.environ.get('DOCUTILSCONFIG', None)
         if confdir:
-            os.environ['DOCUTILSCONFIG'] = path.join(path.abspath(confdir), 'docutils.conf')
+            os.environ['DOCUTILSCONFIG'] = path.join(
+                path.abspath(confdir), 'docutils.conf'
+            )
 
         yield
     finally:
@@ -183,9 +191,11 @@ def using_user_docutils_conf(confdir: str | None) -> Iterator[None]:
 @contextmanager
 def patch_docutils(confdir: str | None = None) -> Iterator[None]:
     """Patch to docutils temporarily."""
-    with patched_get_language(), \
-         patched_rst_get_language(), \
-         using_user_docutils_conf(confdir):
+    with (
+        patched_get_language(),
+        patched_rst_get_language(),
+        using_user_docutils_conf(confdir),
+    ):
         yield
 
 
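The backslash continuations become a parenthesized with-statement, which Python has accepted since 3.10 — well within the versions Sphinx supports. A tiny sketch with stand-in context managers:

    from contextlib import nullcontext

    with (
        nullcontext() as a,
        nullcontext() as b,
    ):
        pass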
@@ -204,7 +214,7 @@ class CustomReSTDispatcher:
         self.enable()
 
     def __exit__(
-        self, exc_type: type[Exception], exc_value: Exception, traceback: Any,
+        self, exc_type: type[Exception], exc_value: Exception, traceback: Any
     ) -> None:
         self.disable()
 

@@ -219,16 +229,27 @@ class CustomReSTDispatcher:
         directives.directive = self.directive_func
         roles.role = self.role_func
 
-    def directive(self,
-                  directive_name: str, language_module: ModuleType, document: nodes.document,
+    def directive(
+        self,
+        directive_name: str,
+        language_module: ModuleType,
+        document: nodes.document,
     ) -> tuple[type[Directive] | None, list[system_message]]:
         return self.directive_func(directive_name, language_module, document)
 
     def role(
-        self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter,
+        self,
+        role_name: str,
+        language_module: ModuleType,
+        lineno: int,
+        reporter: Reporter,
     ) -> tuple[RoleFunction, list[system_message]]:
-        return self.role_func(role_name, language_module,  # type: ignore[return-value]
-                              lineno, reporter)
+        return self.role_func(
+            role_name,
+            language_module,  # type: ignore[return-value]
+            lineno,
+            reporter,
+        )
 
 
 class ElementLookupError(Exception):
@@ -258,7 +279,9 @@ class sphinx_domains(CustomReSTDispatcher):
             if element is not None:
                 return element, []
             else:
-                logger.warning(_('unknown directive or role name: %s:%s'), domain_name, name)
+                logger.warning(
+                    _('unknown directive or role name: %s:%s'), domain_name, name
+                )
         # else look in the default domain
         else:
             def_domain = self.env.temp_data.get('default_domain')

@@ -274,8 +297,11 @@ class sphinx_domains(CustomReSTDispatcher):
 
         raise ElementLookupError
 
-    def directive(self,
-                  directive_name: str, language_module: ModuleType, document: nodes.document,
+    def directive(
+        self,
+        directive_name: str,
+        language_module: ModuleType,
+        document: nodes.document,
     ) -> tuple[type[Directive] | None, list[system_message]]:
         try:
             return self.lookup_domain_element('directive', directive_name)

@@ -283,7 +309,11 @@ class sphinx_domains(CustomReSTDispatcher):
             return super().directive(directive_name, language_module, document)
 
     def role(
-        self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter,
+        self,
+        role_name: str,
+        language_module: ModuleType,
+        lineno: int,
+        reporter: Reporter,
     ) -> tuple[RoleFunction, list[system_message]]:
         try:
             return self.lookup_domain_element('role', role_name)
@@ -295,26 +325,39 @@ class WarningStream:
     def write(self, text: str) -> None:
         matched = report_re.search(text)
         if not matched:
-            logger.warning(text.rstrip("\r\n"), type="docutils")
+            logger.warning(text.rstrip('\r\n'), type='docutils')
         else:
             location, type, level = matched.groups()
             message = report_re.sub('', text).rstrip()
-            logger.log(type, message, location=location, type="docutils")
+            logger.log(type, message, location=location, type='docutils')
 
 
 class LoggingReporter(Reporter):
     @classmethod
-    def from_reporter(cls: type[LoggingReporter], reporter: Reporter) -> LoggingReporter:
+    def from_reporter(
+        cls: type[LoggingReporter], reporter: Reporter
+    ) -> LoggingReporter:
         """Create an instance of LoggingReporter from other reporter object."""
-        return cls(reporter.source, reporter.report_level, reporter.halt_level,
-                   reporter.debug_flag, reporter.error_handler)
+        return cls(
+            reporter.source,
+            reporter.report_level,
+            reporter.halt_level,
+            reporter.debug_flag,
+            reporter.error_handler,
+        )
 
-    def __init__(self, source: str, report_level: int = Reporter.WARNING_LEVEL,
-                 halt_level: int = Reporter.SEVERE_LEVEL, debug: bool = False,
-                 error_handler: str = 'backslashreplace') -> None:
+    def __init__(
+        self,
+        source: str,
+        report_level: int = Reporter.WARNING_LEVEL,
+        halt_level: int = Reporter.SEVERE_LEVEL,
+        debug: bool = False,
+        error_handler: str = 'backslashreplace',
+    ) -> None:
         stream = cast(IO, WarningStream())
-        super().__init__(source, report_level, halt_level,
-                         stream, debug, error_handler=error_handler)
+        super().__init__(
+            source, report_level, halt_level, stream, debug, error_handler=error_handler
+        )
 
 
 class NullReporter(Reporter):
@@ -351,8 +394,13 @@ class SphinxFileOutput(FileOutput):
         super().__init__(**kwargs)
 
     def write(self, data: str) -> str:
-        if (self.destination_path and self.autoclose and 'b' not in self.mode and
-                self.overwrite_if_changed and os.path.exists(self.destination_path)):
+        if (
+            self.destination_path
+            and self.autoclose
+            and 'b' not in self.mode
+            and self.overwrite_if_changed
+            and os.path.exists(self.destination_path)
+        ):
             with open(self.destination_path, encoding=self.encoding) as f:
                 # skip writing: content not changed
                 if f.read() == data:
@@ -416,7 +464,9 @@ class SphinxDirective(Directive):
             return f'<unknown>:{line}'
         return ''
 
-    def parse_content_to_nodes(self, allow_section_headings: bool = False) -> list[Node]:
+    def parse_content_to_nodes(
+        self, allow_section_headings: bool = False
+    ) -> list[Node]:
         """Parse the directive's content into nodes.
 
         :param allow_section_headings:

@@ -437,7 +487,12 @@ class SphinxDirective(Directive):
         )
 
     def parse_text_to_nodes(
-        self, text: str = '', /, *, offset: int = -1, allow_section_headings: bool = False,
+        self,
+        text: str = '',
+        /,
+        *,
+        offset: int = -1,
+        allow_section_headings: bool = False,
     ) -> list[Node]:
         """Parse *text* into nodes.
 

@@ -465,7 +520,7 @@ class SphinxDirective(Directive):
         )
 
     def parse_inline(
-        self, text: str, *, lineno: int = -1,
+        self, text: str, *, lineno: int = -1
     ) -> tuple[list[Node], list[system_message]]:
         """Parse *text* as inline elements.
 
@@ -496,6 +551,7 @@ class SphinxRole:
     This class is strongly coupled with Sphinx.
     """
 
+    # fmt: off
     name: str  #: The role name actually used in the document.
     rawtext: str  #: A string containing the entire interpreted text input.
     text: str  #: The interpreted text content.

@@ -507,9 +563,17 @@ class SphinxRole:
     #: A list of strings, the directive content for customisation
     #: (from the "role" directive).
     content: Sequence[str]
+    # fmt: on
 
-    def __call__(self, name: str, rawtext: str, text: str, lineno: int,
-                 inliner: Inliner, options: dict | None = None, content: Sequence[str] = (),
+    def __call__(
+        self,
+        name: str,
+        rawtext: str,
+        text: str,
+        lineno: int,
+        inliner: Inliner,
+        options: dict | None = None,
+        content: Sequence[str] = (),
     ) -> tuple[list[Node], list[system_message]]:
         self.rawtext = rawtext
         self.text = unescape(text)
@@ -585,16 +649,25 @@ class ReferenceRole(SphinxRole):
     .. versionadded:: 2.0
     """
 
+    # fmt: off
     has_explicit_title: bool  #: A boolean indicates the role has explicit title or not.
     disabled: bool  #: A boolean indicates the reference is disabled.
     title: str  #: The link title for the interpreted text.
     target: str  #: The link target for the interpreted text.
+    # fmt: on
 
     # \x00 means the "<" was backslash-escaped
     explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
 
-    def __call__(self, name: str, rawtext: str, text: str, lineno: int,
-                 inliner: Inliner, options: dict | None = None, content: Sequence[str] = (),
+    def __call__(
+        self,
+        name: str,
+        rawtext: str,
+        text: str,
+        lineno: int,
+        inliner: Inliner,
+        options: dict | None = None,
+        content: Sequence[str] = (),
     ) -> tuple[list[Node], list[system_message]]:
         if options is None:
             options = {}

@@ -698,6 +771,7 @@ def new_document(source_path: str, settings: Any = None) -> nodes.document:
 
     # Create a new instance of nodes.document using cached reporter
     from sphinx import addnodes
+
     document = addnodes.document(settings, reporter, source=source_path)
     document.note_source(source_path, -1)
     return document
sphinx/util/exceptions.py

@@ -31,12 +31,18 @@ def save_traceback(app: Sphinx | None, exc: BaseException) -> str:
         last_msgs = exts_list = ''
     else:
         extensions = app.extensions.values()
-        last_msgs = '\n'.join(f'# {strip_escape_sequences(s).strip()}'
-                              for s in app.messagelog)
-        exts_list = '\n'.join(f'# {ext.name} ({ext.version})' for ext in extensions
-                              if ext.version != 'builtin')
+        last_msgs = '\n'.join(
+            f'# {strip_escape_sequences(s).strip()}' for s in app.messagelog
+        )
+        exts_list = '\n'.join(
+            f'# {ext.name} ({ext.version})'
+            for ext in extensions
+            if ext.version != 'builtin'
+        )
 
-    with NamedTemporaryFile('w', suffix='.log', prefix='sphinx-err-', delete=False) as f:
+    with NamedTemporaryFile(
+        'w', suffix='.log', prefix='sphinx-err-', delete=False
+    ) as f:
         f.write(f"""\
 # Platform: {sys.platform}; ({platform.platform()})
 # Sphinx version: {sphinx.__display_version__}
sphinx/util/fileutil.py

@@ -35,11 +35,14 @@ def _template_basename(filename: str | os.PathLike[str]) -> str | None:
     return None
 
 
-def copy_asset_file(source: str | os.PathLike[str], destination: str | os.PathLike[str],
+def copy_asset_file(
+    source: str | os.PathLike[str],
+    destination: str | os.PathLike[str],
     context: dict[str, Any] | None = None,
     renderer: BaseRenderer | None = None,
     *,
-    force: bool = False) -> None:
+    force: bool = False,
+) -> None:
     """Copy an asset file to destination.
 
     On copying, it expands the template variables if context argument is given and
@@ -62,38 +65,51 @@ def copy_asset_file(source: str | os.PathLike[str], destination: str | os.PathLike[str],
     if _template_basename(source) and context is not None:
         if renderer is None:
             from sphinx.util.template import SphinxRenderer
+
             renderer = SphinxRenderer()
 
         with open(source, encoding='utf-8') as fsrc:
             template_content = fsrc.read()
         rendered_template = renderer.render_string(template_content, context)
 
-        if (
-            not force
-            and destination.exists()
-            and template_content != rendered_template
-        ):
-            msg = __('Aborted attempted copy from rendered template %s to %s '
-                     '(the destination path has existing data).')
-            logger.warning(msg, os.fsdecode(source), os.fsdecode(destination),
-                           type='misc', subtype='copy_overwrite')
+        if not force and destination.exists() and template_content != rendered_template:
+            msg = __(
+                'Aborted attempted copy from rendered template %s to %s '
+                '(the destination path has existing data).'
+            )
+            logger.warning(
+                msg,
+                os.fsdecode(source),
+                os.fsdecode(destination),
+                type='misc',
+                subtype='copy_overwrite',
+            )
             return
 
         destination = _template_basename(destination) or destination
         with open(destination, 'w', encoding='utf-8') as fdst:
             msg = __('Writing evaluated template result to %s')
-            logger.info(msg, os.fsdecode(destination), type='misc',
-                        subtype='template_evaluation')
+            logger.info(
+                msg,
+                os.fsdecode(destination),
+                type='misc',
+                subtype='template_evaluation',
+            )
             fdst.write(rendered_template)
     else:
         copyfile(source, destination, force=force)
 
 
-def copy_asset(source: str | os.PathLike[str], destination: str | os.PathLike[str],
+def copy_asset(
+    source: str | os.PathLike[str],
+    destination: str | os.PathLike[str],
     excluded: PathMatcher = lambda path: False,
-    context: dict[str, Any] | None = None, renderer: BaseRenderer | None = None,
+    context: dict[str, Any] | None = None,
+    renderer: BaseRenderer | None = None,
     onerror: Callable[[str, Exception], None] | None = None,
-    *, force: bool = False) -> None:
+    *,
+    force: bool = False,
+) -> None:
     """Copy asset files to destination recursively.
 
     On copying, it expands the template variables if context argument is given and
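A hypothetical call to the API reformatted above — supplying a context makes the source be rendered as a template before it is written (the paths and context values here are illustrative, not from the commit):

    from sphinx.util.fileutil import copy_asset_file

    copy_asset_file(
        'assets/custom.css.jinja',   # illustrative template path
        '_build/html/_static/',
        context={'accent': '#336'},  # values substituted into the template
    )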
@@ -114,14 +130,14 @@ def copy_asset(source: str | os.PathLike[str], destination: str | os.PathLike[str],
 
     if renderer is None:
         from sphinx.util.template import SphinxRenderer
+
         renderer = SphinxRenderer()
 
     ensuredir(destination)
     if os.path.isfile(source):
-        copy_asset_file(source, destination,
-                        context=context,
-                        renderer=renderer,
-                        force=force)
+        copy_asset_file(
+            source, destination, context=context, renderer=renderer, force=force
+        )
         return
 
     for root, dirs, files in os.walk(source, followlinks=True):
@@ -135,11 +151,13 @@ def copy_asset(source: str | os.PathLike[str], destination: str | os.PathLike[str],
         for filename in files:
             if not excluded(posixpath.join(reldir, filename)):
                 try:
-                    copy_asset_file(posixpath.join(root, filename),
+                    copy_asset_file(
+                        posixpath.join(root, filename),
                         posixpath.join(destination, reldir),
                         context=context,
                         renderer=renderer,
-                        force=force)
+                        force=force,
+                    )
                 except Exception as exc:
                     if onerror:
                         onerror(posixpath.join(root, filename), exc)
sphinx/util/http_date.py

@@ -12,7 +12,7 @@ from sphinx.deprecation import RemovedInSphinx90Warning
 _WEEKDAY_NAME = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
 _MONTH_NAME = ('',  # Placeholder for indexing purposes
                'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
-               'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
+               'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')  # fmt: skip
 _GMT_OFFSET = float(time.localtime().tm_gmtoff)
 
 
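The `# fmt: skip` here (and the `# fmt: off`/`# fmt: on` pairs in the docutils hunks above) tell the formatter to leave manually aligned code untouched. A sketch with hypothetical names:

    # fmt: off
    _IDENTITY = (
        (1, 0),
        (0, 1),
    )
    # fmt: on

    _NAMES = ('a', 'b',
              'c', 'd')  # fmt: skip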
@@ -29,18 +29,20 @@ def rfc1123_to_epoch(rfc1123: str) -> float:
     t = parsedate_tz(rfc1123)
     if t is None:
         raise ValueError
-    if not rfc1123.endswith(" GMT"):
+    if not rfc1123.endswith(' GMT'):
         warnings.warn(
-            "HTTP-date string does not meet RFC 7231 requirements "
+            'HTTP-date string does not meet RFC 7231 requirements '
             f"(must end with 'GMT'): {rfc1123!r}",
-            RemovedInSphinx90Warning, stacklevel=3,
+            RemovedInSphinx90Warning,
+            stacklevel=3,
         )
     epoch_secs = time.mktime(time.struct_time(t[:9])) + _GMT_OFFSET
     if (gmt_offset := t[9]) != 0:
         warnings.warn(
-            "HTTP-date string does not meet RFC 7231 requirements "
-            f"(must be GMT time): {rfc1123!r}",
-            RemovedInSphinx90Warning, stacklevel=3,
+            'HTTP-date string does not meet RFC 7231 requirements '
+            f'(must be GMT time): {rfc1123!r}',
+            RemovedInSphinx90Warning,
+            stacklevel=3,
        )
         return epoch_secs - (gmt_offset or 0)
     return epoch_secs
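For reference, parsedate_tz returns a 10-tuple whose last element is the UTC offset in seconds (0 for 'GMT'); a quick check with an illustrative date:

    from email.utils import parsedate_tz

    t = parsedate_tz('Mon, 06 Nov 2023 12:00:00 GMT')
    print(t[:6], t[9])  # -> (2023, 11, 6, 12, 0, 0) 0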
sphinx/util/i18n.py

@@ -75,7 +75,6 @@ class LocaleFileInfoBase(NamedTuple):
 
 
 class CatalogInfo(LocaleFileInfoBase):
-
     @property
     def po_file(self) -> str:
         return self.domain + '.po'
@@ -94,8 +93,9 @@ class CatalogInfo(LocaleFileInfoBase):
 
     def is_outdated(self) -> bool:
         return (
-            not path.exists(self.mo_path) or
-            _last_modified_time(self.mo_path) < _last_modified_time(self.po_path))
+            not path.exists(self.mo_path)
+            or _last_modified_time(self.mo_path) < _last_modified_time(self.po_path)
+        )  # fmt: skip
 
     def write_mo(self, locale: str, use_fuzzy: bool = False) -> None:
         with open(self.po_path, encoding=self.charset) as file_po:
@ -115,8 +115,13 @@ class CatalogInfo(LocaleFileInfoBase):
|
|||||||
class CatalogRepository:
|
class CatalogRepository:
|
||||||
"""A repository for message catalogs."""
|
"""A repository for message catalogs."""
|
||||||
|
|
||||||
def __init__(self, basedir: str | os.PathLike[str], locale_dirs: list[str],
|
def __init__(
|
||||||
language: str, encoding: str) -> None:
|
self,
|
||||||
|
basedir: str | os.PathLike[str],
|
||||||
|
locale_dirs: list[str],
|
||||||
|
language: str,
|
||||||
|
encoding: str,
|
||||||
|
) -> None:
|
||||||
self.basedir = basedir
|
self.basedir = basedir
|
||||||
self._locale_dirs = locale_dirs
|
self._locale_dirs = locale_dirs
|
||||||
self.language = language
|
self.language = language
|
||||||
@ -205,13 +210,17 @@ date_format_mappings = {
|
|||||||
'%z': 'ZZZ', # UTC offset in the form ±HHMM[SS[.ffffff]]
|
'%z': 'ZZZ', # UTC offset in the form ±HHMM[SS[.ffffff]]
|
||||||
# (empty string if the object is naive).
|
# (empty string if the object is naive).
|
||||||
'%%': '%',
|
'%%': '%',
|
||||||
}
|
} # fmt: skip
|
||||||
|
|
||||||
date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
|
date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
|
||||||
|
|
||||||
|
|
||||||
def babel_format_date(date: datetime, format: str, locale: str,
|
def babel_format_date(
|
||||||
formatter: Formatter = babel.dates.format_date) -> str:
|
date: datetime,
|
||||||
|
format: str,
|
||||||
|
locale: str,
|
||||||
|
formatter: Formatter = babel.dates.format_date,
|
||||||
|
) -> str:
|
||||||
# Check if we have the tzinfo attribute. If not we cannot do any time
|
# Check if we have the tzinfo attribute. If not we cannot do any time
|
||||||
# related formats.
|
# related formats.
|
||||||
if not hasattr(date, 'tzinfo'):
|
if not hasattr(date, 'tzinfo'):
|
||||||
@ -223,8 +232,13 @@ def babel_format_date(date: datetime, format: str, locale: str,
|
|||||||
# fallback to English
|
# fallback to English
|
||||||
return formatter(date, format, locale='en')
|
return formatter(date, format, locale='en')
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
logger.warning(__('Invalid date format. Quote the string by single quote '
|
logger.warning(
|
||||||
'if you want to output it directly: %s'), format)
|
__(
|
||||||
|
'Invalid date format. Quote the string by single quote '
|
||||||
|
'if you want to output it directly: %s'
|
||||||
|
),
|
||||||
|
format,
|
||||||
|
)
|
||||||
return format
|
return format
|
||||||
|
|
||||||
|
|
||||||
@ -267,12 +281,15 @@ def format_date(
|
|||||||
else:
|
else:
|
||||||
function = babel.dates.format_datetime
|
function = babel.dates.format_datetime
|
||||||
|
|
||||||
result.append(babel_format_date(date, babel_format, locale=language,
|
result.append(
|
||||||
formatter=function))
|
babel_format_date(
|
||||||
|
date, babel_format, locale=language, formatter=function
|
||||||
|
)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
result.append(token)
|
result.append(token)
|
||||||
|
|
||||||
return "".join(result)
|
return ''.join(result)
|
||||||
|
|
||||||
|
|
||||||
def get_image_filename_for_language(
|
def get_image_filename_for_language(
|
||||||
|
@@ -13,6 +13,7 @@ if TYPE_CHECKING:
 
 try:
     from PIL import Image
+
     PILLOW_AVAILABLE = True
 except ImportError:
     PILLOW_AVAILABLE = False
@@ -54,13 +55,13 @@ def get_image_size(filename: str) -> tuple[int, int] | None:
 
 
 @overload
-def guess_mimetype(filename: PathLike[str] | str, default: str) -> str:
-    ...
+def guess_mimetype(filename: PathLike[str] | str, default: str) -> str: ...  # NoQA: E704
 
 
 @overload
-def guess_mimetype(filename: PathLike[str] | str, default: None = None) -> str | None:
-    ...
+def guess_mimetype(  # NoQA: E704
+    filename: PathLike[str] | str, default: None = None
+) -> str | None: ...
 
 
 def guess_mimetype(
@@ -121,12 +122,12 @@ def _image_type_from_file(filename: PathLike[str] | str) -> str:
 
     # JPEG data
     # https://en.wikipedia.org/wiki/JPEG_File_Interchange_Format#File_format_structure
-    if header.startswith(b'\xFF\xD8'):
+    if header.startswith(b'\xff\xd8'):
         return 'jpeg'
 
     # Portable Network Graphics
     # https://en.wikipedia.org/wiki/PNG#File_header
-    if header.startswith(b'\x89PNG\r\n\x1A\n'):
+    if header.startswith(b'\x89PNG\r\n\x1a\n'):
         return 'png'
 
     # Scalable Vector Graphics
@@ -52,11 +52,7 @@ if TYPE_CHECKING:
         | types.MethodDescriptorType
         | types.ClassMethodDescriptorType
     )
-    _SignatureType: TypeAlias = (
-        Callable[..., Any]
-        | staticmethod
-        | classmethod
-    )
+    _SignatureType: TypeAlias = Callable[..., Any] | staticmethod | classmethod
 
 logger = logging.getLogger(__name__)
 
@@ -266,7 +262,8 @@ def isstaticmethod(
 def isdescriptor(x: Any) -> TypeIs[_SupportsGet | _SupportsSet | _SupportsDelete]:
     """Check if the object is a :external+python:term:`descriptor`."""
     return any(
-        callable(safe_getattr(x, item, None)) for item in ('__get__', '__set__', '__delete__')
+        callable(safe_getattr(x, item, None))
+        for item in ('__get__', '__set__', '__delete__')
     )
 
 
@@ -429,7 +426,10 @@ def object_description(obj: Any, *, _seen: frozenset[int] = frozenset()) -> str:
         sorted_keys = sorted(obj, key=lambda k: object_description(k, _seen=seen))
 
         items = (
-            (object_description(key, _seen=seen), object_description(obj[key], _seen=seen))
+            (
+                object_description(key, _seen=seen),
+                object_description(obj[key], _seen=seen),
+            )
             for key in sorted_keys
         )
         return '{%s}' % ', '.join(f'{key}: {value}' for (key, value) in items)
@@ -442,7 +442,9 @@ def object_description(obj: Any, *, _seen: frozenset[int] = frozenset()) -> str:
         except TypeError:
             # Cannot sort set values, fall back to using descriptions as a sort key
             sorted_values = sorted(obj, key=lambda x: object_description(x, _seen=seen))
-        return '{%s}' % ', '.join(object_description(x, _seen=seen) for x in sorted_values)
+        return '{%s}' % ', '.join(
+            object_description(x, _seen=seen) for x in sorted_values
+        )
     elif isinstance(obj, frozenset):
         if id(obj) in seen:
             return 'frozenset(...)'
@@ -760,7 +762,10 @@ def stringify_signature(
     args = []
     last_kind = None
     for param in sig.parameters.values():
-        if param.kind != Parameter.POSITIONAL_ONLY and last_kind == Parameter.POSITIONAL_ONLY:
+        if (
+            param.kind != Parameter.POSITIONAL_ONLY
+            and last_kind == Parameter.POSITIONAL_ONLY
+        ):
             # PEP-570: Separator for Positional Only Parameter: /
             args.append('/')
         if param.kind == Parameter.KEYWORD_ONLY and last_kind in (
@@ -797,7 +802,11 @@ def stringify_signature(
         args.append('/')
 
     concatenated_args = ', '.join(args)
-    if sig.return_annotation is EMPTY or not show_annotation or not show_return_annotation:
+    if (
+        sig.return_annotation is EMPTY
+        or not show_annotation
+        or not show_return_annotation
+    ):
         return f'({concatenated_args})'
     else:
         retann = stringify_annotation(sig.return_annotation, mode)  # type: ignore[arg-type]
@@ -842,11 +851,15 @@ def signature_from_ast(node: ast.FunctionDef, code: str = '') -> Signature:
 
     # normal arguments
     for arg, defexpr in zip(args.args, defaults[pos_only_offset:], strict=False):
-        params.append(_define(Parameter.POSITIONAL_OR_KEYWORD, arg, code, defexpr=defexpr))
+        params.append(
+            _define(Parameter.POSITIONAL_OR_KEYWORD, arg, code, defexpr=defexpr)
+        )
 
     # variadic positional argument (no possible default expression)
     if args.vararg:
-        params.append(_define(Parameter.VAR_POSITIONAL, args.vararg, code, defexpr=None))
+        params.append(
+            _define(Parameter.VAR_POSITIONAL, args.vararg, code, defexpr=None)
+        )
 
     # keyword-only arguments
     for arg, defexpr in zip(args.kwonlyargs, args.kw_defaults, strict=False):
@@ -1,4 +1,5 @@
 """Inventory utility functions for Sphinx."""
+
 from __future__ import annotations
 
 import os
@@ -135,8 +136,11 @@ class InventoryFile:
 
         for line in stream.read_compressed_lines():
             # be careful to handle names with embedded spaces correctly
-            m = re.match(r'(.+?)\s+(\S+)\s+(-?\d+)\s+?(\S*)\s+(.*)',
-                         line.rstrip(), flags=re.VERBOSE)
+            m = re.match(
+                r'(.+?)\s+(\S+)\s+(-?\d+)\s+?(\S*)\s+(.*)',
+                line.rstrip(),
+                flags=re.VERBOSE,
+            )
             if not m:
                 continue
             name, type, prio, location, dispname = m.groups()
@@ -155,15 +159,20 @@ class InventoryFile:
             # Some types require case insensitive matches:
             # * 'term': https://github.com/sphinx-doc/sphinx/issues/9291
            # * 'label': https://github.com/sphinx-doc/sphinx/issues/12008
-            definition = f"{type}:{name}"
+            definition = f'{type}:{name}'
             content = prio, location, dispname
             lowercase_definition = definition.lower()
             if lowercase_definition in potential_ambiguities:
                 if potential_ambiguities[lowercase_definition] != content:
                     actual_ambiguities.add(definition)
                 else:
-                    logger.debug(__("inventory <%s> contains duplicate definitions of %s"),
-                                 uri, definition, type='intersphinx', subtype='external')
+                    logger.debug(
+                        __('inventory <%s> contains duplicate definitions of %s'),
+                        uri,
+                        definition,
+                        type='intersphinx',
+                        subtype='external',
+                    )
             else:
                 potential_ambiguities[lowercase_definition] = content
             if location.endswith('$'):
@@ -172,25 +181,35 @@ class InventoryFile:
             inv_item: InventoryItem = projname, version, location, dispname
             invdata.setdefault(type, {})[name] = inv_item
         for ambiguity in actual_ambiguities:
-            logger.info(__("inventory <%s> contains multiple definitions for %s"),
-                        uri, ambiguity, type='intersphinx', subtype='external')
+            logger.info(
+                __('inventory <%s> contains multiple definitions for %s'),
+                uri,
+                ambiguity,
+                type='intersphinx',
+                subtype='external',
+            )
         return invdata
 
     @classmethod
     def dump(
-        cls: type[InventoryFile], filename: str, env: BuildEnvironment, builder: Builder,
+        cls: type[InventoryFile],
+        filename: str,
+        env: BuildEnvironment,
+        builder: Builder,
     ) -> None:
         def escape(string: str) -> str:
-            return re.sub("\\s+", " ", string)
+            return re.sub('\\s+', ' ', string)
 
         with open(os.path.join(filename), 'wb') as f:
             # header
-            f.write(('# Sphinx inventory version 2\n'
-                     '# Project: %s\n'
-                     '# Version: %s\n'
-                     '# The remainder of this file is compressed using zlib.\n' %
-                     (escape(env.config.project),
-                      escape(env.config.version))).encode())
+            f.write(
+                (
+                    '# Sphinx inventory version 2\n'
+                    f'# Project: {escape(env.config.project)}\n'
+                    f'# Version: {escape(env.config.version)}\n'
+                    '# The remainder of this file is compressed using zlib.\n'
+                ).encode()
+            )
 
             # body
             compressor = zlib.compressobj(9)
@@ -205,7 +224,6 @@ class InventoryFile:
                         uri += '#' + anchor
                     if dispname == fullname:
                         dispname = '-'
-                    entry = ('%s %s:%s %s %s %s\n' %
-                             (fullname, domain.name, type, prio, uri, dispname))
+                    entry = f'{fullname} {domain.name}:{type} {prio} {uri} {dispname}\n'
                     f.write(compressor.compress(entry.encode()))
             f.write(compressor.flush())
@@ -27,7 +27,9 @@ if TYPE_CHECKING:
 NAMESPACE = 'sphinx'
 VERBOSE = 15
 
-LEVEL_NAMES: defaultdict[str, int] = defaultdict(lambda: logging.WARNING, {
-    'CRITICAL': logging.CRITICAL,
-    'SEVERE': logging.CRITICAL,
-    'ERROR': logging.ERROR,
+LEVEL_NAMES: defaultdict[str, int] = defaultdict(
+    lambda: logging.WARNING,
+    {
+        'CRITICAL': logging.CRITICAL,
+        'SEVERE': logging.CRITICAL,
+        'ERROR': logging.ERROR,
@@ -35,19 +37,26 @@ LEVEL_NAMES: defaultdict[str, int] = defaultdict(lambda: logging.WARNING, {
-    'INFO': logging.INFO,
-    'VERBOSE': VERBOSE,
-    'DEBUG': logging.DEBUG,
-})
+        'INFO': logging.INFO,
+        'VERBOSE': VERBOSE,
+        'DEBUG': logging.DEBUG,
+    },
+)
 
-VERBOSITY_MAP: defaultdict[int, int] = defaultdict(lambda: logging.NOTSET, {
-    0: logging.INFO,
-    1: VERBOSE,
-    2: logging.DEBUG,
-})
+VERBOSITY_MAP: defaultdict[int, int] = defaultdict(
+    lambda: logging.NOTSET,
+    {
+        0: logging.INFO,
+        1: VERBOSE,
+        2: logging.DEBUG,
+    },
+)
 
-COLOR_MAP: defaultdict[int, str] = defaultdict(lambda: 'blue', {
-    logging.ERROR: 'darkred',
-    logging.WARNING: 'red',
-    logging.DEBUG: 'darkgray',
-})
+COLOR_MAP: defaultdict[int, str] = defaultdict(
+    lambda: 'blue',
+    {
+        logging.ERROR: 'darkred',
+        logging.WARNING: 'red',
+        logging.DEBUG: 'darkgray',
+    },
+)
 
 
 def getLogger(name: str) -> SphinxLoggerAdapter:
@@ -126,7 +135,7 @@ class SphinxLoggerAdapter(logging.LoggerAdapter):
     KEYWORDS = ['type', 'subtype', 'location', 'nonl', 'color', 'once']
 
     def log(  # type: ignore[override]
-        self, level: int | str, msg: str, *args: Any, **kwargs: Any,
+        self, level: int | str, msg: str, *args: Any, **kwargs: Any
     ) -> None:
         if isinstance(level, int):
             super().log(level, msg, *args, **kwargs)
@@ -400,14 +409,14 @@ class _RaiseOnWarningFilter(logging.Filter):
         except (TypeError, ValueError):
             message = record.msg  # use record.msg itself
         if location := getattr(record, 'location', ''):
-            message = f"{location}:{message}"
+            message = f'{location}:{message}'
         if record.exc_info is not None:
             raise SphinxWarning(message) from record.exc_info[1]
         raise SphinxWarning(message)
 
 
 def is_suppressed_warning(
-    warning_type: str, sub_type: str, suppress_warnings: Set[str] | Sequence[str],
+    warning_type: str, sub_type: str, suppress_warnings: Set[str] | Sequence[str]
 ) -> bool:
     """Check whether the warning is suppressed or not."""
     if warning_type is None or len(suppress_warnings) == 0:
@@ -546,11 +555,11 @@ class WarningLogRecordTranslator(SphinxLogRecordTranslator):
 def get_node_location(node: Node) -> str | None:
     source, line = get_source_line(node)
     if source and line:
-        return f"{abspath(source)}:{line}"
+        return f'{abspath(source)}:{line}'
     if source:
-        return f"{abspath(source)}:"
+        return f'{abspath(source)}:'
     if line:
-        return f"<unknown>:{line}"
+        return f'<unknown>:{line}'
     return None
 
 
@@ -580,7 +589,9 @@ class SafeEncodingWriter:
         except UnicodeEncodeError:
             # stream accept only str, not bytes. So, we encode and replace
             # non-encodable characters, then decode them.
-            self.stream.write(data.encode(self.encoding, 'replace').decode(self.encoding))
+            self.stream.write(
+                data.encode(self.encoding, 'replace').decode(self.encoding)
+            )
 
     def flush(self) -> None:
         if hasattr(self.stream, 'flush'):
@@ -112,7 +112,7 @@ def patfilter(names: Iterable[str], pat: str) -> list[str]:
 
 def get_matching_files(
     dirname: str | os.PathLike[str],
-    include_patterns: Iterable[str] = ("**",),
+    include_patterns: Iterable[str] = ('**',),
     exclude_patterns: Iterable[str] = (),
 ) -> Iterator[str]:
     """Get all file names in a directory, recursively.
@@ -132,8 +132,8 @@ def get_matching_files(
 
     for root, dirs, files in os.walk(dirname, followlinks=True):
         relative_root = os.path.relpath(root, dirname)
-        if relative_root == ".":
-            relative_root = ""  # suppress dirname for files on the target dir
+        if relative_root == '.':
+            relative_root = ''  # suppress dirname for files on the target dir
 
         # Filter files
         included_files = []
@@ -14,7 +14,7 @@ def get_node_equation_number(writer: HTML5Translator, node: nodes.math_block) ->
     if writer.builder.config.math_numfig and writer.builder.config.numfig:
         figtype = 'displaymath'
         if writer.builder.name == 'singlehtml':
-            key = f"{writer.docnames[-1]}/{figtype}"  # type: ignore[has-type]
+            key = f'{writer.docnames[-1]}/{figtype}'  # type: ignore[has-type]
         else:
             key = figtype
 
@@ -35,7 +35,7 @@ explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<([^<]*?)>$', re.DOTALL)
 caption_ref_re = explicit_title_re  # b/w compat alias
 
 
-N = TypeVar("N", bound=Node)
+N = TypeVar('N', bound=Node)
 
 
 class NodeMatcher(Generic[N]):
@@ -135,8 +135,11 @@ def apply_source_workaround(node: Element) -> None:
     # * rawsource of term node will have: ``term text : classifier1 : classifier2``
     # * rawsource of classifier node will be None
     if isinstance(node, nodes.classifier) and not node.rawsource:
-        logger.debug('[i18n] PATCH: %r to have source, line and rawsource: %s',
-                     get_full_module_name(node), repr_domxml(node))
+        logger.debug(
+            '[i18n] PATCH: %r to have source, line and rawsource: %s',
+            get_full_module_name(node),
+            repr_domxml(node),
+        )
         definition_list_item = node.parent
         node.source = definition_list_item.source
         node.line = definition_list_item.line - 1  # type: ignore[operator]
@@ -145,24 +148,37 @@ def apply_source_workaround(node: Element) -> None:
         # docutils-0.15 fills in rawsource attribute, but not in source.
         node.source = node.parent.source
     if isinstance(node, nodes.image) and node.source is None:
-        logger.debug('[i18n] PATCH: %r to have source, line: %s',
-                     get_full_module_name(node), repr_domxml(node))
+        logger.debug(
+            '[i18n] PATCH: %r to have source, line: %s',
+            get_full_module_name(node),
+            repr_domxml(node),
+        )
         node.source, node.line = node.parent.source, node.parent.line
     if isinstance(node, nodes.title) and node.source is None:
-        logger.debug('[i18n] PATCH: %r to have source: %s',
-                     get_full_module_name(node), repr_domxml(node))
+        logger.debug(
+            '[i18n] PATCH: %r to have source: %s',
+            get_full_module_name(node),
+            repr_domxml(node),
+        )
         node.source, node.line = node.parent.source, node.parent.line
     if isinstance(node, nodes.term):
-        logger.debug('[i18n] PATCH: %r to have rawsource: %s',
-                     get_full_module_name(node), repr_domxml(node))
+        logger.debug(
+            '[i18n] PATCH: %r to have rawsource: %s',
+            get_full_module_name(node),
+            repr_domxml(node),
+        )
         # strip classifier from rawsource of term
         for classifier in reversed(list(node.parent.findall(nodes.classifier))):
-            node.rawsource = re.sub(r'\s*:\s*%s' % re.escape(classifier.astext()),
-                                    '', node.rawsource)
+            node.rawsource = re.sub(
+                r'\s*:\s*%s' % re.escape(classifier.astext()), '', node.rawsource
+            )
     if isinstance(node, nodes.topic) and node.source is None:
         # docutils-0.18 does not fill the source attribute of topic
-        logger.debug('[i18n] PATCH: %r to have source, line: %s',
-                     get_full_module_name(node), repr_domxml(node))
+        logger.debug(
+            '[i18n] PATCH: %r to have source, line: %s',
+            get_full_module_name(node),
+            repr_domxml(node),
+        )
         node.source, node.line = node.parent.source, node.parent.line
 
     # workaround: literal_block under bullet list (#4913)
@@ -178,14 +194,20 @@ def apply_source_workaround(node: Element) -> None:
         return
 
     # workaround: some docutils nodes doesn't have source, line.
-    if isinstance(node, (
-        nodes.rubric  # #1305 rubric directive
-        | nodes.line  # #1477 line node
-        | nodes.image  # #3093 image directive in substitution
-        | nodes.field_name  # #3335 field list syntax
-    )):
-        logger.debug('[i18n] PATCH: %r to have source and line: %s',
-                     get_full_module_name(node), repr_domxml(node))
+    if isinstance(
+        node,
+        (
+            nodes.rubric  # #1305 rubric directive
+            | nodes.line  # #1477 line node
+            | nodes.image  # #3093 image directive in substitution
+            | nodes.field_name  # #3335 field list syntax
+        ),
+    ):
+        logger.debug(
+            '[i18n] PATCH: %r to have source and line: %s',
+            get_full_module_name(node),
+            repr_domxml(node),
+        )
         try:
             node.source = get_node_source(node)
         except ValueError:
@@ -217,24 +239,36 @@ def is_translatable(node: Node) -> bool:
 
     if isinstance(node, nodes.TextElement):
         if not node.source:
-            logger.debug('[i18n] SKIP %r because no node.source: %s',
-                         get_full_module_name(node), repr_domxml(node))
+            logger.debug(
+                '[i18n] SKIP %r because no node.source: %s',
+                get_full_module_name(node),
+                repr_domxml(node),
+            )
             return False  # built-in message
         if isinstance(node, IGNORED_NODES) and 'translatable' not in node:
-            logger.debug("[i18n] SKIP %r because node is in IGNORED_NODES "
-                         "and no node['translatable']: %s",
-                         get_full_module_name(node), repr_domxml(node))
+            logger.debug(
+                '[i18n] SKIP %r because node is in IGNORED_NODES '
+                "and no node['translatable']: %s",
+                get_full_module_name(node),
+                repr_domxml(node),
+            )
             return False
        if not node.get('translatable', True):
             # not(node['translatable'] == True or node['translatable'] is None)
-            logger.debug("[i18n] SKIP %r because not node['translatable']: %s",
-                         get_full_module_name(node), repr_domxml(node))
+            logger.debug(
+                "[i18n] SKIP %r because not node['translatable']: %s",
+                get_full_module_name(node),
+                repr_domxml(node),
+            )
             return False
     # <field_name>orphan</field_name>
     # XXX ignore all metadata (== docinfo)
     if isinstance(node, nodes.field_name) and (node.children[0] == 'orphan'):
-        logger.debug('[i18n] SKIP %r because orphan node: %s',
-                     get_full_module_name(node), repr_domxml(node))
+        logger.debug(
+            '[i18n] SKIP %r because orphan node: %s',
+            get_full_module_name(node),
+            repr_domxml(node),
+        )
         return False
     return True
 
@@ -249,7 +283,7 @@ LITERAL_TYPE_NODES = (
 )
 IMAGE_TYPE_NODES = (
     nodes.image,
-)
+)  # fmt: skip
 
 
 def extract_messages(doctree: Element) -> Iterable[tuple[Element, str]]:
@@ -272,7 +306,7 @@ def extract_messages(doctree: Element) -> Iterable[tuple[Element, str]]:
         else:
             msg = ''
     elif isinstance(node, nodes.meta):
-        msg = node["content"]
+        msg = node['content']
     else:
         msg = node.rawsource.replace('\n', ' ').strip()  # type: ignore[attr-defined]
 
@@ -325,8 +359,9 @@ def traverse_translatable_index(
         yield node, entries
 
 
-def nested_parse_with_titles(state: RSTState, content: StringList, node: Node,
-                             content_offset: int = 0) -> str:
+def nested_parse_with_titles(
+    state: RSTState, content: StringList, node: Node, content_offset: int = 0
+) -> str:
     """Version of state.nested_parse() that allows titles and does not require
     titles to have the same decoration as the calling document.
 
@@ -359,12 +394,12 @@ def split_explicit_title(text: str) -> tuple[bool, str, str]:
     return False, text, text
 
 
-indextypes = [
-    'single', 'pair', 'double', 'triple', 'see', 'seealso',
-]
+indextypes = ['single', 'pair', 'double', 'triple', 'see', 'seealso']
 
 
-def process_index_entry(entry: str, targetid: str,
-                        ) -> list[tuple[str, str, str, str, str | None]]:
+def process_index_entry(
+    entry: str,
+    targetid: str,
+) -> list[tuple[str, str, str, str, str | None]]:
     from sphinx.domains.python import pairindextypes
 
@@ -380,9 +415,16 @@ def process_index_entry(entry: str, targetid: str,
             value = entry[len(index_type) + 1 :].strip()
             value = f'{pairindextypes[index_type]}; {value}'
             # xref RemovedInSphinx90Warning
-            logger.warning(__('%r is deprecated for index entries (from entry %r). '
-                              "Use 'pair: %s' instead."),
-                           index_type, entry, value, type='index')
+            logger.warning(
+                __(
+                    '%r is deprecated for index entries (from entry %r). '
+                    "Use 'pair: %s' instead."
+                ),
+                index_type,
+                entry,
+                value,
+                type='index',
+            )
             indexentries.append(('pair', value, targetid, main, None))
             break
     else:
@@ -430,13 +472,22 @@ def inline_all_toctrees(
             try:
                 traversed.append(includefile)
                 logger.info(indent + colorfunc(includefile))
-                subtree = inline_all_toctrees(builder, docnameset, includefile,
-                                              builder.env.get_doctree(includefile),
-                                              colorfunc, traversed, indent)
+                subtree = inline_all_toctrees(
+                    builder,
+                    docnameset,
+                    includefile,
+                    builder.env.get_doctree(includefile),
+                    colorfunc,
+                    traversed,
+                    indent,
+                )
                 docnameset.add(includefile)
             except Exception:
-                logger.warning(__('toctree contains ref to nonexisting file %r'),
-                               includefile, location=docname)
+                logger.warning(
+                    __('toctree contains ref to nonexisting file %r'),
+                    includefile,
+                    location=docname,
+                )
             else:
                 sof = addnodes.start_of_file(docname=includefile)
                 sof.children = subtree.children
@@ -478,7 +529,7 @@ def _make_id(string: str) -> str:
 _non_id_chars = re.compile('[^a-zA-Z0-9._]+')
 _non_id_at_ends = re.compile('^[-0-9._]+|-+$')
 _non_id_translate = {
-    0x00f8: 'o',  # o with stroke
+    0x00F8: 'o',  # o with stroke
     0x0111: 'd',  # d with stroke
     0x0127: 'h',  # h with stroke
     0x0131: 'i',  # dotless i
@@ -487,48 +538,52 @@ _non_id_translate = {
     0x0180: 'b',  # b with stroke
     0x0183: 'b',  # b with topbar
     0x0188: 'c',  # c with hook
-    0x018c: 'd',  # d with topbar
+    0x018C: 'd',  # d with topbar
     0x0192: 'f',  # f with hook
     0x0199: 'k',  # k with hook
-    0x019a: 'l',  # l with bar
-    0x019e: 'n',  # n with long right leg
-    0x01a5: 'p',  # p with hook
-    0x01ab: 't',  # t with palatal hook
-    0x01ad: 't',  # t with hook
-    0x01b4: 'y',  # y with hook
-    0x01b6: 'z',  # z with stroke
-    0x01e5: 'g',  # g with stroke
+    0x019A: 'l',  # l with bar
+    0x019E: 'n',  # n with long right leg
+    0x01A5: 'p',  # p with hook
+    0x01AB: 't',  # t with palatal hook
+    0x01AD: 't',  # t with hook
+    0x01B4: 'y',  # y with hook
+    0x01B6: 'z',  # z with stroke
+    0x01E5: 'g',  # g with stroke
     0x0225: 'z',  # z with hook
     0x0234: 'l',  # l with curl
     0x0235: 'n',  # n with curl
     0x0236: 't',  # t with curl
     0x0237: 'j',  # dotless j
-    0x023c: 'c',  # c with stroke
-    0x023f: 's',  # s with swash tail
+    0x023C: 'c',  # c with stroke
+    0x023F: 's',  # s with swash tail
     0x0240: 'z',  # z with swash tail
     0x0247: 'e',  # e with stroke
     0x0249: 'j',  # j with stroke
-    0x024b: 'q',  # q with hook tail
-    0x024d: 'r',  # r with stroke
-    0x024f: 'y',  # y with stroke
+    0x024B: 'q',  # q with hook tail
+    0x024D: 'r',  # r with stroke
+    0x024F: 'y',  # y with stroke
 }
 _non_id_translate_digraphs = {
-    0x00df: 'sz',  # ligature sz
-    0x00e6: 'ae',  # ae
+    0x00DF: 'sz',  # ligature sz
+    0x00E6: 'ae',  # ae
     0x0153: 'oe',  # ligature oe
     0x0238: 'db',  # db digraph
     0x0239: 'qp',  # qp digraph
 }
 
 
-def make_id(env: BuildEnvironment, document: nodes.document,
-            prefix: str = '', term: str | None = None) -> str:
+def make_id(
+    env: BuildEnvironment,
+    document: nodes.document,
+    prefix: str = '',
+    term: str | None = None,
+) -> str:
     """Generate an appropriate node_id for given *prefix* and *term*."""
     node_id = None
     if prefix:
-        idformat = prefix + "-%s"
+        idformat = prefix + '-%s'
     else:
-        idformat = (document.settings.id_prefix or "id") + "%s"
+        idformat = (document.settings.id_prefix or 'id') + '%s'
 
     # try to generate node_id by *term*
     if prefix and term:
@@ -547,18 +602,26 @@ def make_id(env: BuildEnvironment, document: nodes.document,
     return node_id
 
 
-def find_pending_xref_condition(node: addnodes.pending_xref, condition: str,
+def find_pending_xref_condition(
+    node: addnodes.pending_xref, condition: str
 ) -> Element | None:
     """Pick matched pending_xref_condition node up from the pending_xref."""
     for subnode in node:
-        if (isinstance(subnode, addnodes.pending_xref_condition) and
-                subnode.get('condition') == condition):
+        if (
+            isinstance(subnode, addnodes.pending_xref_condition)
+            and subnode.get('condition') == condition
+        ):
             return subnode
     return None
 
 
-def make_refnode(builder: Builder, fromdocname: str, todocname: str, targetid: str | None,
-                 child: Node | list[Node], title: str | None = None,
+def make_refnode(
+    builder: Builder,
+    fromdocname: str,
+    todocname: str,
+    targetid: str | None,
+    child: Node | list[Node],
+    title: str | None = None,
 ) -> nodes.reference:
     """Shortcut to create a reference node."""
     node = nodes.reference('', '', internal=True)
@@ -566,8 +629,9 @@ def make_refnode(builder: Builder, fromdocname: str, todocname: str, targetid: s
         node['refid'] = targetid
     else:
         if targetid:
-            node['refuri'] = (builder.get_relative_uri(fromdocname, todocname) +
-                              '#' + targetid)
+            node['refuri'] = (
+                builder.get_relative_uri(fromdocname, todocname) + '#' + targetid
+            )
         else:
             node['refuri'] = builder.get_relative_uri(fromdocname, todocname)
     if title:
@@ -577,8 +641,9 @@ def make_refnode(builder: Builder, fromdocname: str, todocname: str, targetid: s
 
 
 def set_source_info(directive: Directive, node: Node) -> None:
-    node.source, node.line = \
-        directive.state_machine.get_source_and_line(directive.lineno)
+    node.source, node.line = directive.state_machine.get_source_and_line(
+        directive.lineno
+    )
 
 
 def set_role_source_info(inliner: Inliner, lineno: int, node: Node) -> None:
@@ -635,7 +700,8 @@ def _only_node_keep_children(node: addnodes.only, tags: Tags) -> bool:
         logger.warning(
             __('exception while evaluating only directive expression: %s'),
             err,
-            location=node)
+            location=node,
+        )
         return True
 
 
@@ -651,10 +717,10 @@ def _copy_except__document(el: Element) -> Element:
     newnode.rawsource = el.rawsource
     newnode.tagname = el.tagname
     # copied in Element.copy()
-    newnode.attributes = {k: (v
-                              if k not in {'ids', 'classes', 'names', 'dupnames', 'backrefs'}
-                              else v[:])
-                          for k, v in el.attributes.items()}
+    newnode.attributes = {
+        k: (v if k not in {'ids', 'classes', 'names', 'dupnames', 'backrefs'} else v[:])
+        for k, v in el.attributes.items()
+    }
     newnode.line = el.line
     newnode.source = el.source
     return newnode
@@ -25,7 +25,7 @@ if TYPE_CHECKING:
 # Define SEP as a manifest constant, not so much because we expect it to change
 # in the future as to avoid the suspicion that a stray "/" in the code is a
 # hangover from more *nix-oriented origins.
-SEP = "/"
+SEP = '/'
 
 
 def os_path(canonical_path: str, /) -> str:
@@ -115,21 +115,23 @@ def copyfile(
         raise FileNotFoundError(msg)
 
     if (
-        not (dest_exists := dest.exists()) or
+        not (dest_exists := dest.exists())
         # comparison must be done using shallow=False since
         # two different files might have the same size
-        not filecmp.cmp(source, dest, shallow=False)
+        or not filecmp.cmp(source, dest, shallow=False)
     ):
         if not force and dest_exists:
             # sphinx.util.logging imports sphinx.util.osutil,
             # so use a local import to avoid circular imports
             from sphinx.util import logging
 
             logger = logging.getLogger(__name__)
 
-            msg = __('Aborted attempted copy from %s to %s '
-                     '(the destination path has existing data).')
-            logger.warning(msg, source, dest,
-                           type='misc', subtype='copy_overwrite')
+            msg = __(
+                'Aborted attempted copy from %s to %s '
+                '(the destination path has existing data).'
+            )
+            logger.warning(msg, source, dest, type='misc', subtype='copy_overwrite')
             return
 
     shutil.copyfile(source, dest)
@@ -149,8 +151,9 @@ def make_filename_from_project(project: str) -> str:
     return make_filename(project.removesuffix(' Documentation')).lower()
 
 
-def relpath(path: str | os.PathLike[str],
-            start: str | os.PathLike[str] | None = os.curdir) -> str:
+def relpath(
+    path: str | os.PathLike[str], start: str | os.PathLike[str] | None = os.curdir
+) -> str:
     """Return a relative filepath to *path* either from the current directory or
     from an optional *start* directory.
 
@@ -241,7 +244,7 @@ class FileAvoidWrite:
         return self
 
     def __exit__(
-        self, exc_type: type[Exception], exc_value: Exception, traceback: Any,
+        self, exc_type: type[Exception], exc_value: Exception, traceback: Any
     ) -> bool:
         self.close()
         return True
@@ -10,6 +10,7 @@ from typing import TYPE_CHECKING, Any
 
 try:
     import multiprocessing
+
     HAS_MULTIPROCESSING = True
 except ImportError:
     HAS_MULTIPROCESSING = False
@@ -33,7 +34,7 @@ class SerialTasks:
         pass
 
     def add_task(
-        self, task_func: Callable, arg: Any = None, result_func: Callable | None = None,
+        self, task_func: Callable, arg: Any = None, result_func: Callable | None = None
     ) -> None:
         if arg is not None:
             res = task_func(arg)
@@ -83,7 +84,7 @@ class ParallelTasks:
         pipe.send((failed, collector.logs, ret))
 
     def add_task(
-        self, task_func: Callable, arg: Any = None, result_func: Callable | None = None,
+        self, task_func: Callable, arg: Any = None, result_func: Callable | None = None
     ) -> None:
         tid = self._taskid
         self._taskid += 1
@@ -53,7 +53,7 @@ def nested_parse_to_nodes(
     """
     document = state.document
     content = _text_to_string_list(
-        text, source=source, tab_width=document.settings.tab_width,
+        text, source=source, tab_width=document.settings.tab_width
     )
     node = Element()  # Anonymous container for parsing
     node.document = document
@@ -62,7 +62,9 @@ def nested_parse_to_nodes(
         state.nested_parse(content, offset, node, match_titles=allow_section_headings)
     else:
         with _fresh_title_style_context(state):
-            state.nested_parse(content, offset, node, match_titles=allow_section_headings)
+            state.nested_parse(
+                content, offset, node, match_titles=allow_section_headings
+            )
     return node.children
 
 
@@ -84,7 +86,7 @@ def _fresh_title_style_context(state: RSTState) -> Iterator[None]:
 
 
 def _text_to_string_list(
-    text: str | StringList, /, *, source: str, tab_width: int,
+    text: str | StringList, /, *, source: str, tab_width: int
 ) -> StringList:
     # Doesn't really belong in this module, but avoids circular imports.
     if isinstance(text, StringList):
@@ -10,7 +10,7 @@ LEN_DEPTH = 22
 
 DEPTH_CHUNK_LEN = struct.pack('!i', 10)
 DEPTH_CHUNK_START = b'tEXtDepth\x00'
-IEND_CHUNK = b'\x00\x00\x00\x00IEND\xAE\x42\x60\x82'
+IEND_CHUNK = b'\x00\x00\x00\x00IEND\xae\x42\x60\x82'
 
 
 def read_png_depth(filename: str) -> int | None:
@@ -37,7 +37,7 @@ def write_png_depth(filename: str, depth: int) -> None:
         # overwrite it with the depth chunk
         f.write(DEPTH_CHUNK_LEN + DEPTH_CHUNK_START + data)
         # calculate the checksum over chunk name and data
-        crc = binascii.crc32(DEPTH_CHUNK_START + data) & 0xffffffff
+        crc = binascii.crc32(DEPTH_CHUNK_START + data) & 0xFFFFFFFF
         f.write(struct.pack('!I', crc))
         # replace the IEND chunk
         f.write(IEND_CHUNK)
@@ -11,8 +11,10 @@ from urllib3.exceptions import InsecureRequestWarning
 
 import sphinx
 
-_USER_AGENT = (f'Mozilla/5.0 (X11; Linux x86_64; rv:100.0) Gecko/20100101 Firefox/100.0 '
-               f'Sphinx/{sphinx.__version__}')
+_USER_AGENT = (
+    f'Mozilla/5.0 (X11; Linux x86_64; rv:100.0) Gecko/20100101 Firefox/100.0 '
+    f'Sphinx/{sphinx.__version__}'
+)
 
 
 def _get_tls_cacert(url: str, certs: str | dict[str, str] | None) -> str | bool:
@@ -49,7 +51,9 @@ def head(url: str, **kwargs: Any) -> requests.Response:
 
 class _Session(requests.Session):
     def request(  # type: ignore[override]
-        self, method: str, url: str,
+        self,
+        method: str,
+        url: str,
         _user_agent: str = '',
         _tls_info: tuple[bool, str | dict[str, str] | None] = (),  # type: ignore[assignment]
         **kwargs: Any,
@@ -72,5 +76,5 @@ class _Session(requests.Session):
 
         with warnings.catch_warnings():
             # ignore InsecureRequestWarning if verify=False
-            warnings.filterwarnings("ignore", category=InsecureRequestWarning)
+            warnings.filterwarnings('ignore', category=InsecureRequestWarning)
             return super().request(method, url, **kwargs)
@@ -29,8 +29,8 @@ symbols_re = re.compile(r'([!-\-/:-@\[-`{-~])')  # symbols without dot(0x2e)
 SECTIONING_CHARS = ['=', '-', '~']
 
 # width of characters
-WIDECHARS: dict[str, str] = defaultdict(lambda: "WF")  # WF: Wide + Full-width
-WIDECHARS["ja"] = "WFA"  # In Japanese, Ambiguous characters also have double width
+WIDECHARS: dict[str, str] = defaultdict(lambda: 'WF')  # WF: Wide + Full-width
+WIDECHARS['ja'] = 'WFA'  # In Japanese, Ambiguous characters also have double width
 
 
 def escape(text: str) -> str:
@@ -41,6 +41,7 @@ def escape(text: str) -> str:
 
 def textwidth(text: str, widechars: str = 'WF') -> int:
     """Get width of text."""
+
     def charwidth(char: str, widechars: str) -> int:
         if east_asian_width(char) in widechars:
             return 2
@@ -103,7 +104,8 @@ def append_epilog(content: StringList, epilog: str) -> None:
     if epilog:
         if len(content) > 0:
             source, lineno = content.info(-1)
-            lineno = cast(int, lineno)  # lineno will never be None, since len(content) > 0
+            # lineno will never be None, since len(content) > 0
+            lineno = cast(int, lineno)
         else:
             source = '<generated>'
             lineno = 0
@@ -69,8 +69,11 @@ class Tags:
 
     @property
     def tags(self) -> dict[str, Literal[True]]:
-        warnings.warn('Tags.tags is deprecated, use methods on Tags.',
-                      RemovedInSphinx90Warning, stacklevel=2)
+        warnings.warn(
+            'Tags.tags is deprecated, use methods on Tags.',
+            RemovedInSphinx90Warning,
+            stacklevel=2,
+        )
         return dict.fromkeys(self._tags, True)
 
     def eval_condition(self, condition: str) -> bool:
@ -49,7 +49,7 @@ class FileRenderer(BaseRenderer):
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def render_from_file(
|
def render_from_file(
|
||||||
cls: type[FileRenderer], filename: str, context: dict[str, Any],
|
cls: type[FileRenderer], filename: str, context: dict[str, Any]
|
||||||
) -> str:
|
) -> str:
|
||||||
dirname = os.path.dirname(filename)
|
dirname = os.path.dirname(filename)
|
||||||
basename = os.path.basename(filename)
|
basename = os.path.basename(filename)
|
||||||
@ -57,21 +57,26 @@ class FileRenderer(BaseRenderer):
|
|||||||
|
|
||||||
|
|
||||||
class SphinxRenderer(FileRenderer):
|
class SphinxRenderer(FileRenderer):
|
||||||
def __init__(self, template_path: Sequence[str | os.PathLike[str]] | None = None) -> None:
|
def __init__(
|
||||||
|
self, template_path: Sequence[str | os.PathLike[str]] | None = None
|
||||||
|
) -> None:
|
||||||
if template_path is None:
|
if template_path is None:
|
||||||
template_path = os.path.join(package_dir, 'templates')
|
template_path = os.path.join(package_dir, 'templates')
|
||||||
super().__init__(template_path)
|
super().__init__(template_path)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def render_from_file(
|
def render_from_file(
|
||||||
cls: type[FileRenderer], filename: str, context: dict[str, Any],
|
cls: type[FileRenderer], filename: str, context: dict[str, Any]
|
||||||
) -> str:
|
) -> str:
|
||||||
return FileRenderer.render_from_file(filename, context)
|
return FileRenderer.render_from_file(filename, context)
|
||||||
|
|
||||||
|
|
||||||
class LaTeXRenderer(SphinxRenderer):
|
class LaTeXRenderer(SphinxRenderer):
|
||||||
def __init__(self, template_path: Sequence[str | os.PathLike[str]] | None = None,
|
def __init__(
|
||||||
latex_engine: str | None = None) -> None:
|
self,
|
||||||
|
template_path: Sequence[str | os.PathLike[str]] | None = None,
|
||||||
|
latex_engine: str | None = None,
|
||||||
|
) -> None:
|
||||||
if template_path is None:
|
if template_path is None:
|
||||||
template_path = [os.path.join(package_dir, 'templates', 'latex')]
|
template_path = [os.path.join(package_dir, 'templates', 'latex')]
|
||||||
super().__init__(template_path)
|
super().__init__(template_path)
|
||||||
@@ -93,8 +98,11 @@ class LaTeXRenderer(SphinxRenderer):


 class ReSTRenderer(SphinxRenderer):
-    def __init__(self, template_path: Sequence[str | os.PathLike[str]] | None = None,
-                 language: str | None = None) -> None:
+    def __init__(
+        self,
+        template_path: Sequence[str | os.PathLike[str]] | None = None,
+        language: str | None = None,
+    ) -> None:
         super().__init__(template_path)

         # add language to environment
@@ -109,9 +117,12 @@ class ReSTRenderer(SphinxRenderer):
 class SphinxTemplateLoader(BaseLoader):
     """A loader supporting template inheritance"""

-    def __init__(self, confdir: str | os.PathLike[str],
+    def __init__(
+        self,
+        confdir: str | os.PathLike[str],
         templates_paths: Sequence[str | os.PathLike[str]],
-        system_templates_paths: Sequence[str | os.PathLike[str]]) -> None:
+        system_templates_paths: Sequence[str | os.PathLike[str]],
+    ) -> None:
         self.loaders = []
         self.sysloaders = []

@@ -92,8 +92,9 @@ PathMatcher: TypeAlias = Callable[[str], bool]

 # common role functions
 if TYPE_CHECKING:
+
     class RoleFunction(Protocol):
-        def __call__(
+        def __call__(  # NoQA: E704
             self,
             name: str,
             rawtext: str,
@@ -103,8 +104,8 @@ if TYPE_CHECKING:
             /,
             options: dict[str, Any] | None = None,
             content: Sequence[str] = (),
-        ) -> tuple[list[nodes.Node], list[nodes.system_message]]:
-            ...
+        ) -> tuple[list[nodes.Node], list[nodes.system_message]]: ...
+
 else:
     RoleFunction: TypeAlias = Callable[
         [str, str, str, int, Inliner, dict[str, Any], Sequence[str]],
@@ -126,19 +127,17 @@ if TYPE_CHECKING:
     _T_co = TypeVar('_T_co', str, bytes, covariant=True)

     class _ReadableStream(Protocol[_T_co]):
-        def read(self, size: int = ...) -> _T_co:
-            ...
+        def read(self, size: int = ...) -> _T_co: ...  # NoQA: E704

-        def __enter__(self) -> Self:
-            ...
+        def __enter__(self) -> Self: ...  # NoQA: E704

-        def __exit__(
+        def __exit__(  # NoQA: E704
             self,
             exc_type: type[BaseException] | None,
             exc_val: BaseException | None,
-            exc_tb: TracebackType | None
-        ) -> None:
-            ...
+            exc_tb: TracebackType | None,
+        ) -> None: ...
+

 # inventory data on memory
 InventoryItem: TypeAlias = tuple[
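
Note: collapsing `...` onto the `def` line is the conventional stub style for `Protocol` members; pycodestyle flags it as E704 ("statement on same line as def"), hence the `NoQA: E704` suppressions. A self-contained sketch of the same pattern (`Readable` and `head` are illustrative names, not Sphinx APIs):

    import io
    from typing import Protocol, TypeVar

    _T_co = TypeVar('_T_co', str, bytes, covariant=True)

    class Readable(Protocol[_T_co]):
        # One-line ellipsis body: idiomatic for protocol stubs,
        # but reported as E704 unless suppressed.
        def read(self, size: int = -1) -> _T_co: ...

    def head(stream: Readable[str]) -> str:
        return stream.read(4)

    print(head(io.StringIO('hello')))  # hell
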
@@ -189,7 +188,9 @@ def get_type_hints(
     from sphinx.util.inspect import safe_getattr  # lazy loading

     try:
-        return typing.get_type_hints(obj, globalns, localns, include_extras=include_extras)
+        return typing.get_type_hints(
+            obj, globalns, localns, include_extras=include_extras
+        )
     except NameError:
         # Failed to evaluate ForwardRef (maybe TYPE_CHECKING)
         return safe_getattr(obj, '__annotations__', {})
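
Note: `include_extras=True` asks `typing.get_type_hints` to keep `Annotated` metadata rather than stripping it, and the `NameError` branch covers forward references that only resolve under `TYPE_CHECKING`. A small standard-library illustration of both behaviours:

    import typing
    from typing import Annotated

    def f(x: Annotated[int, 'units: px']) -> int:
        return x

    print(typing.get_type_hints(f))                       # metadata stripped
    print(typing.get_type_hints(f, include_extras=True))  # Annotated preserved

    def g(x: 'ImportedOnlyForTypeChecking') -> None: ...

    try:
        typing.get_type_hints(g)
    except NameError:
        # the fallback path: raw, unevaluated annotations
        print(g.__annotations__)
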
@@ -212,7 +213,10 @@ def is_system_TypeVar(typ: Any) -> bool:

 def _is_annotated_form(obj: Any) -> TypeIs[Annotated[Any, ...]]:
     """Check if *obj* is an annotated type."""
-    return typing.get_origin(obj) is Annotated or str(obj).startswith('typing.Annotated')
+    return (
+        typing.get_origin(obj) is Annotated
+        or str(obj).startswith('typing.Annotated')
+    )  # fmt: skip


 def _is_unpack_form(obj: Any) -> bool:
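
Note: `# fmt: skip` (understood by both Black and Ruff's formatter) exempts the statement it trails from reformatting; here it preserves the two-clause layout of the `return`, which the formatter would otherwise collapse back onto one line once it fits. A generic sketch:

    def is_small(n: int) -> bool:
        # Without '# fmt: skip' the formatter would rejoin this
        # parenthesised return into a single line.
        return (
            n >= 0
            and n < 10
        )  # fmt: skip

    print(is_small(5), is_small(42))  # True False
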
@@ -286,18 +290,21 @@ def restify(cls: Any, mode: _RestifyMode = 'fully-qualified-except-typing') -> s
         elif dataclasses.is_dataclass(m):
             # use restify for the repr of field values rather than repr
             d_fields = ', '.join([
-                fr"{f.name}=\ {restify(getattr(m, f.name), mode)}"
-                for f in dataclasses.fields(m) if f.repr
+                rf'{f.name}=\ {restify(getattr(m, f.name), mode)}'
+                for f in dataclasses.fields(m)
+                if f.repr
             ])
-            meta_args.append(fr'{restify(type(m), mode)}\ ({d_fields})')
+            meta_args.append(rf'{restify(type(m), mode)}\ ({d_fields})')
         else:
             meta_args.append(repr(m))
         meta = ', '.join(meta_args)
         if sys.version_info[:2] <= (3, 11):
             # Hardcoded to fix errors on Python 3.11 and earlier.
-            return fr':py:class:`~typing.Annotated`\ [{args}, {meta}]'
-        return (f':py:class:`{module_prefix}{cls.__module__}.{cls.__name__}`'
-                fr'\ [{args}, {meta}]')
+            return rf':py:class:`~typing.Annotated`\ [{args}, {meta}]'
+        return (
+            f':py:class:`{module_prefix}{cls.__module__}.{cls.__name__}`'
+            rf'\ [{args}, {meta}]'
+        )
     elif isinstance(cls, NewType):
         return f':py:class:`{module_prefix}{cls.__module__}.{cls.__name__}`'  # type: ignore[attr-defined]
     elif isinstance(cls, types.UnionType):
@@ -307,14 +314,14 @@ def restify(cls: Any, mode: _RestifyMode = 'fully-qualified-except-typing') -> s
     elif cls.__module__ in ('__builtin__', 'builtins'):
         if hasattr(cls, '__args__'):
             if not cls.__args__:  # Empty tuple, list, ...
-                return fr':py:class:`{cls.__name__}`\ [{cls.__args__!r}]'
+                return rf':py:class:`{cls.__name__}`\ [{cls.__args__!r}]'

-            concatenated_args = ', '.join(restify(arg, mode) for arg in cls.__args__)
-            return fr':py:class:`{cls.__name__}`\ [{concatenated_args}]'
+            concatenated_args = ', '.join(
+                restify(arg, mode) for arg in cls.__args__
+            )
+            return rf':py:class:`{cls.__name__}`\ [{concatenated_args}]'
         return f':py:class:`{cls.__name__}`'
-    elif (isgenericalias(cls)
-          and cls_module_is_typing
-          and cls.__origin__ is Union):
+    elif isgenericalias(cls) and cls_module_is_typing and cls.__origin__ is Union:
         # *cls* is defined in ``typing``, and thus ``__args__`` must exist
         return ' | '.join(restify(a, mode) for a in cls.__args__)
     elif isgenericalias(cls):
@@ -338,19 +345,20 @@ def restify(cls: Any, mode: _RestifyMode = 'fully-qualified-except-typing') -> s
         if (
             (cls_module_is_typing and cls.__name__ == 'Callable')
             or (cls.__module__ == 'collections.abc' and cls.__name__ == 'Callable')
-        ):
+        ):  # fmt: skip
             args = ', '.join(restify(a, mode) for a in __args__[:-1])
             returns = restify(__args__[-1], mode)
-            return fr'{text}\ [[{args}], {returns}]'
+            return rf'{text}\ [[{args}], {returns}]'

         if cls_module_is_typing and cls.__origin__.__name__ == 'Literal':
-            args = ', '.join(_format_literal_arg_restify(a, mode=mode)
-                             for a in cls.__args__)
-            return fr'{text}\ [{args}]'
+            args = ', '.join(
+                _format_literal_arg_restify(a, mode=mode) for a in cls.__args__
+            )
+            return rf'{text}\ [{args}]'

         # generic representation of the parameters
         args = ', '.join(restify(a, mode) for a in __args__)
-        return fr'{text}\ [{args}]'
+        return rf'{text}\ [{args}]'
     elif isinstance(cls, typing._SpecialForm):
         return f':py:obj:`~{cls.__module__}.{cls.__name__}`'  # type: ignore[attr-defined]
     elif sys.version_info[:2] >= (3, 11) and cls is typing.Any:
@@ -375,7 +383,9 @@ def _format_literal_arg_restify(arg: Any, /, *, mode: str) -> str:
         enum_cls = arg.__class__
         if mode == 'smart' or enum_cls.__module__ == 'typing':
             # MyEnum.member
-            return f':py:attr:`~{enum_cls.__module__}.{enum_cls.__qualname__}.{arg.name}`'
+            return (
+                f':py:attr:`~{enum_cls.__module__}.{enum_cls.__qualname__}.{arg.name}`'
+            )
         # module.MyEnum.member
         return f':py:attr:`{enum_cls.__module__}.{enum_cls.__qualname__}.{arg.name}`'
     return repr(arg)
@@ -431,7 +441,10 @@ def stringify_annotation(
     # Extract the annotation's base type by considering formattable cases
     if isinstance(annotation, TypeVar) and not _is_unpack_form(annotation):
         # typing_extensions.Unpack is incorrectly determined as a TypeVar
-        if annotation_module_is_typing and mode in {'fully-qualified-except-typing', 'smart'}:
+        if annotation_module_is_typing and mode in {
+            'fully-qualified-except-typing',
+            'smart',
+        }:
             return annotation_name
         return module_prefix + f'{annotation_module}.{annotation_name}'
     elif isinstance(annotation, NewType):
@@ -461,7 +474,9 @@ def stringify_annotation(

     module_prefix = f'{annotation_module}.'
     annotation_forward_arg: str | None = getattr(annotation, '__forward_arg__', None)
-    if annotation_qualname or (annotation_module_is_typing and not annotation_forward_arg):
+    if annotation_qualname or (
+        annotation_module_is_typing and not annotation_forward_arg
+    ):
         if mode == 'smart':
             module_prefix = f'~{module_prefix}'
         if annotation_module_is_typing and mode == 'fully-qualified-except-typing':
@@ -484,7 +499,8 @@ def stringify_annotation(
             # in this case, we know that the annotation is a member
             # of ``typing`` and all of them define ``__origin__``
             qualname = stringify_annotation(
-                annotation.__origin__, 'fully-qualified-except-typing',
+                annotation.__origin__,
+                'fully-qualified-except-typing',
             ).replace('typing.', '')  # ex. Union
         elif annotation_qualname:
             qualname = annotation_qualname
@@ -505,21 +521,25 @@ def stringify_annotation(
     if (
         qualname in {'Union', 'types.UnionType'}
         and all(getattr(a, '__origin__', ...) is typing.Literal for a in annotation_args)
-    ):
+    ):  # fmt: skip
         # special case to flatten a Union of Literals into a literal
         flattened_args = typing.Literal[annotation_args].__args__  # type: ignore[attr-defined]
-        args = ', '.join(_format_literal_arg_stringify(a, mode=mode)
-                         for a in flattened_args)
+        args = ', '.join(
+            _format_literal_arg_stringify(a, mode=mode) for a in flattened_args
+        )
         return f'{module_prefix}Literal[{args}]'
     if qualname in {'Optional', 'Union', 'types.UnionType'}:
         return ' | '.join(stringify_annotation(a, mode) for a in annotation_args)
     elif qualname == 'Callable':
-        args = ', '.join(stringify_annotation(a, mode) for a in annotation_args[:-1])
+        args = ', '.join(
+            stringify_annotation(a, mode) for a in annotation_args[:-1]
+        )
         returns = stringify_annotation(annotation_args[-1], mode)
         return f'{module_prefix}Callable[[{args}], {returns}]'
     elif qualname == 'Literal':
-        args = ', '.join(_format_literal_arg_stringify(a, mode=mode)
-                         for a in annotation_args)
+        args = ', '.join(
+            _format_literal_arg_stringify(a, mode=mode) for a in annotation_args
+        )
        return f'{module_prefix}Literal[{args}]'
     elif _is_annotated_form(annotation):  # for py310+
         args = stringify_annotation(annotation_args[0], mode)
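
Note: the "flatten a Union of Literals" special case works because re-subscripting `typing.Literal` with literal types flattens the nested members (and deduplicates them, per PEP 586). A quick standard-library demonstration of that mechanism:

    import typing
    from typing import Literal

    union = Literal[1, 2] | Literal[3]
    annotation_args = typing.get_args(union)       # (Literal[1, 2], Literal[3])
    flattened = Literal[annotation_args].__args__  # re-subscripting flattens
    print(flattened)  # (1, 2, 3)
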
@@ -530,10 +550,13 @@ def stringify_annotation(
         elif dataclasses.is_dataclass(m):
             # use stringify_annotation for the repr of field values rather than repr
             d_fields = ', '.join([
-                f"{f.name}={stringify_annotation(getattr(m, f.name), mode)}"
-                for f in dataclasses.fields(m) if f.repr
+                f'{f.name}={stringify_annotation(getattr(m, f.name), mode)}'
+                for f in dataclasses.fields(m)
+                if f.repr
             ])
-            meta_args.append(f'{stringify_annotation(type(m), mode)}({d_fields})')
+            meta_args.append(
+                f'{stringify_annotation(type(m), mode)}({d_fields})'
+            )
         else:
             meta_args.append(repr(m))
         meta = ', '.join(meta_args)
@@ -568,7 +591,7 @@ def _format_literal_arg_stringify(arg: Any, /, *, mode: str) -> str:

 # deprecated name -> (object to return, canonical path or empty string, removal version)
 _DEPRECATED_OBJECTS: dict[str, tuple[Any, str, tuple[int, int]]] = {
-}
+}  # fmt: skip


 def __getattr__(name: str) -> Any: