Merge pull request #8976 from tk0miya/refactor_vartypes_util

refactor: Use PEP-526 based variable annotation (sphinx.util)

Commit 4bb151bf36
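As a quick illustration of what the refactor does (a sketch, not part of the diff): PEP 526 moves the type out of a trailing `# type:` comment and into the assignment itself, where type checkers read it from the AST instead of parsing comments:

    from typing import Dict, List

    # Before: PEP 484 type comments
    codes = {}  # type: Dict[str, str]
    res = []  # type: List[str]

    # After: PEP 526 variable annotations
    codes: Dict[str, str] = {}
    res: List[str] = []

Runtime behavior is identical; only the annotation syntax changes.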
sphinx/util/__init__.py
@@ -47,8 +47,8 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 # Generally useful regular expressions.
-ws_re = re.compile(r'\s+')
-url_re = re.compile(r'(?P<schema>.+)://.*')
+ws_re: Pattern = re.compile(r'\s+')
+url_re: Pattern = re.compile(r'(?P<schema>.+)://.*')
 
 
 # High-level utility functions.
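A note on the new `Pattern` annotation: it is `typing.Pattern` (the same spelling added to the `typing` import in the console.py hunk further down). A minimal self-contained sketch:

    import re
    from typing import Pattern

    ws_re: Pattern = re.compile(r'\s+')
    assert ws_re.sub(' ', 'a \t b') == 'a b'

At the time of this commit, `typing.Pattern` was the portable spelling across the Python versions Sphinx supported; on Python 3.8+ the `re.Pattern` class can be used directly, and `typing.Pattern` was removed in Python 3.12.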
@@ -107,7 +107,7 @@ class FilenameUniqDict(dict):
     appear in. Used for images and downloadable files in the environment.
     """
     def __init__(self) -> None:
-        self._existing = set()  # type: Set[str]
+        self._existing: Set[str] = set()
 
     def add_file(self, docname: str, newfile: str) -> str:
         if newfile in self:
@@ -379,7 +379,7 @@ def format_exception_cut_frames(x: int = 1) -> str:
     """Format an exception with traceback, but only the last x frames."""
     typ, val, tb = sys.exc_info()
     # res = ['Traceback (most recent call last):\n']
-    res = []  # type: List[str]
+    res: List[str] = []
     tbres = traceback.format_tb(tb)
     res += tbres[-x:]
     res += traceback.format_exception_only(typ, val)

sphinx/util/cfamily.py
@@ -98,7 +98,7 @@ class ASTBaseBase:
             return False
         return True
 
-    __hash__ = None  # type: Callable[[], int]
+    __hash__: Callable[[], int] = None
 
     def clone(self) -> Any:
        return deepcopy(self)
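The `__hash__` line deserves a note: setting `__hash__ = None` is the standard Python idiom for making instances unhashable (the interpreter itself does this for any class that defines `__eq__` without `__hash__`). A sketch with a hypothetical class:

    class Point:
        def __eq__(self, other: object) -> bool:
            return isinstance(other, Point)

    # Defining __eq__ alone already sets __hash__ to None:
    print(Point.__hash__ is None)  # True
    try:
        hash(Point())
    except TypeError as exc:
        print(exc)  # unhashable type: 'Point'

Strictly, `None` is not a `Callable[[], int]`, so a strict checker would flag the annotated assignment; the annotation documents the slot's expected signature rather than the assigned value.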
@@ -223,9 +223,9 @@ class BaseParser:
 
         self.pos = 0
         self.end = len(self.definition)
-        self.last_match = None  # type: Match
-        self._previous_state = (0, None)  # type: Tuple[int, Match]
-        self.otherErrors = []  # type: List[DefinitionError]
+        self.last_match: Match = None
+        self._previous_state: Tuple[int, Match] = (0, None)
+        self.otherErrors: List[DefinitionError] = []
 
         # in our tests the following is set to False to capture bad parsing
         self.allowFallbackExpressionParsing = True
@@ -356,7 +356,7 @@ class BaseParser:
         # TODO: add handling of string literals and similar
         brackets = {'(': ')', '[': ']', '{': '}'}
         startPos = self.pos
-        symbols = []  # type: List[str]
+        symbols: List[str] = []
         while not self.eof:
             if len(symbols) == 0 and self.current_char in end:
                 break

sphinx/util/console.py
@@ -11,7 +11,7 @@
 import os
 import re
 import sys
-from typing import Dict
+from typing import Dict, Pattern
 
 try:
     # check if colorama is installed to support color on Windows
@@ -20,8 +20,8 @@ except ImportError:
     colorama = None
 
 
-_ansi_re = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm')
-codes = {}  # type: Dict[str, str]
+_ansi_re: Pattern = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm')
+codes: Dict[str, str] = {}
 
 
 def terminal_safe(s: str) -> str:
@@ -44,7 +44,7 @@ def get_terminal_width() -> int:
     return terminal_width
 
 
-_tw = get_terminal_width()
+_tw: int = get_terminal_width()
 
 
 def term_width_line(text: str) -> str:

sphinx/util/docfields.py
@@ -27,7 +27,7 @@ def _is_single_paragraph(node: nodes.field_body) -> bool:
     if len(node) == 0:
         return False
     elif len(node) > 1:
-        for subnode in node[1:]:  # type: nodes.Node
+        for subnode in node[1:]:  # type: Node
             if not isinstance(subnode, nodes.system_message):
                 return False
     if isinstance(node[0], nodes.paragraph):
@@ -195,7 +195,7 @@ class TypedField(GroupedField):
         fieldname = nodes.field_name('', self.label)
         if len(items) == 1 and self.can_collapse:
             fieldarg, content = items[0]
-            bodynode = handle_item(fieldarg, content)  # type: nodes.Node
+            bodynode: Node = handle_item(fieldarg, content)
         else:
             bodynode = self.list_type()
             for fieldarg, content in items:
@@ -209,7 +209,7 @@ class DocFieldTransformer:
     Transforms field lists in "doc field" syntax into better-looking
     equivalents, using the field type definitions given on a domain.
     """
-    typemap = None  # type: Dict[str, Tuple[Field, bool]]
+    typemap: Dict[str, Tuple[Field, bool]] = None
 
     def __init__(self, directive: "ObjectDescription") -> None:
         self.directive = directive
@@ -227,9 +227,9 @@ class DocFieldTransformer:
         """Transform a single field list *node*."""
         typemap = self.typemap
 
-        entries = []  # type: List[Union[nodes.field, Tuple[Field, Any]]]
-        groupindices = {}  # type: Dict[str, int]
-        types = {}  # type: Dict[str, Dict]
+        entries: List[Union[nodes.field, Tuple[Field, Any]]] = []
+        groupindices: Dict[str, int] = {}
+        types: Dict[str, Dict] = {}
 
         # step 1: traverse all fields and collect field types and content
         for field in cast(List[nodes.field], node):

sphinx/util/docstrings.py
@@ -23,7 +23,7 @@ field_list_item_re = re.compile(Body.patterns['field_marker'])
 def extract_metadata(s: str) -> Dict[str, str]:
     """Extract metadata from docstring."""
     in_other_element = False
-    metadata = {}  # type: Dict[str, str]
+    metadata: Dict[str, str] = {}
 
     if not s:
         return metadata

sphinx/util/docutils.py
@@ -42,7 +42,7 @@ if TYPE_CHECKING:
 
 
 __version_info__ = tuple(LooseVersion(docutils.__version__).version)
-additional_nodes = set()  # type: Set[Type[nodes.Element]]
+additional_nodes: Set[Type[Element]] = set()
 
 
 @contextmanager
@@ -176,8 +176,8 @@ class sphinx_domains:
     """
     def __init__(self, env: "BuildEnvironment") -> None:
         self.env = env
-        self.directive_func = None  # type: Callable
-        self.roles_func = None  # type: Callable
+        self.directive_func: Callable = None
+        self.roles_func: Callable = None
 
     def __enter__(self) -> None:
         self.enable()
@@ -491,7 +491,7 @@ class SphinxTranslator(nodes.NodeVisitor):
 
 # cache a vanilla instance of nodes.document
 # Used in new_document() function
-__document_cache__ = None  # type: nodes.document
+__document_cache__: nodes.document = None
 
 
 def new_document(source_path: str, settings: Any = None) -> nodes.document:

sphinx/util/inventory.py
@@ -93,7 +93,7 @@ class InventoryFile:
 
     @classmethod
     def load_v1(cls, stream: InventoryFileReader, uri: str, join: Callable) -> Inventory:
-        invdata = {}  # type: Inventory
+        invdata: Inventory = {}
         projname = stream.readline().rstrip()[11:]
         version = stream.readline().rstrip()[11:]
         for line in stream.readlines():
@@ -111,7 +111,7 @@ class InventoryFile:
 
     @classmethod
     def load_v2(cls, stream: InventoryFileReader, uri: str, join: Callable) -> Inventory:
-        invdata = {}  # type: Inventory
+        invdata: Inventory = {}
         projname = stream.readline().rstrip()[11:]
         version = stream.readline().rstrip()[11:]
         line = stream.readline()

sphinx/util/jsdump.py
@@ -109,8 +109,8 @@ def loads(x: str) -> Any:
     nothing = object()
     i = 0
     n = len(x)
-    stack = []  # type: List[Union[List, Dict]]
-    obj = nothing  # type: Any
+    stack: List[Union[List, Dict]] = []
+    obj: Any = nothing
     key = False
     keys = []
     while i < n:
@@ -160,7 +160,7 @@ def loads(x: str) -> Any:
                 raise ValueError("multiple values")
             key = False
         else:
-            y = None  # type: Any
+            y: Any = None
             m = _str_re.match(x, i)
             if m:
                 y = decode_string(m.group()[1:-1])

sphinx/util/logging.py
@@ -28,7 +28,7 @@ if TYPE_CHECKING:
 NAMESPACE = 'sphinx'
 VERBOSE = 15
 
-LEVEL_NAMES = defaultdict(lambda: logging.WARNING)  # type: Dict[str, int]
+LEVEL_NAMES: Dict[str, int] = defaultdict(lambda: logging.WARNING)
 LEVEL_NAMES.update({
     'CRITICAL': logging.CRITICAL,
     'SEVERE': logging.CRITICAL,
@@ -39,7 +39,7 @@ LEVEL_NAMES.update({
     'DEBUG': logging.DEBUG,
 })
 
-VERBOSITY_MAP = defaultdict(lambda: 0)  # type: Dict[int, int]
+VERBOSITY_MAP: Dict[int, int] = defaultdict(lambda: 0)
 VERBOSITY_MAP.update({
     0: logging.INFO,
     1: VERBOSE,
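The explicit annotations matter here: from `defaultdict(lambda: logging.WARNING)` alone a checker cannot infer the key type, so the `Dict[str, int]` / `Dict[int, int]` annotations pin down both sides. A small sketch of the fallback behavior, with a hypothetical variable name:

    import logging
    from collections import defaultdict
    from typing import Dict

    level_names: Dict[str, int] = defaultdict(lambda: logging.WARNING)
    level_names.update({'CRITICAL': logging.CRITICAL})

    print(level_names['CRITICAL'])       # 50
    print(level_names['no-such-level'])  # 30 (the WARNING default)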
@@ -91,7 +91,7 @@ def convert_serializable(records: List[logging.LogRecord]) -> None:
 class SphinxLogRecord(logging.LogRecord):
     """Log record class supporting location"""
     prefix = ''
-    location = None  # type: Any
+    location: Any = None
 
     def getMessage(self) -> str:
         message = super().getMessage()
@@ -163,6 +163,8 @@ class NewLineStreamHandler(logging.StreamHandler):
 class MemoryHandler(logging.handlers.BufferingHandler):
     """Handler buffering all logs."""
 
+    buffer: List[logging.LogRecord]
+
     def __init__(self) -> None:
         super().__init__(-1)
 
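The added `buffer: List[logging.LogRecord]` line is an annotation-only declaration: it records a type for an attribute the base `BufferingHandler` creates at runtime, but it creates no class attribute itself and only adds an entry to `__annotations__`. A minimal sketch with a hypothetical class:

    from typing import List

    class Box:
        items: List[int]  # declaration only; no value assigned

    print(Box.__annotations__)    # {'items': typing.List[int]}
    print(hasattr(Box, 'items'))  # False: nothing exists at runtime

This is also why the next hunk can drop the `# type:` comment from `self.buffer = []`: the attribute's type is now declared once at class level.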
@@ -174,7 +176,7 @@ class MemoryHandler(logging.handlers.BufferingHandler):
         try:
             for record in self.buffer:
                 logger.handle(record)
-            self.buffer = []  # type: List[logging.LogRecord]
+            self.buffer = []
         finally:
             self.release()
 
@@ -328,7 +330,7 @@ def prefixed_warnings(prefix: str) -> Generator[None, None, None]:
 
 class LogCollector:
     def __init__(self) -> None:
-        self.logs = []  # type: List[logging.LogRecord]
+        self.logs: List[logging.LogRecord] = []
 
     @contextmanager
     def collect(self) -> Generator[None, None, None]:
@@ -449,7 +451,7 @@ class OnceFilter(logging.Filter):
 
     def __init__(self, name: str = '') -> None:
         super().__init__(name)
-        self.messages = {}  # type: Dict[str, List]
+        self.messages: Dict[str, List] = {}
 
     def filter(self, record: logging.LogRecord) -> bool:
         once = getattr(record, 'once', '')
@@ -470,7 +472,7 @@ class SphinxLogRecordTranslator(logging.Filter):
     * Make a instance of SphinxLogRecord
     * docname to path if location given
     """
-    LogRecordClass = None  # type: Type[logging.LogRecord]
+    LogRecordClass: Type[logging.LogRecord] = None
 
     def __init__(self, app: "Sphinx") -> None:
         self.app = app

sphinx/util/matching.py
@@ -21,7 +21,7 @@ def _translate_pattern(pat: str) -> str:
     match slashes.
     """
     i, n = 0, len(pat)
-    res = ''  # type: str
+    res = ''
     while i < n:
         c = pat[i]
         i += 1
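Note the asymmetry in this hunk: the annotation is dropped rather than converted, since `res = ''` lets any checker infer `str` on its own. Annotations are only worth keeping where inference fails, such as empty containers or `None` placeholders:

    from typing import List

    res = ''               # inferred as str; an annotation would be redundant
    parts: List[str] = []  # empty literal: the element type must be declared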
@@ -86,7 +86,7 @@ class Matcher:
 DOTFILES = Matcher(['**/.*'])
 
 
-_pat_cache = {}  # type: Dict[str, Pattern]
+_pat_cache: Dict[str, Pattern] = {}
 
 
 def patmatch(name: str, pat: str) -> Optional[Match[str]]:

sphinx/util/nodes.py
@@ -251,7 +251,7 @@ META_TYPE_NODES = (
 
 def extract_messages(doctree: Element) -> Iterable[Tuple[Element, str]]:
     """Extract translatable messages from a document tree."""
-    for node in doctree.traverse(is_translatable):  # type: nodes.Element
+    for node in doctree.traverse(is_translatable):  # type: Element
         if isinstance(node, addnodes.translatable):
             for msg in node.extract_original_messages():
                 yield node, msg
@@ -363,7 +363,7 @@ indextypes = [
 def process_index_entry(entry: str, targetid: str) -> List[Tuple[str, str, str, str, str]]:
     from sphinx.domains.python import pairindextypes
 
-    indexentries = []  # type: List[Tuple[str, str, str, str, str]]
+    indexentries: List[Tuple[str, str, str, str, str]] = []
     entry = entry.strip()
     oentry = entry
     main = ''

sphinx/util/osutil.py
@@ -185,7 +185,7 @@ class FileAvoidWrite:
     """
     def __init__(self, path: str) -> None:
         self._path = path
-        self._io = None  # type: Optional[StringIO]
+        self._io: Optional[StringIO] = None
 
     def write(self, data: str) -> None:
         if not self._io:

sphinx/util/parallel.py
@@ -60,15 +60,15 @@ class ParallelTasks:
     def __init__(self, nproc: int) -> None:
         self.nproc = nproc
         # (optional) function performed by each task on the result of main task
-        self._result_funcs = {}  # type: Dict[int, Callable]
+        self._result_funcs: Dict[int, Callable] = {}
         # task arguments
-        self._args = {}  # type: Dict[int, List[Any]]
+        self._args: Dict[int, List[Any]] = {}
         # list of subprocesses (both started and waiting)
-        self._procs = {}  # type: Dict[int, multiprocessing.Process]
+        self._procs: Dict[int, multiprocessing.Process] = {}
         # list of receiving pipe connections of running subprocesses
-        self._precvs = {}  # type: Dict[int, Any]
+        self._precvs: Dict[int, Any] = {}
         # list of receiving pipe connections of waiting subprocesses
-        self._precvsWaiting = {}  # type: Dict[int, Any]
+        self._precvsWaiting: Dict[int, Any] = {}
         # number of working subprocesses
         self._pworking = 0
         # task number of each subprocess

sphinx/util/rst.py
@@ -30,8 +30,7 @@ symbols_re = re.compile(r'([!-\-/:-@\[-`{-~])')  # symbols without dot(0x2e)
 SECTIONING_CHARS = ['=', '-', '~']
 
 # width of characters
-WIDECHARS = defaultdict(lambda: "WF")  # type: Dict[str, str]
-                                       # WF: Wide + Full-width
+WIDECHARS: Dict[str, str] = defaultdict(lambda: "WF")  # WF: Wide + Full-width
 WIDECHARS["ja"] = "WFA"  # In Japanese, Ambiguous characters also have double width
 
 
sphinx/util/texescape.py
@@ -98,12 +98,12 @@ unicode_tex_replacements = [
 # %, {, }, \, #, and ~ are the only ones which must be replaced by _ character
 # It would be simpler to define it entirely here rather than in init().
 # Unicode replacements are superfluous, as idescape() uses backslashreplace
-tex_replace_map = {}  # type: Dict[int, str]
+tex_replace_map: Dict[int, str] = {}
 
-_tex_escape_map = {}  # type: Dict[int, str]
-_tex_escape_map_without_unicode = {}  # type: Dict[int, str]
-_tex_hlescape_map = {}  # type: Dict[int, str]
-_tex_hlescape_map_without_unicode = {}  # type: Dict[int, str]
+_tex_escape_map: Dict[int, str] = {}
+_tex_escape_map_without_unicode: Dict[int, str] = {}
+_tex_hlescape_map: Dict[int, str] = {}
+_tex_hlescape_map_without_unicode: Dict[int, str] = {}
 
 
 def escape(s: str, latex_engine: str = None) -> str: