Merge pull request #6438 from tk0miya/refactor_type_annotation4

Migrate to py3 style type annotation: sphinx.util.requests (part3)
Takeshi KOMIYA, 2019-06-05 11:58:47 +09:00, committed by GitHub
commit f8a501d868
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 137 additions and 254 deletions
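
The change is mechanical throughout the diff below: each Python 2 style "# type:" comment is folded into a PEP 484 annotation on the function signature, and imports that existed only so those comments could name types are either promoted to real imports or kept under the "if False:" type-checking guard. A minimal before/after sketch of the pattern in Python (make_filenames is an invented example, not code taken from this commit):

# Before: the types live in a comment; the import is hidden behind a guard
# so it never executes at runtime.
if False:
    # For type annotation
    from typing import List  # NOQA

def make_filenames(names, suffix='.rst'):
    # type: (List[str], str) -> List[str]
    return [name + suffix for name in names]

# After: the same information as py3 annotations, backed by a real import.
from typing import List

def make_filenames(names: List[str], suffix: str = '.rst') -> List[str]:
    return [name + suffix for name in names]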

View File

@@ -337,15 +337,13 @@ _coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
 def detect_encoding(readline: Callable[[], bytes]) -> str:
     """Like tokenize.detect_encoding() from Py3k, but a bit simplified."""

-    def read_or_stop():
-        # type: () -> bytes
+    def read_or_stop() -> bytes:
         try:
             return readline()
         except StopIteration:
             return None

-    def get_normal_name(orig_enc):
-        # type: (str) -> str
+    def get_normal_name(orig_enc: str) -> str:
         """Imitates get_normal_name in tokenizer.c."""
         # Only care about the first 12 characters.
         enc = orig_enc[:12].lower().replace('_', '-')
@@ -356,8 +354,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> str:
             return 'iso-8859-1'
         return orig_enc

-    def find_cookie(line):
-        # type: (bytes) -> str
+    def find_cookie(line: bytes) -> str:
         try:
             line_string = line.decode('ascii')
         except UnicodeDecodeError:

View File

@@ -8,15 +8,12 @@
     :license: BSD, see LICENSE for details.
 """

-if False:
-    # For type annotation
-    from docutils import nodes  # NOQA
-    from sphinx.builders.html import HTMLTranslator  # NOQA
+from docutils import nodes
+
+from sphinx.builders.html import HTMLTranslator


-def get_node_equation_number(writer, node):
-    # type: (HTMLTranslator, nodes.math_block) -> str
+def get_node_equation_number(writer: HTMLTranslator, node: nodes.math_block) -> str:
     if writer.builder.config.math_numfig and writer.builder.config.numfig:
         figtype = 'displaymath'
         if writer.builder.name == 'singlehtml':
@@ -31,10 +28,8 @@ def get_node_equation_number(writer, node):
     return node['number']


-def wrap_displaymath(text, label, numbering):
-    # type: (str, str, bool) -> str
-    def is_equation(part):
-        # type: (str) -> str
+def wrap_displaymath(text: str, label: str, numbering: bool) -> str:
+    def is_equation(part: str) -> str:
         return part.strip()

     if label is None:

View File

@@ -10,9 +10,14 @@

 import re
 import warnings
-from typing import Any, cast
+from typing import Any, Callable, Iterable, List, Set, Tuple, Type
+from typing import cast

 from docutils import nodes
+from docutils.nodes import Element, Node
+from docutils.parsers.rst import Directive
+from docutils.parsers.rst.states import Inliner
+from docutils.statemachine import StringList

 from sphinx import addnodes
 from sphinx.deprecation import RemovedInSphinx40Warning
@@ -21,11 +26,8 @@ from sphinx.util import logging

 if False:
     # For type annotation
-    from typing import Callable, Iterable, List, Optional, Set, Tuple, Type  # NOQA
-    from docutils.parsers.rst.states import Inliner  # NOQA
-    from docutils.statemachine import StringList  # NOQA
-    from sphinx.builders import Builder  # NOQA
-    from sphinx.utils.tags import Tags  # NOQA
+    from sphinx.builders import Builder
+    from sphinx.utils.tags import Tags


 logger = logging.getLogger(__name__)
@@ -57,13 +59,11 @@ class NodeMatcher:
     # => [<reference ...>, <reference ...>, ...]
     """

-    def __init__(self, *classes, **attrs):
-        # type: (Type[nodes.Node], Any) -> None
+    def __init__(self, *classes: Type[Node], **attrs) -> None:
         self.classes = classes
         self.attrs = attrs

-    def match(self, node):
-        # type: (nodes.Node) -> bool
+    def match(self, node: Node) -> bool:
         try:
             if self.classes and not isinstance(node, self.classes):
                 return False
@@ -85,13 +85,11 @@ class NodeMatcher:
             # for non-Element nodes
             return False

-    def __call__(self, node):
-        # type: (nodes.Node) -> bool
+    def __call__(self, node: Node) -> bool:
         return self.match(node)


-def get_full_module_name(node):
-    # type: (nodes.Node) -> str
+def get_full_module_name(node: Node) -> str:
     """
     return full module dotted path like: 'docutils.nodes.paragraph'
@@ -101,8 +99,7 @@ def get_full_module_name(node):
     return '{}.{}'.format(node.__module__, node.__class__.__name__)


-def repr_domxml(node, length=80):
-    # type: (nodes.Node, Optional[int]) -> str
+def repr_domxml(node: Node, length: int = 80) -> str:
     """
     return DOM XML representation of the specified node like:
     '<paragraph translatable="False"><inline classes="versionmodified">New in version...'
@@ -122,8 +119,7 @@ def repr_domxml(node, length=80):
     return text


-def apply_source_workaround(node):
-    # type: (nodes.Element) -> None
+def apply_source_workaround(node: Element) -> None:
     # workaround: nodes.term have wrong rawsource if classifier is specified.
     # The behavior of docutils-0.11, 0.12 is:
     # * when ``term text : classifier1 : classifier2`` is specified,
@@ -186,8 +182,7 @@ IGNORED_NODES = (
 )


-def is_pending_meta(node):
-    # type: (nodes.Node) -> bool
+def is_pending_meta(node: Node) -> bool:
     if (isinstance(node, nodes.pending) and
        isinstance(node.details.get('nodes', [None])[0], addnodes.meta)):
         return True
@@ -195,8 +190,7 @@ def is_pending_meta(node):
         return False


-def is_translatable(node):
-    # type: (nodes.Node) -> bool
+def is_translatable(node: Node) -> bool:
     if isinstance(node, addnodes.translatable):
         return True
@@ -251,8 +245,7 @@ META_TYPE_NODES = (
 )


-def extract_messages(doctree):
-    # type: (nodes.Element) -> Iterable[Tuple[nodes.Element, str]]
+def extract_messages(doctree: Element) -> Iterable[Tuple[Element, str]]:
     """Extract translatable messages from a document tree."""
     for node in doctree.traverse(is_translatable):  # type: nodes.Element
         if isinstance(node, addnodes.translatable):
@@ -279,39 +272,34 @@ def extract_messages(doctree):
             yield node, msg


-def find_source_node(node):
-    # type: (nodes.Element) -> str
+def find_source_node(node: Element) -> str:
     warnings.warn('find_source_node() is deprecated.',
                   RemovedInSphinx40Warning)
     return get_node_source(node)


-def get_node_source(node):
-    # type: (nodes.Element) -> str
+def get_node_source(node: Element) -> str:
     for pnode in traverse_parent(node):
         if pnode.source:
             return pnode.source
     return None


-def get_node_line(node):
-    # type: (nodes.Element) -> int
+def get_node_line(node: Element) -> int:
     for pnode in traverse_parent(node):
         if pnode.line:
             return pnode.line
     return None


-def traverse_parent(node, cls=None):
-    # type: (nodes.Element, Any) -> Iterable[nodes.Element]
+def traverse_parent(node: Element, cls: Any = None) -> Iterable[Element]:
     while node:
         if cls is None or isinstance(node, cls):
             yield node
         node = node.parent


-def get_prev_node(node):
-    # type: (nodes.Node) -> nodes.Node
+def get_prev_node(node: Node) -> Node:
     pos = node.parent.index(node)
     if pos > 0:
         return node.parent[pos - 1]
@@ -319,8 +307,7 @@ def get_prev_node(node):
         return None


-def traverse_translatable_index(doctree):
-    # type: (nodes.Element) -> Iterable[Tuple[nodes.Element, List[str]]]
+def traverse_translatable_index(doctree: Element) -> Iterable[Tuple[Element, List[str]]]:
     """Traverse translatable index node from a document tree."""
     for node in doctree.traverse(NodeMatcher(addnodes.index, inline=False)):  # type: addnodes.index  # NOQA
         if 'raw_entries' in node:
@@ -330,8 +317,7 @@ def traverse_translatable_index(doctree):
         yield node, entries


-def nested_parse_with_titles(state, content, node):
-    # type: (Any, StringList, nodes.Node) -> str
+def nested_parse_with_titles(state: Any, content: StringList, node: Node) -> str:
     """Version of state.nested_parse() that allows titles and does not require
     titles to have the same decoration as the calling document.
@@ -350,8 +336,7 @@ def nested_parse_with_titles(state, content, node):
         state.memo.section_level = surrounding_section_level


-def clean_astext(node):
-    # type: (nodes.Element) -> str
+def clean_astext(node: Element) -> str:
     """Like node.astext(), but ignore images."""
     node = node.deepcopy()
     for img in node.traverse(nodes.image):
@@ -361,8 +346,7 @@ def clean_astext(node):
     return node.astext()


-def split_explicit_title(text):
-    # type: (str) -> Tuple[bool, str, str]
+def split_explicit_title(text: str) -> Tuple[bool, str, str]:
     """Split role content into title and target, if given."""
     match = explicit_title_re.match(text)
     if match:
@@ -375,8 +359,7 @@ indextypes = [
 ]


-def process_index_entry(entry, targetid):
-    # type: (str, str) -> List[Tuple[str, str, str, str, str]]
+def process_index_entry(entry: str, targetid: str) -> List[Tuple[str, str, str, str, str]]:
     from sphinx.domains.python import pairindextypes

     indexentries = []  # type: List[Tuple[str, str, str, str, str]]
@@ -414,8 +397,9 @@ def process_index_entry(entry, targetid):
     return indexentries


-def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed):
-    # type: (Builder, Set[str], str, nodes.document, Callable, List[str]) -> nodes.document
+def inline_all_toctrees(builder: "Builder", docnameset: Set[str], docname: str,
+                        tree: nodes.document, colorfunc: Callable, traversed: List[str]
+                        ) -> nodes.document:
     """Inline all toctrees in the *tree*.

     Record all docnames in *docnameset*, and output docnames with *colorfunc*.
@@ -447,8 +431,8 @@ def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed
     return tree


-def make_refnode(builder, fromdocname, todocname, targetid, child, title=None):
-    # type: (Builder, str, str, str, nodes.Node, str) -> nodes.reference
+def make_refnode(builder: "Builder", fromdocname: str, todocname: str, targetid: str,
+                 child: Node, title: str = None) -> nodes.reference:
     """Shortcut to create a reference node."""
     node = nodes.reference('', '', internal=True)
     if fromdocname == todocname and targetid:
@@ -465,19 +449,16 @@ def make_refnode(builder, fromdocname, todocname, targetid, child, title=None):
     return node


-def set_source_info(directive, node):
-    # type: (Any, nodes.Node) -> None
+def set_source_info(directive: Directive, node: Node) -> None:
     node.source, node.line = \
         directive.state_machine.get_source_and_line(directive.lineno)


-def set_role_source_info(inliner, lineno, node):
-    # type: (Inliner, int, nodes.Node) -> None
+def set_role_source_info(inliner: Inliner, lineno: int, node: Node) -> None:
     node.source, node.line = inliner.reporter.get_source_and_line(lineno)  # type: ignore


-def copy_source_info(src, dst):
-    # type: (nodes.Element, nodes.Element) -> None
+def copy_source_info(src: Element, dst: Element) -> None:
     dst.source = get_node_source(src)
     dst.line = get_node_line(src)
@@ -493,8 +474,7 @@ NON_SMARTQUOTABLE_PARENT_NODES = (
 )


-def is_smartquotable(node):
-    # type: (nodes.Node) -> bool
+def is_smartquotable(node: Node) -> bool:
     """Check the node is smart-quotable or not."""
     if isinstance(node.parent, NON_SMARTQUOTABLE_PARENT_NODES):
         return False
@@ -506,8 +486,7 @@ def is_smartquotable(node):
     return True


-def process_only_nodes(document, tags):
-    # type: (nodes.Node, Tags) -> None
+def process_only_nodes(document: Node, tags: "Tags") -> None:
     """Filter ``only`` nodes which does not match *tags*."""
     for node in document.traverse(addnodes.only):
         try:
@@ -530,8 +509,7 @@ def process_only_nodes(document, tags):

 # monkey-patch Element.copy to copy the rawsource and line
 # for docutils-0.14 or older versions.
-def _new_copy(self):
-    # type: (nodes.Element) -> nodes.Element
+def _new_copy(self: Element) -> Element:
     newnode = self.__class__(self.rawsource, **self.attributes)
     if isinstance(self, nodes.Element):
         newnode.source = self.source

View File

@@ -19,14 +19,11 @@ import time
 import warnings
 from io import StringIO
 from os import path
+from typing import Any, Generator, Iterator, List, Tuple, Type

 from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
 from sphinx.testing.path import path as Path

-if False:
-    # For type annotation
-    from typing import Any, Iterator, List, Tuple  # NOQA
-
 # Errnos that we need.
 EEXIST = getattr(errno, 'EEXIST', 0)  # RemovedInSphinx40Warning
 ENOENT = getattr(errno, 'ENOENT', 0)  # RemovedInSphinx40Warning
@@ -41,19 +38,16 @@ EINVAL = getattr(errno, 'EINVAL', 0)  # RemovedInSphinx40Warning
 SEP = "/"


-def os_path(canonicalpath):
-    # type: (str) -> str
+def os_path(canonicalpath: str) -> str:
     return canonicalpath.replace(SEP, path.sep)


-def canon_path(nativepath):
-    # type: (str) -> str
+def canon_path(nativepath: str) -> str:
     """Return path in OS-independent form"""
     return nativepath.replace(path.sep, SEP)


-def relative_uri(base, to):
-    # type: (str, str) -> str
+def relative_uri(base: str, to: str) -> str:
     """Return a relative URL from ``base`` to ``to``."""
     if to.startswith(SEP):
         return to
@@ -76,22 +70,19 @@ def relative_uri(base, to):
     return ('..' + SEP) * (len(b2) - 1) + SEP.join(t2)


-def ensuredir(path):
-    # type: (str) -> None
+def ensuredir(path: str) -> None:
     """Ensure that a path exists."""
     os.makedirs(path, exist_ok=True)


-def walk(top, topdown=True, followlinks=False):
-    # type: (str, bool, bool) -> Iterator[Tuple[str, List[str], List[str]]]
+def walk(top: str, topdown: bool = True, followlinks: bool = False) -> Iterator[Tuple[str, List[str], List[str]]]:  # NOQA
     warnings.warn('sphinx.util.osutil.walk() is deprecated for removal. '
                   'Please use os.walk() instead.',
                   RemovedInSphinx40Warning)
     return os.walk(top, topdown=topdown, followlinks=followlinks)


-def mtimes_of_files(dirnames, suffix):
-    # type: (List[str], str) -> Iterator[float]
+def mtimes_of_files(dirnames: List[str], suffix: str) -> Iterator[float]:
     for dirname in dirnames:
         for root, dirs, files in os.walk(dirname):
             for sfile in files:
@@ -102,8 +93,7 @@ def mtimes_of_files(dirnames, suffix):
                     pass


-def movefile(source, dest):
-    # type: (str, str) -> None
+def movefile(source: str, dest: str) -> None:
     """Move a file, removing the destination if it exists."""
     if os.path.exists(dest):
         try:
@@ -113,16 +103,14 @@ def movefile(source, dest):
     os.rename(source, dest)


-def copytimes(source, dest):
-    # type: (str, str) -> None
+def copytimes(source: str, dest: str) -> None:
     """Copy a file's modification times."""
     st = os.stat(source)
     if hasattr(os, 'utime'):
         os.utime(dest, (st.st_atime, st.st_mtime))


-def copyfile(source, dest):
-    # type: (str, str) -> None
+def copyfile(source: str, dest: str) -> None:
     """Copy a file and its modification times, if possible.

     Note: ``copyfile`` skips copying if the file has not been changed"""
@@ -139,18 +127,15 @@ no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
 project_suffix_re = re.compile(' Documentation$')


-def make_filename(string):
-    # type: (str) -> str
+def make_filename(string: str) -> str:
     return no_fn_re.sub('', string) or 'sphinx'


-def make_filename_from_project(project):
-    # type: (str) -> str
+def make_filename_from_project(project: str) -> str:
     return make_filename(project_suffix_re.sub('', project)).lower()


-def ustrftime(format, *args):
-    # type: (str, Any) -> str
+def ustrftime(format: str, *args) -> str:
     """[DEPRECATED] strftime for unicode strings."""
     warnings.warn('sphinx.util.osutil.ustrtime is deprecated for removal',
                   RemovedInSphinx30Warning, stacklevel=2)
@@ -171,8 +156,7 @@ def ustrftime(format, *args):
         return r.encode().decode('unicode-escape')


-def relpath(path, start=os.curdir):
-    # type: (str, str) -> str
+def relpath(path: str, start: str = os.curdir) -> str:
     """Return a relative filepath to *path* either from the current directory or
     from an optional *start* directory.
@@ -189,8 +173,7 @@ safe_relpath = relpath  # for compatibility
 fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()


-def abspath(pathdir):
-    # type: (str) -> str
+def abspath(pathdir: str) -> str:
     if isinstance(pathdir, Path):
         return pathdir.abspath()
     else:
@@ -205,8 +188,7 @@ def abspath(pathdir):
         return pathdir


-def getcwd():
-    # type: () -> str
+def getcwd() -> str:
     warnings.warn('sphinx.util.osutil.getcwd() is deprecated. '
                   'Please use os.getcwd() instead.',
                   RemovedInSphinx40Warning)
@@ -214,8 +196,7 @@ def getcwd():

 @contextlib.contextmanager
-def cd(target_dir):
-    # type: (str) -> Iterator[None]
+def cd(target_dir: str) -> Generator[None, None, None]:
     cwd = os.getcwd()
     try:
         os.chdir(target_dir)
@@ -236,19 +217,16 @@ class FileAvoidWrite:

     Objects can be used as context managers.
     """
-    def __init__(self, path):
-        # type: (str) -> None
+    def __init__(self, path: str) -> None:
         self._path = path
         self._io = None  # type: StringIO

-    def write(self, data):
-        # type: (str) -> None
+    def write(self, data: str) -> None:
         if not self._io:
             self._io = StringIO()
         self._io.write(data)

-    def close(self):
-        # type: () -> None
+    def close(self) -> None:
         """Stop accepting writes and write file, if needed."""
         if not self._io:
             raise Exception('FileAvoidWrite does not support empty files.')
@@ -267,16 +245,14 @@ class FileAvoidWrite:
         with open(self._path, 'w') as f:
             f.write(buf)

-    def __enter__(self):
-        # type: () -> FileAvoidWrite
+    def __enter__(self) -> "FileAvoidWrite":
         return self

-    def __exit__(self, type, value, traceback):
-        # type: (str, str, str) -> None
+    def __exit__(self, exc_type: Type[Exception], exc_value: Exception, traceback: Any) -> bool:  # NOQA
         self.close()
+        return True

-    def __getattr__(self, name):
-        # type: (str) -> Any
+    def __getattr__(self, name: str) -> Any:
         # Proxy to _io instance.
         if not self._io:
             raise Exception('Must write to FileAvoidWrite before other '
@@ -285,8 +261,7 @@ class FileAvoidWrite:
         return getattr(self._io, name)


-def rmtree(path):
-    # type: (str) -> None
+def rmtree(path: str) -> None:
     if os.path.isdir(path):
         shutil.rmtree(path)
     else:

View File

@@ -12,6 +12,7 @@ import os
 import time
 import traceback
 from math import sqrt
+from typing import Any, Callable, Dict, List, Sequence

 try:
     import multiprocessing
@@ -21,10 +22,6 @@ except ImportError:

 from sphinx.errors import SphinxParallelError
 from sphinx.util import logging

-if False:
-    # For type annotation
-    from typing import Any, Callable, Dict, List, Sequence  # NOQA
-
 logger = logging.getLogger(__name__)
@@ -35,12 +32,10 @@ parallel_available = multiprocessing and (os.name == 'posix')

 class SerialTasks:
     """Has the same interface as ParallelTasks, but executes tasks directly."""

-    def __init__(self, nproc=1):
-        # type: (int) -> None
+    def __init__(self, nproc: int = 1) -> None:
         pass

-    def add_task(self, task_func, arg=None, result_func=None):
-        # type: (Callable, Any, Callable) -> None
+    def add_task(self, task_func: Callable, arg: Any = None, result_func: Callable = None) -> None:  # NOQA
         if arg is not None:
             res = task_func(arg)
         else:
@@ -48,16 +43,14 @@ class SerialTasks:
         if result_func:
             result_func(res)

-    def join(self):
-        # type: () -> None
+    def join(self) -> None:
         pass


 class ParallelTasks:
     """Executes *nproc* tasks in parallel after forking."""

-    def __init__(self, nproc):
-        # type: (int) -> None
+    def __init__(self, nproc: int) -> None:
         self.nproc = nproc
         # (optional) function performed by each task on the result of main task
         self._result_funcs = {}  # type: Dict[int, Callable]
@@ -74,8 +67,7 @@ class ParallelTasks:
         # task number of each subprocess
        self._taskid = 0

-    def _process(self, pipe, func, arg):
-        # type: (Any, Callable, Any) -> None
+    def _process(self, pipe: Any, func: Callable, arg: Any) -> None:
         try:
             collector = logging.LogCollector()
             with collector.collect():
@@ -91,8 +83,7 @@ class ParallelTasks:
             logging.convert_serializable(collector.logs)
             pipe.send((failed, collector.logs, ret))

-    def add_task(self, task_func, arg=None, result_func=None):
-        # type: (Callable, Any, Callable) -> None
+    def add_task(self, task_func: Callable, arg: Any = None, result_func: Callable = None) -> None:  # NOQA
         tid = self._taskid
         self._taskid += 1
         self._result_funcs[tid] = result_func or (lambda arg, result: None)
@@ -104,13 +95,11 @@ class ParallelTasks:
             self._precvsWaiting[tid] = precv
         self._join_one()

-    def join(self):
-        # type: () -> None
+    def join(self) -> None:
         while self._pworking:
             self._join_one()

-    def _join_one(self):
-        # type: () -> None
+    def _join_one(self) -> None:
         for tid, pipe in self._precvs.items():
             if pipe.poll():
                 exc, logs, result = pipe.recv()
@@ -132,8 +121,7 @@ class ParallelTasks:
             self._pworking += 1


-def make_chunks(arguments, nproc, maxbatch=10):
-    # type: (Sequence[str], int, int) -> List[Any]
+def make_chunks(arguments: Sequence[str], nproc: int, maxbatch: int = 10) -> List[Any]:
     # determine how many documents to read in one go
     nargs = len(arguments)
     chunksize = nargs // nproc

View File

@@ -10,11 +10,14 @@

 import warnings
 from contextlib import contextmanager
+from typing import Generator, Union
 from urllib.parse import urlsplit

 import pkg_resources
 import requests

+from sphinx.config import Config
+
 try:
     from requests.packages.urllib3.exceptions import SSLError
 except ImportError:
@@ -54,17 +57,12 @@ else:
                             pkg_resources.VersionConflict):
         pass  # ignored

-if False:
-    # For type annotation
-    from typing import Any, Generator, Union  # NOQA
-    from sphinx.config import Config  # NOQA
-
 useragent_header = [('User-Agent',
                      'Mozilla/5.0 (X11; Linux x86_64; rv:25.0) Gecko/20100101 Firefox/25.0')]


-def is_ssl_error(exc):
-    # type: (Exception) -> bool
+def is_ssl_error(exc: Exception) -> bool:
     """Check an exception is SSLError."""
     if isinstance(exc, SSLError):
         return True
@@ -77,8 +75,7 @@ def is_ssl_error(exc):


 @contextmanager
-def ignore_insecure_warning(**kwargs):
-    # type: (Any) -> Generator
+def ignore_insecure_warning(**kwargs) -> Generator[None, None, None]:
     with warnings.catch_warnings():
         if not kwargs.get('verify') and InsecureRequestWarning:
             # ignore InsecureRequestWarning if verify=False
@@ -86,8 +83,7 @@ def ignore_insecure_warning(**kwargs):
             yield


-def _get_tls_cacert(url, config):
-    # type: (str, Config) -> Union[str, bool]
+def _get_tls_cacert(url: str, config: Config) -> Union[str, bool]:
     """Get additional CA cert for a specific URL.

     This also returns ``False`` if verification is disabled.
@@ -109,8 +105,7 @@ def _get_tls_cacert(url, config):
     return certs.get(hostname, True)


-def get(url, **kwargs):
-    # type: (str, Any) -> requests.Response
+def get(url: str, **kwargs) -> requests.Response:
     """Sends a GET request like requests.get().

     This sets up User-Agent header and TLS verification automatically."""
@@ -123,8 +118,7 @@ def get(url, **kwargs):
     return requests.get(url, **kwargs)


-def head(url, **kwargs):
-    # type: (str, Any) -> requests.Response
+def head(url: str, **kwargs) -> requests.Response:
     """Sends a HEAD request like requests.head().

     This sets up User-Agent header and TLS verification automatically."""

View File

@@ -11,23 +11,20 @@

 import re
 from collections import defaultdict
 from contextlib import contextmanager
+from typing import Dict, Generator
 from unicodedata import east_asian_width

 from docutils.parsers.rst import roles
 from docutils.parsers.rst.languages import en as english
+from docutils.statemachine import StringList
 from docutils.utils import Reporter
+from jinja2 import Environment
 from jinja2 import environmentfilter

 from sphinx.locale import __
 from sphinx.util import docutils
 from sphinx.util import logging

-if False:
-    # For type annotation
-    from typing import Callable, Dict, Generator  # NOQA
-    from docutils.statemachine import StringList  # NOQA
-    from jinja2 import Environment  # NOQA
-
 logger = logging.getLogger(__name__)

 docinfo_re = re.compile(':\\w+:.*?')
@@ -40,18 +37,15 @@ WIDECHARS = defaultdict(lambda: "WF")  # type: Dict[str, str]
 WIDECHARS["ja"] = "WFA"  # In Japanese, Ambiguous characters also have double width


-def escape(text):
-    # type: (str) -> str
+def escape(text: str) -> str:
     text = symbols_re.sub(r'\\\1', text)
     text = re.sub(r'^\.', r'\.', text)  # escape a dot at top
     return text


-def textwidth(text, widechars='WF'):
-    # type: (str, str) -> int
+def textwidth(text: str, widechars: str = 'WF') -> int:
     """Get width of text."""
-    def charwidth(char, widechars):
-        # type: (str, str) -> int
+    def charwidth(char: str, widechars: str) -> int:
         if east_asian_width(char) in widechars:
             return 2
         else:
@@ -61,8 +55,7 @@ def textwidth(text, widechars='WF'):


 @environmentfilter
-def heading(env, text, level=1):
-    # type: (Environment, str, int) -> str
+def heading(env: Environment, text: str, level: int = 1) -> str:
     """Create a heading for *level*."""
     assert level <= 3
     width = textwidth(text, WIDECHARS[env.language])  # type: ignore
@@ -71,8 +64,7 @@ def heading(env, text, level=1):


 @contextmanager
-def default_role(docname, name):
-    # type: (str, str) -> Generator
+def default_role(docname: str, name: str) -> Generator[None, None, None]:
     if name:
         dummy_reporter = Reporter('', 4, 4)
         role_fn, _ = roles.role(name, english, 0, dummy_reporter)
@@ -86,8 +78,7 @@ def default_role(docname, name):
     docutils.unregister_role('')


-def prepend_prolog(content, prolog):
-    # type: (StringList, str) -> None
+def prepend_prolog(content: StringList, prolog: str) -> None:
     """Prepend a string to content body as prolog."""
     if prolog:
         pos = 0
@@ -109,8 +100,7 @@ def prepend_prolog(content, prolog):
         content.insert(pos + lineno + 1, '', '<generated>', 0)


-def append_epilog(content, epilog):
-    # type: (StringList, str) -> None
+def append_epilog(content: StringList, epilog: str) -> None:
     """Append a string to content body as epilog."""
     if epilog:
         content.append('', '<generated>', 0)

View File

@@ -26,14 +26,12 @@
 """

 import re
+from typing import Generator, Iterable, Tuple

 from docutils.utils import smartquotes

 from sphinx.util.docutils import __version_info__ as docutils_version

-if False:  # For type annotation
-    from typing import Generator, Iterable, Tuple  # NOQA
-
 langquotes = {'af': '“”‘’',
               'af-x-altquot': '„”‚’',
@@ -125,8 +123,7 @@ langquotes = {'af': '“”‘’',
               }


-def educateQuotes(text, language='en'):
-    # type: (str, str) -> str
+def educateQuotes(text: str, language: str = 'en') -> str:
     """
     Parameter:  - text string (unicode or bytes).
                 - language (`BCP 47` language tag.)
@@ -240,8 +237,10 @@ def educateQuotes(text, language='en'):
     return text


-def educate_tokens(text_tokens, attr=smartquotes.default_smartypants_attr, language='en'):
-    # type: (Iterable[Tuple[str, str]], str, str) -> Generator[str, None, None]
+def educate_tokens(text_tokens: Iterable[Tuple[str, str]],
+                   attr: str = smartquotes.default_smartypants_attr,
+                   language: str = 'en'
+                   ) -> Generator[str, None, None]:
     """Return iterator that "educates" the items of `text_tokens`.

     This is modified to intercept the ``attr='2'`` as it was used by the

View File

@@ -18,18 +18,15 @@ except ImportError:


 class BaseStemmer:
-    def stem(self, word):
-        # type: (str) -> str
+    def stem(self, word: str) -> str:
         raise NotImplementedError()


 class PyStemmer(BaseStemmer):
-    def __init__(self):
-        # type: () -> None
+    def __init__(self) -> None:
         self.stemmer = _PyStemmer('porter')

-    def stem(self, word):
-        # type: (str) -> str
+    def stem(self, word: str) -> str:
         return self.stemmer.stemWord(word)
@@ -37,13 +34,11 @@ class StandardStemmer(PorterStemmer, BaseStemmer):  # type: ignore
     """All those porter stemmer implementations look hideous;
     make at least the stem method nicer.
     """
-    def stem(self, word):  # type: ignore
-        # type: (str) -> str
+    def stem(self, word: str) -> str:  # type: ignore
         return super().stem(word, 0, len(word) - 1)


-def get_stemmer():
-    # type: () -> BaseStemmer
+def get_stemmer() -> BaseStemmer:
     if PYSTEMMER:
         return PyStemmer()
     else:

View File

@@ -30,8 +30,7 @@

 class PorterStemmer:

-    def __init__(self):
-        # type: () -> None
+    def __init__(self) -> None:
         """The main part of the stemming algorithm starts here.
         b is a buffer holding a word to be stemmed.  The letters are in b[k0],
         b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
@@ -47,8 +46,7 @@ class PorterStemmer:
         self.k0 = 0
         self.j = 0   # j is a general offset into the string

-    def cons(self, i):
-        # type: (int) -> int
+    def cons(self, i: int) -> int:
         """cons(i) is TRUE <=> b[i] is a consonant."""
         if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' \
            or self.b[i] == 'o' or self.b[i] == 'u':
@@ -60,8 +58,7 @@ class PorterStemmer:
             return (not self.cons(i - 1))
         return 1

-    def m(self):
-        # type: () -> int
+    def m(self) -> int:
         """m() measures the number of consonant sequences between k0 and j.
         if c is a consonant sequence and v a vowel sequence, and <..>
         indicates arbitrary presence,
@@ -98,16 +95,14 @@ class PorterStemmer:
                 i = i + 1
             i = i + 1

-    def vowelinstem(self):
-        # type: () -> int
+    def vowelinstem(self) -> int:
         """vowelinstem() is TRUE <=> k0,...j contains a vowel"""
         for i in range(self.k0, self.j + 1):
             if not self.cons(i):
                 return 1
         return 0

-    def doublec(self, j):
-        # type: (int) -> int
+    def doublec(self, j: int) -> int:
         """doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
         if j < (self.k0 + 1):
             return 0
@@ -115,8 +110,7 @@ class PorterStemmer:
             return 0
         return self.cons(j)

-    def cvc(self, i):
-        # type: (int) -> int
+    def cvc(self, i: int) -> int:
         """cvc(i) is TRUE <=> i-2,i-1,i has the form
             consonant - vowel - consonant
         and also if the second c is not w,x or y. this is used when trying to
@@ -133,8 +127,7 @@ class PorterStemmer:
             return 0
         return 1

-    def ends(self, s):
-        # type: (str) -> int
+    def ends(self, s: str) -> int:
         """ends(s) is TRUE <=> k0,...k ends with the string s."""
         length = len(s)
         if s[length - 1] != self.b[self.k]:    # tiny speed-up
@@ -146,22 +139,19 @@ class PorterStemmer:
         self.j = self.k - length
         return 1

-    def setto(self, s):
-        # type: (str) -> None
+    def setto(self, s: str) -> None:
         """setto(s) sets (j+1),...k to the characters in the string s,
         readjusting k."""
         length = len(s)
         self.b = self.b[:self.j + 1] + s + self.b[self.j + length + 1:]
         self.k = self.j + length

-    def r(self, s):
-        # type: (str) -> None
+    def r(self, s: str) -> None:
         """r(s) is used further down."""
         if self.m() > 0:
             self.setto(s)

-    def step1ab(self):
-        # type: () -> None
+    def step1ab(self) -> None:
         """step1ab() gets rid of plurals and -ed or -ing. e.g.

            caresses  ->  caress
@@ -208,15 +198,13 @@ class PorterStemmer:
         elif (self.m() == 1 and self.cvc(self.k)):
             self.setto("e")

-    def step1c(self):
-        # type: () -> None
+    def step1c(self) -> None:
         """step1c() turns terminal y to i when there is another vowel in
         the stem."""
         if (self.ends("y") and self.vowelinstem()):
             self.b = self.b[:self.k] + 'i' + self.b[self.k + 1:]

-    def step2(self):
-        # type: () -> None
+    def step2(self) -> None:
         """step2() maps double suffices to single ones.
         so -ization ( = -ize plus -ation) maps to -ize etc. note that the
         string before the suffix must give m() > 0.
@@ -275,8 +263,7 @@ class PorterStemmer:
             self.r("log")
         # To match the published algorithm, delete this phrase

-    def step3(self):
-        # type: () -> None
+    def step3(self) -> None:
         """step3() dels with -ic-, -full, -ness etc. similar strategy
         to step2."""
         if self.b[self.k] == 'e':
@@ -298,8 +285,7 @@ class PorterStemmer:
             if self.ends("ness"):
                 self.r("")

-    def step4(self):
-        # type: () -> None
+    def step4(self) -> None:
         """step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
         if self.b[self.k - 1] == 'a':
             if self.ends("al"):
@@ -382,8 +368,7 @@ class PorterStemmer:
         if self.m() > 1:
             self.k = self.j

-    def step5(self):
-        # type: () -> None
+    def step5(self) -> None:
         """step5() removes a final -e if m() > 1, and changes -ll to -l if
         m() > 1.
         """
@@ -395,8 +380,7 @@ class PorterStemmer:
         if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
             self.k = self.k - 1

-    def stem(self, p, i, j):
-        # type: (str, int, int) -> str
+    def stem(self, p: str, i: int, j: int) -> str:
         """In stem(p,i,j), p is a char pointer, and the string to be stemmed
         is from p[i] to p[j] inclusive. Typically i is zero and j is the
         offset to the last character of a string, (p[j+1] == '\0'). The

View File

@@ -9,7 +9,9 @@
 """

 import os
+from typing import Dict

+from jinja2.loaders import BaseLoader
 from jinja2.sandbox import SandboxedEnvironment

 from sphinx import package_dir
@@ -17,58 +19,45 @@ from sphinx.jinja2glue import SphinxFileSystemLoader
 from sphinx.locale import get_translator
 from sphinx.util import rst, texescape

-if False:
-    # For type annotation
-    from typing import Dict  # NOQA
-    from jinja2.loaders import BaseLoader  # NOQA
-

 class BaseRenderer:
-    def __init__(self, loader=None):
-        # type: (BaseLoader) -> None
+    def __init__(self, loader: BaseLoader = None) -> None:
         self.env = SandboxedEnvironment(loader=loader, extensions=['jinja2.ext.i18n'])
         self.env.filters['repr'] = repr
         self.env.install_gettext_translations(get_translator())  # type: ignore

-    def render(self, template_name, context):
-        # type: (str, Dict) -> str
+    def render(self, template_name: str, context: Dict) -> str:
         return self.env.get_template(template_name).render(context)

-    def render_string(self, source, context):
-        # type: (str, Dict) -> str
+    def render_string(self, source: str, context: Dict) -> str:
         return self.env.from_string(source).render(context)


 class FileRenderer(BaseRenderer):
-    def __init__(self, search_path):
-        # type: (str) -> None
+    def __init__(self, search_path: str) -> None:
         loader = SphinxFileSystemLoader(search_path)
         super().__init__(loader)

     @classmethod
-    def render_from_file(cls, filename, context):
-        # type: (str, Dict) -> str
+    def render_from_file(cls, filename: str, context: Dict) -> str:
         dirname = os.path.dirname(filename)
         basename = os.path.basename(filename)
         return cls(dirname).render(basename, context)


 class SphinxRenderer(FileRenderer):
-    def __init__(self, template_path=None):
-        # type: (str) -> None
+    def __init__(self, template_path: str = None) -> None:
         if template_path is None:
             template_path = os.path.join(package_dir, 'templates')
         super().__init__(template_path)

     @classmethod
-    def render_from_file(cls, filename, context):
-        # type: (str, Dict) -> str
+    def render_from_file(cls, filename: str, context: Dict) -> str:
         return FileRenderer.render_from_file(filename, context)


 class LaTeXRenderer(SphinxRenderer):
-    def __init__(self, template_path=None):
-        # type: (str) -> None
+    def __init__(self, template_path: str = None) -> None:
         if template_path is None:
             template_path = os.path.join(package_dir, 'templates', 'latex')
         super().__init__(template_path)
@@ -87,8 +76,7 @@ class LaTeXRenderer(SphinxRenderer):


 class ReSTRenderer(SphinxRenderer):
-    def __init__(self, template_path=None, language=None):
-        # type: (str, str) -> None
+    def __init__(self, template_path: str = None, language: str = None) -> None:
         super().__init__(template_path)

         # add language to environment