From 70600e99818ab517585035f46d9146133a68f91a Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA Date: Sun, 6 Nov 2016 22:14:55 +0900 Subject: [PATCH 001/190] Bump version --- CHANGES | 12 ++++++++++++ sphinx/__init__.py | 6 +++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index 2b6e3d588..29f9eb64c 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,15 @@ +Release 1.6 (in development) +============================ + +Incompatible changes +-------------------- + +Features added +-------------- + +Bugs fixed +---------- + Release 1.5 beta1 (released Nov 6, 2016) ======================================== diff --git a/sphinx/__init__.py b/sphinx/__init__.py index 3647f5ebb..368f9d8fe 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -15,13 +15,13 @@ import sys from os import path -__version__ = '1.5b1' -__released__ = '1.5b1' # used when Sphinx builds its own docs +__version__ = '1.6' +__released__ = '1.6+' # used when Sphinx builds its own docs # version info for better programmatic use # possible values for 3rd element: 'alpha', 'beta', 'rc', 'final' # 'final' has 0 as the last element -version_info = (1, 5, 0, 'beta', 1) +version_info = (1, 6, 0, 'beta', 1) package_dir = path.abspath(path.dirname(__file__)) From 22545a092736fa4c1679c547f3e0c25ed473fdb4 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA Date: Sat, 12 Nov 2016 23:05:59 +0900 Subject: [PATCH 002/190] #3136: Add ``:name:`` option to the directives in ``sphinx.ext.graphviz`` --- CHANGES | 2 ++ doc/ext/graphviz.rst | 3 +++ sphinx/ext/graphviz.py | 4 ++++ 3 files changed, 9 insertions(+) diff --git a/CHANGES b/CHANGES index 29f9eb64c..1c1eb8f84 100644 --- a/CHANGES +++ b/CHANGES @@ -7,6 +7,8 @@ Incompatible changes Features added -------------- +* #3136: Add ``:name:`` option to the directives in ``sphinx.ext.graphviz`` + Bugs fixed ---------- diff --git a/doc/ext/graphviz.rst b/doc/ext/graphviz.rst index 0994c932a..555df7c28 100644 --- a/doc/ext/graphviz.rst +++ b/doc/ext/graphviz.rst @@ -100,6 +100,9 @@ It adds these directives: All three directives support a ``align`` option to align the graph horizontal. The values "left", "center", "right" are allowed. +.. versionadded:: 1.6 + All three directives support a ``name`` option to set the label to graph. + There are also these new config values: .. 
confval:: graphviz_dot diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py index 5e76eb8ba..47c8dcfff 100644 --- a/sphinx/ext/graphviz.py +++ b/sphinx/ext/graphviz.py @@ -75,6 +75,7 @@ class Graphviz(Directive): 'inline': directives.flag, 'caption': directives.unchanged, 'graphviz_dot': directives.unchanged, + 'name': directives.unchanged, } def run(self): @@ -117,6 +118,7 @@ class Graphviz(Directive): if caption: node = figure_wrapper(self, node, caption) + self.add_name(node) return [node] @@ -134,6 +136,7 @@ class GraphvizSimple(Directive): 'inline': directives.flag, 'caption': directives.unchanged, 'graphviz_dot': directives.unchanged, + 'name': directives.unchanged, } def run(self): @@ -154,6 +157,7 @@ class GraphvizSimple(Directive): if caption: node = figure_wrapper(self, node, caption) + self.add_name(node) return [node] From db732ac0b839a028a868a180550bb4f55d6e9b4b Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA Date: Mon, 7 Nov 2016 13:15:18 +0900 Subject: [PATCH 003/190] Prepare to type-check using mypy --- .gitignore | 1 + Makefile | 7 +++++-- mypy.ini | 6 ++++++ setup.py | 1 + test-reqs.txt | 1 + tox.ini | 4 ++++ utils/check_sources.py | 3 +++ 7 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 mypy.ini diff --git a/.gitignore b/.gitignore index be28908ec..86a8baf9d 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ *.swp .dir-locals.el +.mypy_cache/ .ropeproject/ TAGS .tags diff --git a/Makefile b/Makefile index 01e3a7837..86226f3b5 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ PYTHON ?= python -.PHONY: all style-check clean clean-pyc clean-patchfiles clean-backupfiles \ +.PHONY: all style-check type-check clean clean-pyc clean-patchfiles clean-backupfiles \ clean-generated pylint reindent test covertest build DONT_CHECK = -i build -i dist -i sphinx/style/jquery.js \ @@ -30,11 +30,14 @@ DONT_CHECK = -i build -i dist -i sphinx/style/jquery.js \ -i sphinx/search/tr.py \ -i .tox -all: clean-pyc clean-backupfiles style-check test +all: clean-pyc clean-backupfiles style-check type-check test style-check: @$(PYTHON) utils/check_sources.py $(DONT_CHECK) . 
+type-check: + mypy sphinx/ + clean: clean-pyc clean-pycache clean-patchfiles clean-backupfiles clean-generated clean-testfiles clean-buildfiles clean-pyc: diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 000000000..17ded7ab8 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,6 @@ +[mypy] +python_version = 2.7 +silent_imports = True +fast_parser = True +incremental = True +check_untyped_defs = True diff --git a/setup.py b/setup.py index 37c10b345..e23c4fb59 100644 --- a/setup.py +++ b/setup.py @@ -51,6 +51,7 @@ requires = [ 'alabaster>=0.7,<0.8', 'imagesize', 'requests', + 'typing', ] extras_require = { # Environment Marker works for wheel 0.24 or later diff --git a/test-reqs.txt b/test-reqs.txt index b53adbfe5..13cb3a9ff 100644 --- a/test-reqs.txt +++ b/test-reqs.txt @@ -16,3 +16,4 @@ imagesize requests html5lib enum34 +typing diff --git a/tox.ini b/tox.ini index ca3cac99b..957fbcc38 100644 --- a/tox.ini +++ b/tox.ini @@ -47,6 +47,10 @@ deps= {[testenv]deps} [testenv:py35] +deps= + mypy-lang + typed_ast + {[testenv]deps} commands= {envpython} tests/run.py -m '^[tT]est' {posargs} sphinx-build -q -W -b html -d {envtmpdir}/doctrees doc {envtmpdir}/html diff --git a/utils/check_sources.py b/utils/check_sources.py index 18d444057..d4a5ab491 100755 --- a/utils/check_sources.py +++ b/utils/check_sources.py @@ -46,6 +46,7 @@ copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' % (name_mail_re, name_mail_re)) not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+') is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b') +noqa_re = re.compile(r'#\s+NOQA\s*$', re.I) misspellings = ["developement", "adress", # ALLOW-MISSPELLING "verificate", "informations"] # ALLOW-MISSPELLING @@ -81,6 +82,8 @@ def check_syntax(fn, lines): @checker('.py') def check_style(fn, lines): for lno, line in enumerate(lines): + if noqa_re.search(line): + continue if len(line.rstrip('\n')) > 95: yield lno+1, "line too long" if line.strip().startswith('#'): From 8cfb281b05653a32f480799cb39d4c7532d27f05 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA Date: Wed, 9 Nov 2016 11:45:12 +0900 Subject: [PATCH 004/190] Add type-check annotations to sphinx.util --- sphinx/util/__init__.py | 42 +++++++++++++++++++++++++++++----------- sphinx/util/console.py | 10 +++++++++- sphinx/util/docfields.py | 23 ++++++++++++++++++++-- sphinx/util/docutils.py | 16 +++++++++++++-- sphinx/util/i18n.py | 30 +++++++++++++++++++++------- sphinx/util/inspect.py | 20 ++++++++++++++----- sphinx/util/jsdump.py | 19 ++++++++++++++---- sphinx/util/matching.py | 15 ++++++++++++-- sphinx/util/nodes.py | 28 ++++++++++++++++++++++++++- sphinx/util/osutil.py | 30 ++++++++++++++++++++++++---- sphinx/util/parallel.py | 26 ++++++++++++++++++------- sphinx/util/pycompat.py | 17 ++++++++++++---- 12 files changed, 226 insertions(+), 50 deletions(-) diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index 7ac5c62f7..f24ffb681 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -42,19 +42,25 @@ from sphinx.util.nodes import ( # noqa caption_ref_re) from sphinx.util.matching import patfilter # noqa +if False: + # For type annotation + from typing import Any, Callable, Iterable, Pattern, Sequence, Tuple # NOQA + # Generally useful regular expressions. -ws_re = re.compile(r'\s+') -url_re = re.compile(r'(?P.+)://.*') +ws_re = re.compile(r'\s+') # type: Pattern +url_re = re.compile(r'(?P.+)://.*') # type: Pattern # High-level utility functions. 
def docname_join(basedocname, docname): + # type: (unicode, unicode) -> unicode return posixpath.normpath( posixpath.join('/' + basedocname, '..', docname))[1:] def path_stabilize(filepath): + # type: (unicode) -> unicode "normalize path separater and unicode string" newpath = filepath.replace(os.path.sep, SEP) if isinstance(newpath, text_type): @@ -63,6 +69,7 @@ def path_stabilize(filepath): def get_matching_files(dirname, exclude_matchers=()): + # type: (unicode, Tuple[Callable[[unicode], bool], ...]) -> Iterable[unicode] """Get all file names in a directory, recursively. Exclude files and dirs matching some matcher in *exclude_matchers*. @@ -75,9 +82,9 @@ def get_matching_files(dirname, exclude_matchers=()): relativeroot = root[dirlen:] qdirs = enumerate(path_stabilize(path.join(relativeroot, dn)) - for dn in dirs) + for dn in dirs) # type: Iterable[Tuple[int, unicode]] qfiles = enumerate(path_stabilize(path.join(relativeroot, fn)) - for fn in files) + for fn in files) # type: Iterable[Tuple[int, unicode]] for matcher in exclude_matchers: qdirs = [entry for entry in qdirs if not matcher(entry[1])] qfiles = [entry for entry in qfiles if not matcher(entry[1])] @@ -89,6 +96,7 @@ def get_matching_files(dirname, exclude_matchers=()): def get_matching_docs(dirname, suffixes, exclude_matchers=()): + # type: (unicode, List[unicode], Tuple[Callable[[unicode], bool], ...]) -> Iterable[unicode] # NOQA """Get all file names (without suffixes) matching a suffix in a directory, recursively. @@ -97,7 +105,7 @@ def get_matching_docs(dirname, suffixes, exclude_matchers=()): suffixpatterns = ['*' + s for s in suffixes] for filename in get_matching_files(dirname, exclude_matchers): for suffixpattern in suffixpatterns: - if fnmatch.fnmatch(filename, suffixpattern): + if fnmatch.fnmatch(filename, suffixpattern): # type: ignore yield filename[:-len(suffixpattern)+1] break @@ -109,9 +117,10 @@ class FilenameUniqDict(dict): appear in. Used for images and downloadable files in the environment. """ def __init__(self): - self._existing = set() + self._existing = set() # type: Set[unicode] def add_file(self, docname, newfile): + # type: (unicode, unicode) -> unicode if newfile in self: self[newfile][0].add(docname) return self[newfile][1] @@ -126,6 +135,7 @@ class FilenameUniqDict(dict): return uniquename def purge_doc(self, docname): + # type: (unicode) -> None for filename, (docs, unique) in list(self.items()): docs.discard(docname) if not docs: @@ -133,6 +143,7 @@ class FilenameUniqDict(dict): self._existing.discard(unique) def merge_other(self, docnames, other): + # type: (List[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None for filename, (docs, unique) in other.items(): for doc in docs & docnames: self.add_file(doc, filename) @@ -146,6 +157,7 @@ class FilenameUniqDict(dict): def copy_static_entry(source, targetdir, builder, context={}, exclude_matchers=(), level=0): + # type: (unicode, unicode, Any, Dict, Tuple[Callable, ...], int) -> None """[DEPRECATED] Copy a HTML builder static_path entry from source to targetdir. Handles all possible cases of files, directories and subdirectories. 
@@ -183,6 +195,7 @@ _DEBUG_HEADER = '''\ def save_traceback(app): + # type: (Any) -> unicode """Save the current exception's traceback in a temporary file.""" import sphinx import jinja2 @@ -190,7 +203,7 @@ def save_traceback(app): import platform exc = sys.exc_info()[1] if isinstance(exc, SphinxParallelError): - exc_format = '(Error in parallel process)\n' + exc.traceback + exc_format = '(Error in parallel process)\n' + exc.traceback # type: ignore else: exc_format = traceback.format_exc() fd, path = tempfile.mkstemp('.log', 'sphinx-err-') @@ -220,6 +233,7 @@ def save_traceback(app): def get_module_source(modname): + # type: (str) -> Tuple[unicode, unicode] """Try to find the source code for a module. Can return ('file', 'filename') in which case the source is in the given @@ -259,6 +273,7 @@ def get_module_source(modname): def get_full_modname(modname, attribute): + # type: (str, unicode) -> unicode __import__(modname) module = sys.modules[modname] @@ -277,6 +292,7 @@ _coding_re = re.compile(r'coding[:=]\s*([-\w.]+)') def detect_encoding(readline): + # type: (Callable) -> unicode """Like tokenize.detect_encoding() from Py3k, but a bit simplified.""" def read_or_stop(): @@ -433,10 +449,11 @@ def split_index_msg(type, value): def format_exception_cut_frames(x=1): + # type: (int) -> unicode """Format an exception with traceback, but only the last x frames.""" typ, val, tb = sys.exc_info() # res = ['Traceback (most recent call last):\n'] - res = [] + res = [] # type: List[unicode] tbres = traceback.format_tb(tb) res += tbres[-x:] res += traceback.format_exception_only(typ, val) @@ -449,7 +466,7 @@ class PeekableIterator(object): what's the next item. """ def __init__(self, iterable): - self.remaining = deque() + self.remaining = deque() # type: deque self._iterator = iter(iterable) def __iter__(self): @@ -477,6 +494,7 @@ class PeekableIterator(object): def import_object(objname, source=None): + # type: (str, unicode) -> Any try: module, name = objname.rsplit('.', 1) except ValueError as err: @@ -496,7 +514,8 @@ def import_object(objname, source=None): def encode_uri(uri): - split = list(urlsplit(uri)) + # type: (unicode) -> unicode + split = list(urlsplit(uri)) # type: Any split[1] = split[1].encode('idna').decode('ascii') split[2] = quote_plus(split[2].encode('utf-8'), '/').decode('ascii') query = list((q, quote_plus(v.encode('utf-8'))) @@ -506,8 +525,9 @@ def encode_uri(uri): def split_docinfo(text): + # type: (unicode) -> Sequence[unicode] docinfo_re = re.compile('\A((?:\s*:\w+:.*?\n)+)', re.M) - result = docinfo_re.split(text, 1) + result = docinfo_re.split(text, 1) # type: ignore if len(result) == 1: return '', result[0] else: diff --git a/sphinx/util/console.py b/sphinx/util/console.py index 593634b11..b952d7183 100644 --- a/sphinx/util/console.py +++ b/sphinx/util/console.py @@ -20,10 +20,11 @@ except ImportError: colorama = None _ansi_re = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm') -codes = {} +codes = {} # type: Dict[str, str] def get_terminal_width(): + # type: () -> int """Borrowed from the py lib.""" try: import termios @@ -43,6 +44,7 @@ _tw = get_terminal_width() def term_width_line(text): + # type: (str) -> str if not codes: # if no coloring, don't output fancy backspaces return text + '\n' @@ -52,6 +54,7 @@ def term_width_line(text): def color_terminal(): + # type: () -> bool if sys.platform == 'win32' and colorama is not None: colorama.init() return True @@ -68,24 +71,29 @@ def color_terminal(): def nocolor(): + # type: () -> None if sys.platform == 'win32' and colorama is 
not None: colorama.deinit() codes.clear() def coloron(): + # type: () -> None codes.update(_orig_codes) def colorize(name, text): + # type: (str, str) -> str return codes.get(name, '') + text + codes.get('reset', '') def strip_colors(s): + # type: (str) -> str return re.compile('\x1b.*?m').sub('', s) def create_color_func(name): + # type: (str) -> None def inner(text): return colorize(name, text) globals()[name] = inner diff --git a/sphinx/util/docfields.py b/sphinx/util/docfields.py index d5cb4038f..6bf38ebed 100644 --- a/sphinx/util/docfields.py +++ b/sphinx/util/docfields.py @@ -15,8 +15,14 @@ from docutils import nodes from sphinx import addnodes +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.domains import Domain # NOQA + def _is_single_paragraph(node): + # type: (nodes.Node) -> bool """True if the node only contains one paragraph (and system messages).""" if len(node) == 0: return False @@ -47,6 +53,7 @@ class Field(object): def __init__(self, name, names=(), label=None, has_arg=True, rolename=None, bodyrolename=None): + # type: (unicode, Tuple[unicode, ...], unicode, bool, unicode, unicode) -> None self.name = name self.names = names self.label = label @@ -56,6 +63,7 @@ class Field(object): def make_xref(self, rolename, domain, target, innernode=addnodes.literal_emphasis, contnode=None): + # type: (unicode, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node if not rolename: return contnode or innernode(target, target) refnode = addnodes.pending_xref('', refdomain=domain, refexplicit=False, @@ -65,12 +73,15 @@ class Field(object): def make_xrefs(self, rolename, domain, target, innernode=addnodes.literal_emphasis, contnode=None): + # type: (unicode, unicode, unicode, nodes.Node, nodes.Node) -> List[nodes.Node] return [self.make_xref(rolename, domain, target, innernode, contnode)] def make_entry(self, fieldarg, content): + # type: (List, unicode) -> Tuple[List, unicode] return (fieldarg, content) def make_field(self, types, domain, item): + # type: (List, unicode, Tuple) -> nodes.field fieldarg, content = item fieldname = nodes.field_name('', self.label) if fieldarg: @@ -106,10 +117,12 @@ class GroupedField(Field): def __init__(self, name, names=(), label=None, rolename=None, can_collapse=False): + # type: (unicode, Tuple[unicode, ...], unicode, unicode, bool) -> None Field.__init__(self, name, names, label, True, rolename) self.can_collapse = can_collapse def make_field(self, types, domain, items): + # type: (List, unicode, Tuple) -> nodes.field fieldname = nodes.field_name('', self.label) listnode = self.list_type() for fieldarg, content in items: @@ -151,11 +164,13 @@ class TypedField(GroupedField): def __init__(self, name, names=(), typenames=(), label=None, rolename=None, typerolename=None, can_collapse=False): + # type: (unicode, Tuple[unicode, ...], Tuple[unicode, ...], unicode, unicode, unicode, bool) -> None # NOQA GroupedField.__init__(self, name, names, label, rolename, can_collapse) self.typenames = typenames self.typerolename = typerolename def make_field(self, types, domain, items): + # type: (List, unicode, Tuple) -> nodes.field def handle_item(fieldarg, content): par = nodes.paragraph() par.extend(self.make_xrefs(self.rolename, domain, fieldarg, @@ -196,6 +211,7 @@ class DocFieldTransformer(object): """ def __init__(self, directive): + # type: (Any) -> None self.domain = directive.domain if '_doc_field_type_map' not in directive.__class__.__dict__: directive.__class__._doc_field_type_map = \ @@ -203,6 +219,7 @@ class 
DocFieldTransformer(object): self.typemap = directive._doc_field_type_map def preprocess_fieldtypes(self, types): + # type: (List) -> Dict[unicode, Tuple[Any, bool]] typemap = {} for fieldtype in types: for name in fieldtype.names: @@ -213,6 +230,7 @@ class DocFieldTransformer(object): return typemap def transform_all(self, node): + # type: (nodes.Node) -> None """Transform all field list children of a node.""" # don't traverse, only handle field lists that are immediate children for child in node: @@ -220,12 +238,13 @@ class DocFieldTransformer(object): self.transform(child) def transform(self, node): + # type: (nodes.Node) -> None """Transform a single field list *node*.""" typemap = self.typemap entries = [] - groupindices = {} - types = {} + groupindices = {} # type: Dict[unicode, int] + types = {} # type: Dict[unicode, Dict] # step 1: traverse all fields and collect field types and content for field in node: diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py index be9e2edad..a18d0b560 100644 --- a/sphinx/util/docutils.py +++ b/sphinx/util/docutils.py @@ -12,11 +12,19 @@ from __future__ import absolute_import from copy import copy from contextlib import contextmanager + from docutils.parsers.rst import directives, roles +if False: + # For type annotation + from typing import Any, Callable, Iterator, Tuple # NOQA + from docutils import nodes # NOQA + from sphinx.environment import BuildEnvironment # NOQA + @contextmanager def docutils_namespace(): + # type: () -> Iterator[None] """Create namespace for reST parsers.""" try: _directives = copy(directives._directives) @@ -37,9 +45,10 @@ class sphinx_domains(object): markup takes precedence. """ def __init__(self, env): + # type: (BuildEnvironment) -> None self.env = env - self.directive_func = None - self.roles_func = None + self.directive_func = None # type: Callable + self.roles_func = None # type: Callable def __enter__(self): self.enable() @@ -59,6 +68,7 @@ class sphinx_domains(object): roles.role = self.role_func def lookup_domain_element(self, type, name): + # type: (unicode, unicode) -> Tuple[Any, List] """Lookup a markup element (directive or role), given its name which can be a full name (with domain). 
""" @@ -87,12 +97,14 @@ class sphinx_domains(object): raise ElementLookupError def lookup_directive(self, name, lang_module, document): + # type: (unicode, unicode, nodes.document) -> Tuple[Any, List] try: return self.lookup_domain_element('directive', name) except ElementLookupError: return self.directive_func(name, lang_module, document) def lookup_role(self, name, lang_module, lineno, reporter): + # type: (unicode, unicode, int, Any) -> Tuple[Any, List] try: return self.lookup_domain_element('role', name) except ElementLookupError: diff --git a/sphinx/util/i18n.py b/sphinx/util/i18n.py index 112353d47..efbbb75f7 100644 --- a/sphinx/util/i18n.py +++ b/sphinx/util/i18n.py @@ -22,9 +22,12 @@ from babel.messages.pofile import read_po from babel.messages.mofile import write_mo from sphinx.errors import SphinxError -from sphinx.util.osutil import walk -from sphinx.util import SEP +from sphinx.util.osutil import SEP, walk +if False: + # For type annotation + from typing import Callable # NOQA + from sphinx.environment import BuildEnvironment # NOQA LocaleFileInfoBase = namedtuple('CatalogInfo', 'base_dir,domain,charset') @@ -33,32 +36,39 @@ class CatalogInfo(LocaleFileInfoBase): @property def po_file(self): + # type: () -> unicode return self.domain + '.po' @property def mo_file(self): + # type: () -> unicode return self.domain + '.mo' @property def po_path(self): + # type: () -> unicode return path.join(self.base_dir, self.po_file) @property def mo_path(self): + # type: () -> unicode return path.join(self.base_dir, self.mo_file) def is_outdated(self): + # type: () -> bool return ( not path.exists(self.mo_path) or path.getmtime(self.mo_path) < path.getmtime(self.po_path)) def write_mo(self, locale): + # type: (unicode) -> None with io.open(self.po_path, 'rt', encoding=self.charset) as po: with io.open(self.mo_path, 'wb') as mo: write_mo(mo, read_po(po, locale)) def find_catalog(docname, compaction): + # type: (unicode, bool) -> unicode if compaction: ret = docname.split(SEP, 1)[0] else: @@ -68,18 +78,20 @@ def find_catalog(docname, compaction): def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction): + # type: (unicode, unicode, List[unicode], unicode, bool) -> List[unicode] if not(lang and locale_dirs): return [] domain = find_catalog(docname, compaction) - files = [gettext.find(domain, path.join(srcdir, dir_), [lang]) - for dir_ in locale_dirs] - files = [path.relpath(f, srcdir) for f in files if f] - return files + files = [gettext.find(domain, path.join(srcdir, dir_), [lang]) # type: ignore + for dir_ in locale_dirs] # type: ignore + files = [path.relpath(f, srcdir) for f in files if f] # type: ignore + return files # type: ignore def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact=False, charset='utf-8', force_all=False): + # type: (List[unicode], unicode, List[unicode], bool, unicode, bool) -> Set[CatalogInfo] """ :param list locale_dirs: list of path as `['locale_dir1', 'locale_dir2', ...]` to find @@ -99,7 +111,7 @@ def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact if not locale: return [] # locale is not specified - catalogs = set() + catalogs = set() # type: Set[CatalogInfo] for locale_dir in locale_dirs: if not locale_dir: continue # skip system locale directory @@ -158,6 +170,7 @@ date_format_mappings = { def babel_format_date(date, format, locale, warn=None, formatter=babel.dates.format_date): + # type: (datetime, unicode, unicode, Callable, Callable) -> unicode if locale is None: locale = 'en' @@ -180,6 
+193,7 @@ def babel_format_date(date, format, locale, warn=None, formatter=babel.dates.for def format_date(format, date=None, language=None, warn=None): + # type: (str, datetime, unicode, Callable) -> unicode if format is None: format = 'medium' @@ -226,6 +240,7 @@ def format_date(format, date=None, language=None, warn=None): def get_image_filename_for_language(filename, env): + # type: (unicode, BuildEnvironment) -> unicode if not env.config.language: return filename @@ -245,6 +260,7 @@ def get_image_filename_for_language(filename, env): def search_image_for_language(filename, env): + # type: (unicode, BuildEnvironment) -> unicode if not env.config.language: return filename diff --git a/sphinx/util/inspect.py b/sphinx/util/inspect.py index 147d43592..4439e09f6 100644 --- a/sphinx/util/inspect.py +++ b/sphinx/util/inspect.py @@ -12,10 +12,14 @@ import re from six import PY3, binary_type -from six.moves import builtins +from six.moves import builtins # type: ignore from sphinx.util import force_decode +if False: + # For type annotation + from typing import Any, Callable, Tuple # NOQA + # this imports the standard library inspect module without resorting to # relatively import this module inspect = __import__('inspect') @@ -67,7 +71,7 @@ else: # 2.7 """Like inspect.getargspec but supports functools.partial as well.""" if inspect.ismethod(func): func = func.__func__ - parts = 0, () + parts = 0, () # type: Tuple[int, Tuple[unicode, ...]] if type(func) is partial: keywords = func.keywords if keywords is None: @@ -101,6 +105,7 @@ except ImportError: def isenumattribute(x): + # type: (Any) -> bool """Check if the object is attribute of enum.""" if enum is None: return False @@ -108,6 +113,7 @@ def isenumattribute(x): def isdescriptor(x): + # type: (Any) -> bool """Check if the object is some kind of descriptor.""" for item in '__get__', '__set__', '__delete__': if hasattr(safe_getattr(x, item, None), '__call__'): @@ -116,6 +122,7 @@ def isdescriptor(x): def safe_getattr(obj, name, *defargs): + # type: (Any, unicode, unicode) -> object """A getattr() that turns all exceptions into AttributeErrors.""" try: return getattr(obj, name, *defargs) @@ -138,8 +145,9 @@ def safe_getattr(obj, name, *defargs): def safe_getmembers(object, predicate=None, attr_getter=safe_getattr): + # type: (Any, Callable[[unicode], bool], Callable) -> List[Tuple[unicode, Any]] """A version of inspect.getmembers() that uses safe_getattr().""" - results = [] + results = [] # type: List[Tuple[unicode, Any]] for key in dir(object): try: value = attr_getter(object, key, None) @@ -152,6 +160,7 @@ def safe_getmembers(object, predicate=None, attr_getter=safe_getattr): def object_description(object): + # type: (Any) -> unicode """A repr() implementation that returns text safe to use in reST context.""" try: s = repr(object) @@ -166,6 +175,7 @@ def object_description(object): def is_builtin_class_method(obj, attr_name): + # type: (Any, unicode) -> bool """If attr_name is implemented at builtin class, return True. 
>>> is_builtin_class_method(int, '__init__') @@ -177,6 +187,6 @@ def is_builtin_class_method(obj, attr_name): classes = [c for c in inspect.getmro(obj) if attr_name in c.__dict__] cls = classes[0] if classes else object - if not hasattr(builtins, safe_getattr(cls, '__name__', '')): + if not hasattr(builtins, safe_getattr(cls, '__name__', '')): # type: ignore return False - return getattr(builtins, safe_getattr(cls, '__name__', '')) is cls + return getattr(builtins, safe_getattr(cls, '__name__', '')) is cls # type: ignore diff --git a/sphinx/util/jsdump.py b/sphinx/util/jsdump.py index 5a2148c5b..330b5c0ee 100644 --- a/sphinx/util/jsdump.py +++ b/sphinx/util/jsdump.py @@ -16,6 +16,10 @@ from six import iteritems, integer_types, string_types from sphinx.util.pycompat import u +if False: + # For type annotation + from typing import Any, IO, Union # NOQA + _str_re = re.compile(r'"(\\\\|\\"|[^"])*"') _int_re = re.compile(r'\d+') _name_re = re.compile(r'[a-zA-Z_]\w*') @@ -37,6 +41,7 @@ ESCAPED = re.compile(r'\\u.{4}|\\.') def encode_string(s): + # type: (str) -> str def replace(match): s = match.group(0) try: @@ -55,6 +60,7 @@ def encode_string(s): def decode_string(s): + # type: (str) -> str return ESCAPED.sub(lambda m: eval(u + '"' + m.group() + '"'), s) @@ -77,6 +83,7 @@ double in super""".split()) def dumps(obj, key=False): + # type: (Any, bool) -> str if key: if not isinstance(obj, string_types): obj = str(obj) @@ -88,7 +95,7 @@ def dumps(obj, key=False): return 'null' elif obj is True or obj is False: return obj and 'true' or 'false' - elif isinstance(obj, integer_types + (float,)): + elif isinstance(obj, integer_types + (float,)): # type: ignore return str(obj) elif isinstance(obj, dict): return '{%s}' % ','.join(sorted('%s:%s' % ( @@ -100,20 +107,22 @@ def dumps(obj, key=False): elif isinstance(obj, (tuple, list)): return '[%s]' % ','.join(dumps(x) for x in obj) elif isinstance(obj, string_types): - return encode_string(obj) + return encode_string(obj) # type: ignore raise TypeError(type(obj)) def dump(obj, f): + # type: (Any, IO) -> None f.write(dumps(obj)) def loads(x): + # type: (str) -> Any """Loader that can read the JS subset the indexer produces.""" nothing = object() i = 0 n = len(x) - stack = [] + stack = [] # type: List[Union[List, Dict]] obj = nothing key = False keys = [] @@ -164,6 +173,7 @@ def loads(x): raise ValueError("multiple values") key = False else: + y = None # type: Any m = _str_re.match(x, i) if m: y = decode_string(m.group()[1:-1]) @@ -193,11 +203,12 @@ def loads(x): obj[keys[-1]] = y key = False else: - obj.append(y) + obj.append(y) # type: ignore if obj is nothing: raise ValueError("nothing loaded from string") return obj def load(f): + # type: (IO) -> Any return loads(f.read()) diff --git a/sphinx/util/matching.py b/sphinx/util/matching.py index fc7750be9..be4bfee34 100644 --- a/sphinx/util/matching.py +++ b/sphinx/util/matching.py @@ -11,15 +11,20 @@ import re +if False: + # For type annotation + from typing import Callable, Match, Pattern # NOQA + def _translate_pattern(pat): + # type: (unicode) -> unicode """Translate a shell-style glob pattern to a regular expression. Adapted from the fnmatch module, but enhanced so that single stars don't match slashes. 
""" i, n = 0, len(pat) - res = '' + res = '' # type: unicode while i < n: c = pat[i] i += 1 @@ -59,6 +64,7 @@ def _translate_pattern(pat): def compile_matchers(patterns): + # type: (List[unicode]) -> List[Callable[[unicode], Match[unicode]]] return [re.compile(_translate_pattern(pat)).match for pat in patterns] @@ -70,23 +76,27 @@ class Matcher(object): """ def __init__(self, patterns): + # type: (List[unicode]) -> None expanded = [pat[3:] for pat in patterns if pat.startswith('**/')] self.patterns = compile_matchers(patterns + expanded) def __call__(self, string): + # type: (unicode) -> bool return self.match(string) def match(self, string): + # type: (unicode) -> bool return any(pat(string) for pat in self.patterns) DOTFILES = Matcher(['**/.*']) -_pat_cache = {} +_pat_cache = {} # type: Dict[unicode, Pattern] def patmatch(name, pat): + # type: (unicode, unicode) -> re.Match """Return if name matches pat. Adapted from fnmatch module.""" if pat not in _pat_cache: _pat_cache[pat] = re.compile(_translate_pattern(pat)) @@ -94,6 +104,7 @@ def patmatch(name, pat): def patfilter(names, pat): + # type: (List[unicode], unicode) -> List[unicode] """Return the subset of the list NAMES that match PAT. Adapted from fnmatch module. diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py index fe3b0f2f9..2568ea4aa 100644 --- a/sphinx/util/nodes.py +++ b/sphinx/util/nodes.py @@ -13,19 +13,28 @@ from __future__ import absolute_import import re from six import text_type + from docutils import nodes from sphinx import addnodes from sphinx.locale import pairindextypes +if False: + # For type annotation + from typing import Any, Callable, Iterable, Tuple, Union # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.utils.tags import Tags # NOQA + class WarningStream(object): def __init__(self, warnfunc): + # type: (Callable) -> None self.warnfunc = warnfunc self._re = re.compile(r'\((DEBUG|INFO|WARNING|ERROR|SEVERE)/[0-4]\)') def write(self, text): + # type: (str) -> None text = text.strip() if text: self.warnfunc(self._re.sub(r'\1:', text), None, '') @@ -37,6 +46,7 @@ caption_ref_re = explicit_title_re # b/w compat alias def apply_source_workaround(node): + # type: (nodes.Node) -> None # workaround: nodes.term have wrong rawsource if classifier is specified. 
# The behavior of docutils-0.11, 0.12 is: # * when ``term text : classifier1 : classifier2`` is specified, @@ -87,6 +97,7 @@ IGNORED_NODES = ( def is_pending_meta(node): + # type: (nodes.Node) -> bool if (isinstance(node, nodes.pending) and isinstance(node.details.get('nodes', [None])[0], addnodes.meta)): return True @@ -95,6 +106,7 @@ def is_pending_meta(node): def is_translatable(node): + # type: (nodes.Node) -> bool if isinstance(node, addnodes.translatable): return True @@ -137,6 +149,7 @@ META_TYPE_NODES = ( def extract_messages(doctree): + # type: (nodes.Node) -> Iterable[Tuple[nodes.Node, unicode]] """Extract translatable messages from a document tree.""" for node in doctree.traverse(is_translatable): if isinstance(node, addnodes.translatable): @@ -164,12 +177,14 @@ def extract_messages(doctree): def find_source_node(node): + # type: (nodes.Node) -> unicode for pnode in traverse_parent(node): if pnode.source: return pnode.source def traverse_parent(node, cls=None): + # type: (nodes.Node, Any) -> Iterable[nodes.Node] while node: if cls is None or isinstance(node, cls): yield node @@ -177,6 +192,7 @@ def traverse_parent(node, cls=None): def traverse_translatable_index(doctree): + # type: (nodes.Node) -> Iterable[Tuple[nodes.Node, List[unicode]]] """Traverse translatable index node from a document tree.""" def is_block_index(node): return isinstance(node, addnodes.index) and \ @@ -190,6 +206,7 @@ def traverse_translatable_index(doctree): def nested_parse_with_titles(state, content, node): + # type: (Any, List[unicode], nodes.Node) -> unicode """Version of state.nested_parse() that allows titles and does not require titles to have the same decoration as the calling document. @@ -209,6 +226,7 @@ def nested_parse_with_titles(state, content, node): def clean_astext(node): + # type: (nodes.Node) -> unicode """Like node.astext(), but ignore images.""" node = node.deepcopy() for img in node.traverse(nodes.image): @@ -217,6 +235,7 @@ def clean_astext(node): def split_explicit_title(text): + # type: (str) -> Tuple[bool, unicode, unicode] """Split role content into title and target, if given.""" match = explicit_title_re.match(text) if match: @@ -230,7 +249,8 @@ indextypes = [ def process_index_entry(entry, targetid): - indexentries = [] + # type: (unicode, unicode) -> List[Tuple[unicode, unicode, unicode, unicode, unicode]] + indexentries = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode]] entry = entry.strip() oentry = entry main = '' @@ -266,6 +286,7 @@ def process_index_entry(entry, targetid): def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed): + # type: (Builder, Set[unicode], unicode, nodes.Node, Callable, nodes.Node) -> nodes.Node """Inline all toctrees in the *tree*. Record all docnames in *docnameset*, and output docnames with *colorfunc*. 
@@ -299,6 +320,7 @@ def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed def make_refnode(builder, fromdocname, todocname, targetid, child, title=None): + # type: (Builder, unicode, unicode, unicode, nodes.Node, unicode) -> nodes.reference """Shortcut to create a reference node.""" node = nodes.reference('', '', internal=True) if fromdocname == todocname: @@ -313,15 +335,18 @@ def make_refnode(builder, fromdocname, todocname, targetid, child, title=None): def set_source_info(directive, node): + # type: (Any, nodes.Node) -> None node.source, node.line = \ directive.state_machine.get_source_and_line(directive.lineno) def set_role_source_info(inliner, lineno, node): + # type: (Any, unicode, nodes.Node) -> None node.source, node.line = inliner.reporter.get_source_and_line(lineno) def process_only_nodes(doctree, tags, warn_node=None): + # type: (nodes.Node, Tags, Callable) -> None # A comment on the comment() nodes being inserted: replacing by [] would # result in a "Losing ids" exception if there is a target node before # the only node, so we make sure docutils can transfer the id to @@ -345,6 +370,7 @@ def process_only_nodes(doctree, tags, warn_node=None): # monkey-patch Element.copy to copy the rawsource and line def _new_copy(self): + # type: (nodes.Node) -> nodes.Node newnode = self.__class__(self.rawsource, **self.attributes) if isinstance(self, nodes.Element): newnode.source = self.source diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py index b8fffb220..5561f0ddb 100644 --- a/sphinx/util/osutil.py +++ b/sphinx/util/osutil.py @@ -21,9 +21,12 @@ import filecmp from os import path import contextlib from io import BytesIO, StringIO - from six import PY2, text_type +if False: + # For type annotation + from typing import Any, Iterator, Tuple, Union # NOQA + # Errnos that we need. EEXIST = getattr(errno, 'EEXIST', 0) ENOENT = getattr(errno, 'ENOENT', 0) @@ -39,15 +42,18 @@ SEP = "/" def os_path(canonicalpath): + # type: (unicode) -> unicode return canonicalpath.replace(SEP, path.sep) def canon_path(nativepath): + # type: (unicode) -> unicode """Return path in OS-independent form""" return nativepath.replace(path.sep, SEP) def relative_uri(base, to): + # type: (unicode, unicode) -> unicode """Return a relative URL from ``base`` to ``to``.""" if to.startswith(SEP): return to @@ -71,6 +77,7 @@ def relative_uri(base, to): def ensuredir(path): + # type: (unicode) -> None """Ensure that a path exists.""" try: os.makedirs(path) @@ -84,6 +91,7 @@ def ensuredir(path): # that check UnicodeError. # The customization obstacle to replace the function with the os.walk. def walk(top, topdown=True, followlinks=False): + # type: (unicode, bool, bool) -> Iterator[Tuple[unicode, List[unicode], List[unicode]]] """Backport of os.walk from 2.6, where the *followlinks* argument was added. 
""" @@ -115,6 +123,7 @@ def walk(top, topdown=True, followlinks=False): def mtimes_of_files(dirnames, suffix): + # type: (List[unicode], unicode) -> Iterator[float] for dirname in dirnames: for root, dirs, files in os.walk(dirname): for sfile in files: @@ -126,6 +135,7 @@ def mtimes_of_files(dirnames, suffix): def movefile(source, dest): + # type: (unicode, unicode) -> None """Move a file, removing the destination if it exists.""" if os.path.exists(dest): try: @@ -136,6 +146,7 @@ def movefile(source, dest): def copytimes(source, dest): + # type: (unicode, unicode) -> None """Copy a file's modification times.""" st = os.stat(source) if hasattr(os, 'utime'): @@ -143,6 +154,7 @@ def copytimes(source, dest): def copyfile(source, dest): + # type: (unicode, unicode) -> None """Copy a file and its modification times, if possible. Note: ``copyfile`` skips copying if the file has not been changed""" @@ -159,10 +171,12 @@ no_fn_re = re.compile(r'[^a-zA-Z0-9_-]') def make_filename(string): + # type: (str) -> unicode return no_fn_re.sub('', string) or 'sphinx' def ustrftime(format, *args): + # type: (unicode, Any) -> unicode # [DEPRECATED] strftime for unicode strings # It will be removed at Sphinx-1.5 if not args: @@ -171,7 +185,7 @@ def ustrftime(format, *args): source_date_epoch = os.getenv('SOURCE_DATE_EPOCH') if source_date_epoch is not None: time_struct = time.gmtime(float(source_date_epoch)) - args = [time_struct] + args = [time_struct] # type: ignore if PY2: # if a locale is set, the time strings are encoded in the encoding # given by LC_TIME; if that is available, use it @@ -188,16 +202,18 @@ def ustrftime(format, *args): def safe_relpath(path, start=None): + # type: (unicode, unicode) -> unicode try: return os.path.relpath(path, start) except ValueError: return path -fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() +fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() # type: unicode def abspath(pathdir): + # type: (unicode) -> unicode pathdir = path.abspath(pathdir) if isinstance(pathdir, bytes): pathdir = pathdir.decode(fs_encoding) @@ -205,6 +221,7 @@ def abspath(pathdir): def getcwd(): + # type: () -> unicode if hasattr(os, 'getcwdu'): return os.getcwdu() return os.getcwd() @@ -212,6 +229,7 @@ def getcwd(): @contextlib.contextmanager def cd(target_dir): + # type: (unicode) -> Iterator[None] cwd = getcwd() try: os.chdir(target_dir) @@ -233,10 +251,12 @@ class FileAvoidWrite(object): Objects can be used as context managers. 
""" def __init__(self, path): + # type: (unicode) -> None self._path = path - self._io = None + self._io = None # type: Union[StringIO, BytesIO] def write(self, data): + # type: (Union[str, bytes]) -> None if not self._io: if isinstance(data, text_type): self._io = StringIO() @@ -246,6 +266,7 @@ class FileAvoidWrite(object): self._io.write(data) def close(self): + # type: () -> None """Stop accepting writes and write file, if needed.""" if not self._io: raise Exception('FileAvoidWrite does not support empty files.') @@ -288,6 +309,7 @@ class FileAvoidWrite(object): def rmtree(path): + # type: (unicode) -> None if os.path.isdir(path): shutil.rmtree(path) else: diff --git a/sphinx/util/parallel.py b/sphinx/util/parallel.py index bace0b5fd..814af09b1 100644 --- a/sphinx/util/parallel.py +++ b/sphinx/util/parallel.py @@ -13,16 +13,19 @@ import os import time import traceback from math import sqrt +from six import iteritems try: import multiprocessing except ImportError: multiprocessing = None -from six import iteritems - from sphinx.errors import SphinxParallelError +if False: + # For type annotation + from typing import Any, Callable, Sequence # NOQA + # our parallel functionality only works for the forking Process parallel_available = multiprocessing and (os.name == 'posix') @@ -31,9 +34,11 @@ class SerialTasks(object): """Has the same interface as ParallelTasks, but executes tasks directly.""" def __init__(self, nproc=1): + # type: (int) -> None pass def add_task(self, task_func, arg=None, result_func=None): + # type: (Callable, Any, Callable) -> None if arg is not None: res = task_func(arg) else: @@ -42,6 +47,7 @@ class SerialTasks(object): result_func(res) def join(self): + # type: () -> None pass @@ -49,23 +55,25 @@ class ParallelTasks(object): """Executes *nproc* tasks in parallel after forking.""" def __init__(self, nproc): + # type: (int) -> None self.nproc = nproc # (optional) function performed by each task on the result of main task - self._result_funcs = {} + self._result_funcs = {} # type: Dict[int, Callable] # task arguments - self._args = {} + self._args = {} # type: Dict[int, List[Any]] # list of subprocesses (both started and waiting) - self._procs = {} + self._procs = {} # type: Dict[int, multiprocessing.Process] # list of receiving pipe connections of running subprocesses - self._precvs = {} + self._precvs = {} # type: Dict[int, Any] # list of receiving pipe connections of waiting subprocesses - self._precvsWaiting = {} + self._precvsWaiting = {} # type: Dict[int, Any] # number of working subprocesses self._pworking = 0 # task number of each subprocess self._taskid = 0 def _process(self, pipe, func, arg): + # type: (Any, Callable, Any) -> None try: if arg is None: ret = func() @@ -76,6 +84,7 @@ class ParallelTasks(object): pipe.send((True, (err, traceback.format_exc()))) def add_task(self, task_func, arg=None, result_func=None): + # type: (Callable, Any, Callable) -> None tid = self._taskid self._taskid += 1 self._result_funcs[tid] = result_func or (lambda arg: None) @@ -88,10 +97,12 @@ class ParallelTasks(object): self._join_one() def join(self): + # type: () -> None while self._pworking: self._join_one() def _join_one(self): + # type: () -> None for tid, pipe in iteritems(self._precvs): if pipe.poll(): exc, result = pipe.recv() @@ -111,6 +122,7 @@ class ParallelTasks(object): def make_chunks(arguments, nproc, maxbatch=10): + # type: (Sequence[unicode], int, int) -> List[Any] # determine how many documents to read in one go nargs = len(arguments) chunksize = nargs // nproc 
diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py index e3b17ef62..3d31abb1e 100644 --- a/sphinx/util/pycompat.py +++ b/sphinx/util/pycompat.py @@ -14,11 +14,13 @@ import sys import codecs import warnings -from six import class_types +from six import PY3, class_types, text_type, exec_ from six.moves import zip_longest from itertools import product -from six import PY3, text_type, exec_ +if False: + # For type annotation + from typing import Any, Callable # NOQA NoneType = type(None) @@ -33,6 +35,7 @@ if PY3: # safely encode a string for printing to the terminal def terminal_safe(s): + # type: (unicode) -> unicode return s.encode('ascii', 'backslashreplace').decode('ascii') # some kind of default system encoding; should be used with a lenient # error handler @@ -40,6 +43,7 @@ if PY3: # support for running 2to3 over config files def convert_with_2to3(filepath): + # type: (unicode) -> unicode from lib2to3.refactor import RefactoringTool, get_fixers_from_package from lib2to3.pgen2.parse import ParseError fixers = get_fixers_from_package('lib2to3.fixes') @@ -68,13 +72,15 @@ else: # Python 2 u = 'u' # no need to refactor on 2.x versions - convert_with_2to3 = None + convert_with_2to3 = None # type: ignore def TextIOWrapper(stream, encoding): + # type: (file, str) -> unicode return codecs.lookup(encoding or 'ascii')[2](stream) # safely encode a string for printing to the terminal def terminal_safe(s): + # type: (unicode) -> unicode return s.encode('ascii', 'backslashreplace') # some kind of default system encoding; should be used with a lenient # error handler @@ -91,6 +97,7 @@ else: # backport from python3 def indent(text, prefix, predicate=None): + # type: (unicode, unicode, Callable) -> unicode if predicate is None: def predicate(line): return line.strip() @@ -102,6 +109,7 @@ else: def execfile_(filepath, _globals, open=open): + # type: (unicode, Any, Callable) -> None from sphinx.util.osutil import fs_encoding # get config source -- 'b' is a no-op under 2.x, while 'U' is # ignored under 3.x (but 3.x compile() accepts \r\n newlines) @@ -132,6 +140,7 @@ def execfile_(filepath, _globals, open=open): class _DeprecationWrapper(object): def __init__(self, mod, deprecated): + # type: (Any, Dict) -> None self._mod = mod self._deprecated = deprecated @@ -145,7 +154,7 @@ class _DeprecationWrapper(object): return getattr(self._mod, attr) -sys.modules[__name__] = _DeprecationWrapper(sys.modules[__name__], dict( +sys.modules[__name__] = _DeprecationWrapper(sys.modules[__name__], dict( # type: ignore zip_longest = zip_longest, product = product, all = all, From 9c66ac71ab244b227aac8781ec3a734692b96076 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA Date: Wed, 9 Nov 2016 11:45:27 +0900 Subject: [PATCH 005/190] Add type-check annotations to sphinx.application --- sphinx/application.py | 132 +++++++++++++++++++++++++++++++----------- 1 file changed, 97 insertions(+), 35 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 93f12f3b6..08075d8e1 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -22,13 +22,13 @@ from collections import deque from six import iteritems, itervalues, text_type from six.moves import cStringIO + from docutils import nodes from docutils.parsers.rst import convert_directive_function, \ directives, roles import sphinx from sphinx import package_dir, locale -from sphinx.roles import XRefRole from sphinx.config import Config from sphinx.errors import SphinxError, SphinxWarning, ExtensionError, \ VersionRequirementError, ConfigError @@ 
-36,15 +36,25 @@ from sphinx.domains import ObjType from sphinx.domains.std import GenericObject, Target, StandardDomain from sphinx.environment import BuildEnvironment from sphinx.io import SphinxStandaloneReader +from sphinx.roles import XRefRole from sphinx.util import pycompat # noqa: F401 from sphinx.util import import_object from sphinx.util.tags import Tags from sphinx.util.osutil import ENOENT from sphinx.util.logging import is_suppressed_warning -from sphinx.util.console import bold, lightgray, darkgray, darkred, darkgreen, \ - term_width_line +from sphinx.util.console import ( # type: ignore + bold, lightgray, darkgray, darkred, darkgreen, term_width_line +) from sphinx.util.i18n import find_catalog_source_files +if False: + # For type annotation + from typing import Any, Callable, IO, Iterable, Iterator, Tuple, Type, Union # NOQA + from docutils.parsers import Parser # NOQA + from docutils.transform import Transform # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.domains import Domain # NOQA + # List of all known core events. Maps name to arguments description. events = { 'builder-inited': '', @@ -60,7 +70,7 @@ events = { 'html-collect-pages': 'builder', 'html-page-context': 'pagename, context, doctree or None', 'build-finished': 'exception', -} +} # type: Dict[unicode, unicode] builtin_extensions = ( 'sphinx.builders.applehelp', 'sphinx.builders.changes', @@ -90,14 +100,14 @@ builtin_extensions = ( 'sphinx.directives.other', 'sphinx.directives.patches', 'sphinx.roles', -) +) # type: Tuple[unicode, ...] CONFIG_FILENAME = 'conf.py' ENV_PICKLE_FILENAME = 'environment.pickle' # list of deprecated extensions. Keys are extension name. # Values are Sphinx version that merge the extension. -EXTENSION_BLACKLIST = {"sphinxjp.themecore": "1.2"} +EXTENSION_BLACKLIST = {"sphinxjp.themecore": "1.2"} # type: Dict[unicode, unicode] class Sphinx(object): @@ -106,19 +116,20 @@ class Sphinx(object): confoverrides=None, status=sys.stdout, warning=sys.stderr, freshenv=False, warningiserror=False, tags=None, verbosity=0, parallel=0): + # type: (unicode, unicode, unicode, unicode, unicode, Dict, IO, IO, bool, bool, unicode, int, int) -> None # NOQA self.verbosity = verbosity self.next_listener_id = 0 - self._extensions = {} - self._extension_metadata = {} - self._additional_source_parsers = {} - self._listeners = {} - self._setting_up_extension = ['?'] - self.domains = {} + self._extensions = {} # type: Dict[unicode, Any] + self._extension_metadata = {} # type: Dict[unicode, Dict[unicode, Any]] + self._additional_source_parsers = {} # type: Dict[unicode, Parser] + self._listeners = {} # type: Dict[unicode, Dict[int, Callable]] + self._setting_up_extension = ['?'] # type: List[unicode] + self.domains = {} # type: Dict[unicode, Type[Domain]] self.buildername = buildername - self.builderclasses = {} - self.builder = None - self.env = None - self.enumerable_nodes = {} + self.builderclasses = {} # type: Dict[unicode, Type[Builder]] + self.builder = None # type: Builder + self.env = None # type: BuildEnvironment + self.enumerable_nodes = {} # type: Dict[nodes.Node, Tuple[unicode, Callable]] # NOQA self.srcdir = srcdir self.confdir = confdir @@ -128,24 +139,24 @@ class Sphinx(object): self.parallel = parallel if status is None: - self._status = cStringIO() + self._status = cStringIO() # type: IO self.quiet = True else: self._status = status self.quiet = False if warning is None: - self._warning = cStringIO() + self._warning = cStringIO() # type: IO else: self._warning = warning 
self._warncount = 0 self.warningiserror = warningiserror self._events = events.copy() - self._translators = {} + self._translators = {} # type: Dict[unicode, nodes.GenericNodeVisitor] # keep last few messages for traceback - self.messagelog = deque(maxlen=10) + self.messagelog = deque(maxlen=10) # type: deque # say hello to the world self.info(bold('Running Sphinx v%s' % sphinx.__display_version__)) @@ -246,6 +257,7 @@ class Sphinx(object): self._init_enumerable_nodes() def _init_i18n(self): + # type: () -> None """Load translated strings from the configured localedirs if enabled in the configuration. """ @@ -271,6 +283,7 @@ class Sphinx(object): self.info('not available for built-in messages') def _init_source_parsers(self): + # type: () -> None for suffix, parser in iteritems(self._additional_source_parsers): if suffix not in self.config.source_suffix: self.config.source_suffix.append(suffix) @@ -278,6 +291,7 @@ class Sphinx(object): self.config.source_parsers[suffix] = parser def _init_env(self, freshenv): + # type: (bool) -> None if freshenv: self.env = BuildEnvironment(self.srcdir, self.doctreedir, self.config) self.env.set_warnfunc(self.warn) @@ -304,6 +318,7 @@ class Sphinx(object): return self._init_env(freshenv=True) def _init_builder(self, buildername): + # type: (unicode) -> None if buildername is None: print('No builder selected, using default: html', file=self._status) buildername = 'html' @@ -315,12 +330,14 @@ class Sphinx(object): self.emit('builder-inited') def _init_enumerable_nodes(self): + # type: () -> None for node, settings in iteritems(self.enumerable_nodes): - self.env.domains['std'].enumerable_nodes[node] = settings + self.env.domains['std'].enumerable_nodes[node] = settings # type: ignore # ---- main "build" method ------------------------------------------------- def build(self, force_all=False, filenames=None): + # type: (bool, List[unicode]) -> None try: if force_all: self.builder.compile_all_catalogs() @@ -354,6 +371,7 @@ class Sphinx(object): # ---- logging handling ---------------------------------------------------- def _log(self, message, wfile, nonl=False): + # type: (unicode, IO, bool) -> None try: wfile.write(message) except UnicodeEncodeError: @@ -367,6 +385,7 @@ class Sphinx(object): def warn(self, message, location=None, prefix='WARNING: ', type=None, subtype=None, colorfunc=darkred): + # type: (unicode, unicode, unicode, unicode, unicode, Callable) -> None """Emit a warning. If *location* is given, it should either be a tuple of (docname, lineno) @@ -399,6 +418,7 @@ class Sphinx(object): self._log(colorfunc(warntext), self._warning, True) def info(self, message='', nonl=False): + # type: (unicode, bool) -> None """Emit an informational message. If *nonl* is true, don't emit a newline at the end (which implies that @@ -407,6 +427,7 @@ class Sphinx(object): self._log(message, self._status, nonl) def verbose(self, message, *args, **kwargs): + # type: (unicode, Any, Any) -> None """Emit a verbose informational message. The message will only be emitted for verbosity levels >= 1 (i.e. at @@ -422,6 +443,7 @@ class Sphinx(object): self._log(message, self._status) def debug(self, message, *args, **kwargs): + # type: (unicode, Any, Any) -> None """Emit a debug-level informational message. The message will only be emitted for verbosity levels >= 2 (i.e. 
at @@ -437,6 +459,7 @@ class Sphinx(object): self._log(darkgray(message), self._status) def debug2(self, message, *args, **kwargs): + # type: (unicode, Any, Any) -> None """Emit a lowlevel debug-level informational message. The message will only be emitted for verbosity level 3 (i.e. three @@ -452,6 +475,7 @@ class Sphinx(object): self._log(lightgray(message), self._status) def _display_chunk(chunk): + # type: (Any) -> unicode if isinstance(chunk, (list, tuple)): if len(chunk) == 1: return text_type(chunk[0]) @@ -460,6 +484,7 @@ class Sphinx(object): def old_status_iterator(self, iterable, summary, colorfunc=darkgreen, stringify_func=_display_chunk): + # type: (Iterable, unicode, Callable, Callable) -> Iterator l = 0 for item in iterable: if l == 0: @@ -473,6 +498,7 @@ class Sphinx(object): # new version with progress info def status_iterator(self, iterable, summary, colorfunc=darkgreen, length=0, stringify_func=_display_chunk): + # type: (Iterable, unicode, Callable, int, Callable) -> Iterable if length == 0: for item in self.old_status_iterator(iterable, summary, colorfunc, stringify_func): @@ -496,6 +522,7 @@ class Sphinx(object): # ---- general extensibility interface ------------------------------------- def setup_extension(self, extension): + # type: (unicode) -> None """Import and setup a Sphinx extension module. No-op if called twice.""" self.debug('[app] setting up extension: %r', extension) if extension in self._extensions: @@ -543,21 +570,25 @@ class Sphinx(object): self._setting_up_extension.pop() def require_sphinx(self, version): + # type: (unicode) -> None # check the Sphinx version if requested if version > sphinx.__display_version__[:3]: raise VersionRequirementError(version) def import_object(self, objname, source=None): + # type: (str, unicode) -> Any """Import an object from a 'module.name' string.""" return import_object(objname, source=None) # event interface def _validate_event(self, event): + # type: (unicode) -> None if event not in self._events: raise ExtensionError('Unknown event name: %s' % event) def connect(self, event, callback): + # type: (unicode, Callable) -> int self._validate_event(event) listener_id = self.next_listener_id if event not in self._listeners: @@ -570,11 +601,13 @@ class Sphinx(object): return listener_id def disconnect(self, listener_id): + # type: (int) -> None self.debug('[app] disconnecting event: [id=%s]', listener_id) for event in itervalues(self._listeners): event.pop(listener_id, None) def emit(self, event, *args): + # type: (unicode, Any) -> List try: self.debug2('[app] emitting event: %r%s', event, repr(args)[:100]) except Exception: @@ -588,6 +621,7 @@ class Sphinx(object): return results def emit_firstresult(self, event, *args): + # type: (unicode, Any) -> Any for result in self.emit(event, *args): if result is not None: return result @@ -596,6 +630,7 @@ class Sphinx(object): # registering addon parts def add_builder(self, builder): + # type: (Type[Builder]) -> None self.debug('[app] adding builder: %r', builder) if not hasattr(builder, 'name'): raise ExtensionError('Builder class %s has no "name" attribute' @@ -607,8 +642,9 @@ class Sphinx(object): self.builderclasses[builder.name] = builder def add_config_value(self, name, default, rebuild, types=()): + # type: (unicode, Any, Union[bool, unicode], Any) -> None self.debug('[app] adding config value: %r', - (name, default, rebuild) + ((types,) if types else ())) + (name, default, rebuild) + ((types,) if types else ())) # type: ignore if name in self.config.values: raise 
ExtensionError('Config value %r already present' % name) if rebuild in (False, True): @@ -616,16 +652,19 @@ class Sphinx(object): self.config.values[name] = (default, rebuild, types) def add_event(self, name): + # type: (unicode) -> None self.debug('[app] adding event: %r', name) if name in self._events: raise ExtensionError('Event %r already present' % name) self._events[name] = '' def set_translator(self, name, translator_class): + # type: (unicode, Any) -> None self.info(bold('A Translator for the %s builder is changed.' % name)) self._translators[name] = translator_class def add_node(self, node, **kwds): + # type: (nodes.Node, Any) -> None self.debug('[app] adding node: %r', (node, kwds)) if not kwds.pop('override', False) and \ hasattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__): @@ -644,17 +683,15 @@ class Sphinx(object): if translator is not None: pass elif key == 'html': - from sphinx.writers.html import HTMLTranslator as translator + from sphinx.writers.html import HTMLTranslator as translator # type: ignore elif key == 'latex': - from sphinx.writers.latex import LaTeXTranslator as translator + from sphinx.writers.latex import LaTeXTranslator as translator # type: ignore elif key == 'text': - from sphinx.writers.text import TextTranslator as translator + from sphinx.writers.text import TextTranslator as translator # type: ignore elif key == 'man': - from sphinx.writers.manpage import ManualPageTranslator \ - as translator + from sphinx.writers.manpage import ManualPageTranslator as translator # type: ignore # NOQA elif key == 'texinfo': - from sphinx.writers.texinfo import TexinfoTranslator \ - as translator + from sphinx.writers.texinfo import TexinfoTranslator as translator # type: ignore # NOQA else: # ignore invalid keys for compatibility continue @@ -663,14 +700,16 @@ class Sphinx(object): setattr(translator, 'depart_'+node.__name__, depart) def add_enumerable_node(self, node, figtype, title_getter=None, **kwds): + # type: (nodes.Node, unicode, Callable, Any) -> None self.enumerable_nodes[node] = (figtype, title_getter) self.add_node(node, **kwds) def _directive_helper(self, obj, content=None, arguments=None, **options): + # type: (Any, unicode, Any, Any) -> Any if isinstance(obj, (types.FunctionType, types.MethodType)): - obj.content = content - obj.arguments = arguments or (0, 0, False) - obj.options = options + obj.content = content # type: ignore + obj.arguments = arguments or (0, 0, False) # type: ignore + obj.options = options # type: ignore return convert_directive_function(obj) else: if content or arguments or options: @@ -679,6 +718,7 @@ class Sphinx(object): return obj def add_directive(self, name, obj, content=None, arguments=None, **options): + # type: (unicode, Any, unicode, Any, Any) -> None self.debug('[app] adding directive: %r', (name, obj, content, arguments, options)) if name in directives._directives: @@ -690,6 +730,7 @@ class Sphinx(object): name, self._directive_helper(obj, content, arguments, **options)) def add_role(self, name, role): + # type: (unicode, Any) -> None self.debug('[app] adding role: %r', (name, role)) if name in roles._roles: self.warn('while setting up extension %s: role %r is ' @@ -699,6 +740,7 @@ class Sphinx(object): roles.register_local_role(name, role) def add_generic_role(self, name, nodeclass): + # type: (unicode, Any) -> None # don't use roles.register_generic_role because it uses # register_canonical_role self.debug('[app] adding generic role: %r', (name, nodeclass)) @@ -711,12 +753,14 @@ class Sphinx(object): 
roles.register_local_role(name, role) def add_domain(self, domain): + # type: (Type[Domain]) -> None self.debug('[app] adding domain: %r', domain) if domain.name in self.domains: raise ExtensionError('domain %s already registered' % domain.name) self.domains[domain.name] = domain def override_domain(self, domain): + # type: (Type[Domain]) -> None self.debug('[app] overriding domain: %r', domain) if domain.name not in self.domains: raise ExtensionError('domain %s not yet registered' % domain.name) @@ -727,6 +771,7 @@ class Sphinx(object): def add_directive_to_domain(self, domain, name, obj, content=None, arguments=None, **options): + # type: (unicode, unicode, Any, unicode, Any, Any) -> None self.debug('[app] adding directive to domain: %r', (domain, name, obj, content, arguments, options)) if domain not in self.domains: @@ -735,12 +780,14 @@ class Sphinx(object): self._directive_helper(obj, content, arguments, **options) def add_role_to_domain(self, domain, name, role): + # type: (unicode, unicode, Any) -> None self.debug('[app] adding role to domain: %r', (domain, name, role)) if domain not in self.domains: raise ExtensionError('domain %s not yet registered' % domain) self.domains[domain].roles[name] = role def add_index_to_domain(self, domain, index): + # type: (unicode, unicode) -> None self.debug('[app] adding index to domain: %r', (domain, index)) if domain not in self.domains: raise ExtensionError('domain %s not yet registered' % domain) @@ -749,15 +796,16 @@ class Sphinx(object): def add_object_type(self, directivename, rolename, indextemplate='', parse_node=None, ref_nodeclass=None, objname='', doc_field_types=[]): + # type: (unicode, unicode, unicode, Callable, nodes.Node, unicode, List) -> None self.debug('[app] adding object type: %r', (directivename, rolename, indextemplate, parse_node, ref_nodeclass, objname, doc_field_types)) StandardDomain.object_types[directivename] = \ ObjType(objname or directivename, rolename) # create a subclass of GenericObject as the new directive - new_directive = type(directivename, (GenericObject, object), + new_directive = type(directivename, (GenericObject, object), # type: ignore {'indextemplate': indextemplate, - 'parse_node': staticmethod(parse_node), + 'parse_node': staticmethod(parse_node), # type: ignore 'doc_field_types': doc_field_types}) StandardDomain.directives[directivename] = new_directive # XXX support more options? @@ -768,23 +816,26 @@ class Sphinx(object): def add_crossref_type(self, directivename, rolename, indextemplate='', ref_nodeclass=None, objname=''): + # type: (unicode, unicode, unicode, nodes.Node, unicode) -> None self.debug('[app] adding crossref type: %r', (directivename, rolename, indextemplate, ref_nodeclass, objname)) StandardDomain.object_types[directivename] = \ ObjType(objname or directivename, rolename) # create a subclass of Target as the new directive - new_directive = type(directivename, (Target, object), + new_directive = type(directivename, (Target, object), # type: ignore {'indextemplate': indextemplate}) StandardDomain.directives[directivename] = new_directive # XXX support more options? 
StandardDomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass) def add_transform(self, transform): + # type: (Transform) -> None self.debug('[app] adding transform: %r', transform) SphinxStandaloneReader.transforms.append(transform) def add_javascript(self, filename): + # type: (unicode) -> None self.debug('[app] adding javascript: %r', filename) from sphinx.builders.html import StandaloneHTMLBuilder if '://' in filename: @@ -794,6 +845,7 @@ class Sphinx(object): posixpath.join('_static', filename)) def add_stylesheet(self, filename): + # type: (unicode) -> None self.debug('[app] adding stylesheet: %r', filename) from sphinx.builders.html import StandaloneHTMLBuilder if '://' in filename: @@ -803,10 +855,12 @@ class Sphinx(object): posixpath.join('_static', filename)) def add_latex_package(self, packagename, options=None): + # type: (unicode, unicode) -> None self.debug('[app] adding latex package: %r', packagename) self.builder.usepackages.append((packagename, options)) def add_lexer(self, alias, lexer): + # type: (unicode, Any) -> None self.debug('[app] adding lexer: %r', (alias, lexer)) from sphinx.highlighting import lexers if lexers is None: @@ -814,23 +868,27 @@ class Sphinx(object): lexers[alias] = lexer def add_autodocumenter(self, cls): + # type: (Any) -> None self.debug('[app] adding autodocumenter: %r', cls) from sphinx.ext import autodoc autodoc.add_documenter(cls) self.add_directive('auto' + cls.objtype, autodoc.AutoDirective) def add_autodoc_attrgetter(self, type, getter): + # type: (Any, Callable) -> None self.debug('[app] adding autodoc attrgetter: %r', (type, getter)) from sphinx.ext import autodoc autodoc.AutoDirective._special_attrgetters[type] = getter def add_search_language(self, cls): + # type: (Any) -> None self.debug('[app] adding search language: %r', cls) from sphinx.search import languages, SearchLanguage assert issubclass(cls, SearchLanguage) languages[cls.lang] = cls def add_source_parser(self, suffix, parser): + # type: (unicode, Parser) -> None self.debug('[app] adding search source_parser: %r, %r', suffix, parser) if suffix in self._additional_source_parsers: self.warn('while setting up extension %s: source_parser for %r is ' @@ -847,6 +905,7 @@ class TemplateBridge(object): """ def init(self, builder, theme=None, dirs=None): + # type: (Builder, unicode, List[unicode]) -> None """Called by the builder to initialize the template system. *builder* is the builder object; you'll probably want to look at the @@ -858,6 +917,7 @@ class TemplateBridge(object): raise NotImplementedError('must be implemented in subclasses') def newest_template_mtime(self): + # type: () -> float """Called by the builder to determine if output files are outdated because of template changes. Return the mtime of the newest template file that was changed. The default implementation returns ``0``. @@ -865,12 +925,14 @@ class TemplateBridge(object): return 0 def render(self, template, context): + # type: (unicode, Dict) -> None """Called by the builder to render a template given as a filename with a specified context (a Python dictionary). """ raise NotImplementedError('must be implemented in subclasses') def render_string(self, template, context): + # type: (unicode, Dict) -> unicode """Called by the builder to render a template given as a string with a specified context (a Python dictionary). 
""" From 2bdaf5c8a0b0ee07bea0724b1ef38420cf219111 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA Date: Mon, 7 Nov 2016 14:05:55 +0900 Subject: [PATCH 006/190] Add type-check annotations to sphinx.environment --- sphinx/environment/__init__.py | 194 ++++++++++++++------ sphinx/environment/managers/__init__.py | 15 +- sphinx/environment/managers/indexentries.py | 19 +- sphinx/environment/managers/toctree.py | 29 ++- 4 files changed, 193 insertions(+), 64 deletions(-) diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index d750b0284..6dc39e945 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -21,6 +21,7 @@ from glob import glob from six import iteritems, itervalues, class_types, next from six.moves import cPickle as pickle + from docutils import nodes from docutils.io import NullOutput from docutils.core import Publisher @@ -38,7 +39,7 @@ from sphinx.util.osutil import SEP, getcwd, fs_encoding, ensuredir from sphinx.util.images import guess_mimetype from sphinx.util.i18n import find_catalog_files, get_image_filename_for_language, \ search_image_for_language -from sphinx.util.console import bold, purple +from sphinx.util.console import bold, purple # type: ignore from sphinx.util.docutils import sphinx_domains from sphinx.util.matching import compile_matchers from sphinx.util.parallel import ParallelTasks, parallel_available, make_chunks @@ -49,6 +50,14 @@ from sphinx.transforms import SphinxContentsFilter from sphinx.environment.managers.indexentries import IndexEntries from sphinx.environment.managers.toctree import Toctree +if False: + # For type annotation + from typing import Any, Callable, Iterator, Pattern, Tuple, Type, Union # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.config import Config # NOQA + from sphinx.domains import Domain # NOQA + from sphinx.environment.managers import EnvironmentManager # NOQA default_settings = { 'embed_stylesheet': False, @@ -75,7 +84,7 @@ versioning_conditions = { 'none': False, 'text': is_translatable, 'commentable': is_commentable, -} +} # type: Dict[unicode, Union[bool, Callable]] class NoUri(Exception): @@ -90,10 +99,13 @@ class BuildEnvironment(object): transformations to resolve links to them. 
""" + domains = None # type: Dict[unicode, Domain] + # --------- ENVIRONMENT PERSISTENCE ---------------------------------------- @staticmethod def frompickle(srcdir, config, filename): + # type: (unicode, Config, unicode) -> BuildEnvironment with open(filename, 'rb') as picklefile: env = pickle.load(picklefile) if env.version != ENV_VERSION: @@ -104,6 +116,7 @@ class BuildEnvironment(object): return env def topickle(self, filename): + # type: (unicode) -> None # remove unpicklable attributes warnfunc = self._warnfunc self.set_warnfunc(None) @@ -130,16 +143,17 @@ class BuildEnvironment(object): # --------- ENVIRONMENT INITIALIZATION ------------------------------------- def __init__(self, srcdir, doctreedir, config): + # type: (unicode, unicode, Config) -> None self.doctreedir = doctreedir - self.srcdir = srcdir - self.config = config + self.srcdir = srcdir # type: unicode + self.config = config # type: Config # the method of doctree versioning; see set_versioning_method - self.versioning_condition = None - self.versioning_compare = None + self.versioning_condition = None # type: Union[bool, Callable] + self.versioning_compare = None # type: bool # the application object; only set while update() runs - self.app = None + self.app = None # type: Sphinx # all the registered domains, set by the application self.domains = {} @@ -149,7 +163,7 @@ class BuildEnvironment(object): self.settings['env'] = self # the function to write warning messages with - self._warnfunc = None + self._warnfunc = None # type: Callable # this is to invalidate old pickles self.version = ENV_VERSION @@ -157,43 +171,63 @@ class BuildEnvironment(object): # All "docnames" here are /-separated and relative and exclude # the source suffix. - self.found_docs = set() # contains all existing docnames - self.all_docs = {} # docname -> mtime at the time of reading + self.found_docs = set() # type: Set[unicode] + # contains all existing docnames + self.all_docs = {} # type: Dict[unicode, float] + # docname -> mtime at the time of reading # contains all read docnames - self.dependencies = {} # docname -> set of dependent file + self.dependencies = {} # type: Dict[unicode, Set[unicode]] + # docname -> set of dependent file # names, relative to documentation root - self.included = set() # docnames included from other documents - self.reread_always = set() # docnames to re-read unconditionally on + self.included = set() # type: Set[unicode] + # docnames included from other documents + self.reread_always = set() # type: Set[unicode] + # docnames to re-read unconditionally on # next build # File metadata - self.metadata = {} # docname -> dict of metadata items + self.metadata = {} # type: Dict[unicode, Dict[unicode, Any]] + # docname -> dict of metadata items # TOC inventory - self.titles = {} # docname -> title node - self.longtitles = {} # docname -> title node; only different if + self.titles = {} # type: Dict[unicode, nodes.Node] + # docname -> title node + self.longtitles = {} # type: Dict[unicode, nodes.Node] + # docname -> title node; only different if # set differently with title directive - self.tocs = {} # docname -> table of contents nodetree - self.toc_num_entries = {} # docname -> number of real entries + self.tocs = {} # type: Dict[unicode, nodes.Node] + # docname -> table of contents nodetree + self.toc_num_entries = {} # type: Dict[unicode, int] + # docname -> number of real entries + # used to determine when to show the TOC # in a sidebar (don't show if it's only one item) - self.toc_secnumbers = {} # docname -> dict of 
sectionid -> number - self.toc_fignumbers = {} # docname -> dict of figtype -> + self.toc_secnumbers = {} # type: Dict[unicode, Dict[unicode, Tuple[int, ...]]] + # docname -> dict of sectionid -> number + self.toc_fignumbers = {} # type: Dict[unicode, Dict[unicode, Dict[unicode, Tuple[int, ...]]]] # NOQA + # docname -> dict of figtype -> # dict of figureid -> number - self.toctree_includes = {} # docname -> list of toctree includefiles - self.files_to_rebuild = {} # docname -> set of files + self.toctree_includes = {} # type: Dict[unicode, List[unicode]] + # docname -> list of toctree includefiles + self.files_to_rebuild = {} # type: Dict[unicode, Set[unicode]] + # docname -> set of files # (containing its TOCs) to rebuild too - self.glob_toctrees = set() # docnames that have :glob: toctrees - self.numbered_toctrees = set() # docnames that have :numbered: toctrees + self.glob_toctrees = set() # type: Set[unicode] + # docnames that have :glob: toctrees + self.numbered_toctrees = set() # type: Set[unicode] + # docnames that have :numbered: toctrees # domain-specific inventories, here to be pickled - self.domaindata = {} # domainname -> domain-specific dict + self.domaindata = {} # type: Dict[unicode, Dict] + # domainname -> domain-specific dict # Other inventories - self.indexentries = {} # docname -> list of - # (type, string, target, aliasname) - self.versionchanges = {} # version -> list of (type, docname, + self.indexentries = {} # type: Dict[unicode, List[Tuple[unicode, unicode, unicode, unicode, unicode]]] # NOQA + # docname -> list of + # (type, unicode, target, aliasname) + self.versionchanges = {} # type: Dict[unicode, List[Tuple[unicode, unicode, int, unicode, unicode, unicode]]] # NOQA + # version -> list of (type, docname, # lineno, module, descname, content) # these map absolute path -> (docnames, unique filename) @@ -201,27 +235,31 @@ class BuildEnvironment(object): self.dlfiles = FilenameUniqDict() # temporary data storage while reading a document - self.temp_data = {} + self.temp_data = {} # type: Dict[unicode, Any] # context for cross-references (e.g. current module or class) # this is similar to temp_data, but will for example be copied to # attributes of "any" cross references - self.ref_context = {} + self.ref_context = {} # type: Dict[unicode, Any] - self.managers = {} + self.managers = {} # type: Dict[unicode, EnvironmentManager] self.init_managers() def init_managers(self): + # type: () -> None managers = {} - for manager_class in [IndexEntries, Toctree]: + manager_class = None # type: Type[EnvironmentManager] + for manager_class in [IndexEntries, Toctree]: # type: ignore managers[manager_class.name] = manager_class(self) self.attach_managers(managers) def attach_managers(self, managers): + # type: (Dict[unicode, EnvironmentManager]) -> None for name, manager in iteritems(managers): self.managers[name] = manager manager.attach(self) def detach_managers(self): + # type: () -> Dict[unicode, EnvironmentManager] managers = self.managers self.managers = {} for _, manager in iteritems(managers): @@ -229,10 +267,12 @@ class BuildEnvironment(object): return managers def set_warnfunc(self, func): + # type: (Callable) -> None self._warnfunc = func self.settings['warning_stream'] = WarningStream(func) def set_versioning_method(self, method, compare): + # type: (unicode, bool) -> None """This sets the doctree versioning method for this environment. 
Versioning methods are a builder property; only builders with the same @@ -251,6 +291,7 @@ class BuildEnvironment(object): self.versioning_compare = compare def warn(self, docname, msg, lineno=None, **kwargs): + # type: (unicode, unicode, int, Any) -> None """Emit a warning. This differs from using ``app.warn()`` in that the warning may not @@ -261,10 +302,12 @@ class BuildEnvironment(object): self._warnfunc(msg, (docname, lineno), **kwargs) def warn_node(self, msg, node, **kwargs): + # type: (unicode, nodes.Node, Any) -> None """Like :meth:`warn`, but with source information taken from *node*.""" self._warnfunc(msg, '%s:%s' % get_source_line(node), **kwargs) def clear_doc(self, docname): + # type: (unicode) -> None """Remove all traces of a source file in the inventory.""" if docname in self.all_docs: self.all_docs.pop(docname, None) @@ -287,12 +330,13 @@ class BuildEnvironment(object): domain.clear_doc(docname) def merge_info_from(self, docnames, other, app): + # type: (List[unicode], BuildEnvironment, Sphinx) -> None """Merge global information gathered about *docnames* while reading them from the *other* environment. This possibly comes from a parallel build process. """ - docnames = set(docnames) + docnames = set(docnames) # type: ignore for docname in docnames: self.all_docs[docname] = other.all_docs[docname] if docname in other.reread_always: @@ -317,6 +361,7 @@ class BuildEnvironment(object): app.emit('env-merge-info', self, docnames, other) def path2doc(self, filename): + # type: (unicode) -> unicode """Return the docname for the filename if the file is document. *filename* should be absolute or relative to the source directory. @@ -324,13 +369,14 @@ class BuildEnvironment(object): if filename.startswith(self.srcdir): filename = filename[len(self.srcdir) + 1:] for suffix in self.config.source_suffix: - if fnmatch.fnmatch(filename, '*' + suffix): - return filename[:-len(suffix)] + if fnmatch.fnmatch(filename, '*' + suffix): # type: ignore + return filename[:-len(suffix)] # type: ignore else: # the file does not have docname return None def doc2path(self, docname, base=True, suffix=None): + # type: (unicode, Union[bool, unicode], unicode) -> unicode """Return the filename for the document name. If *base* is True, return absolute path under self.srcdir. @@ -340,22 +386,24 @@ class BuildEnvironment(object): """ docname = docname.replace(SEP, path.sep) if suffix is None: - for candidate_suffix in self.config.source_suffix: + candidate_suffix = None # type: unicode + for candidate_suffix in self.config.source_suffix: # type: ignore if path.isfile(path.join(self.srcdir, docname) + candidate_suffix): suffix = candidate_suffix break else: # document does not exist - suffix = self.config.source_suffix[0] + suffix = self.config.source_suffix[0] # type: ignore if base is True: return path.join(self.srcdir, docname) + suffix elif base is None: return docname + suffix else: - return path.join(base, docname) + suffix + return path.join(base, docname) + suffix # type: ignore def relfn2path(self, filename, docname=None): + # type: (unicode, unicode) -> Tuple[unicode, unicode] """Return paths to a file referenced from a document, relative to documentation root and absolute. @@ -380,6 +428,7 @@ class BuildEnvironment(object): return rel_fn, path.abspath(path.join(self.srcdir, enc_rel_fn)) def find_files(self, config): + # type: (Config) -> None """Find all source files in the source dir and put them in self.found_docs. 
""" @@ -390,7 +439,7 @@ class BuildEnvironment(object): ['**/_sources', '.#*', '**/.#*', '*.lproj/**'] ) self.found_docs = set() - for docname in get_matching_docs(self.srcdir, config.source_suffix, + for docname in get_matching_docs(self.srcdir, config.source_suffix, # type: ignore exclude_matchers=matchers): if os.access(self.doc2path(docname), os.R_OK): self.found_docs.add(docname) @@ -409,12 +458,13 @@ class BuildEnvironment(object): self.dependencies.setdefault(docname, set()).add(filename) def get_outdated_files(self, config_changed): + # type: (bool) -> Tuple[Set[unicode], Set[unicode], Set[unicode]] """Return (added, changed, removed) sets.""" # clear all files no longer present removed = set(self.all_docs) - self.found_docs - added = set() - changed = set() + added = set() # type: Set[unicode] + changed = set() # type: Set[unicode] if config_changed: # config values affect e.g. substitutions @@ -459,6 +509,7 @@ class BuildEnvironment(object): return added, changed, removed def update(self, config, srcdir, doctreedir, app): + # type: (Config, unicode, unicode, Sphinx) -> List[unicode] """(Re-)read all files new or changed since last update. Store all environment docnames in the canonical format (ie using SEP as @@ -561,6 +612,7 @@ class BuildEnvironment(object): return sorted(docnames) def _read_serial(self, docnames, app): + # type: (List[unicode], Sphinx) -> None for docname in app.status_iterator(docnames, 'reading sources... ', purple, len(docnames)): # remove all inventory entries for that file @@ -569,14 +621,16 @@ class BuildEnvironment(object): self.read_doc(docname, app) def _read_parallel(self, docnames, app, nproc): + # type: (List[unicode], Sphinx, int) -> None # clear all outdated docs at once for docname in docnames: app.emit('env-purge-doc', self, docname) self.clear_doc(docname) def read_process(docs): + # type: (List[unicode]) -> BuildEnvironment self.app = app - self.warnings = [] + self.warnings = [] # type: List[Tuple] self.set_warnfunc(lambda *args, **kwargs: self.warnings.append((args, kwargs))) for docname in docs: self.read_doc(docname, app) @@ -589,13 +643,14 @@ class BuildEnvironment(object): return self def merge(docs, otherenv): + # type: (List[unicode], BuildEnvironment) -> None warnings.extend(otherenv.warnings) self.merge_info_from(docs, otherenv, app) tasks = ParallelTasks(nproc) chunks = make_chunks(docnames, nproc) - warnings = [] + warnings = [] # type: List[Tuple] for chunk in app.status_iterator( chunks, 'reading sources... 
', purple, len(chunks)): tasks.add_task(read_process, chunk, merge) @@ -608,8 +663,9 @@ class BuildEnvironment(object): self._warnfunc(*warning, **kwargs) def check_dependents(self, already): - to_rewrite = (self.toctree.assign_section_numbers() + - self.toctree.assign_figure_numbers()) + # type: (Set[unicode]) -> Iterator[unicode] + to_rewrite = (self.toctree.assign_section_numbers() + # type: ignore + self.toctree.assign_figure_numbers()) # type: ignore for docname in set(to_rewrite): if docname not in already: yield docname @@ -617,6 +673,7 @@ class BuildEnvironment(object): # --------- SINGLE FILE READING -------------------------------------------- def warn_and_replace(self, error): + # type: (Any) -> Tuple """Custom decoding error handler that warns and replaces.""" linestart = error.object.rfind(b'\n', 0, error.start) lineend = error.object.find(b'\n', error.start) @@ -631,6 +688,7 @@ class BuildEnvironment(object): return (u'?', error.end) def read_doc(self, docname, app=None): + # type: (unicode, Sphinx) -> None """Parse a file and add/update inventory entries for the doctree.""" self.temp_data['docname'] = docname @@ -659,7 +717,7 @@ class BuildEnvironment(object): self.warn(docname, 'default role %s not found' % self.config.default_role) - codecs.register_error('sphinx', self.warn_and_replace) + codecs.register_error('sphinx', self.warn_and_replace) # type: ignore # publish manually reader = SphinxStandaloneReader(self.app, parsers=self.config.source_parsers) @@ -740,11 +798,13 @@ class BuildEnvironment(object): @property def docname(self): + # type: () -> unicode """Returns the docname of the document currently being parsed.""" return self.temp_data['docname'] @property def currmodule(self): + # type () -> None """Backwards compatible alias. Will be removed.""" self.warn(self.docname, 'env.currmodule is being referenced by an ' 'extension; this API will be removed in the future') @@ -752,12 +812,14 @@ class BuildEnvironment(object): @property def currclass(self): + # type: () -> None """Backwards compatible alias. Will be removed.""" self.warn(self.docname, 'env.currclass is being referenced by an ' 'extension; this API will be removed in the future') return self.ref_context.get('py:class') def new_serialno(self, category=''): + # type: (unicode) -> int """Return a serial number, e.g. for index entry targets. The number is guaranteed to be unique in the current document. @@ -768,6 +830,7 @@ class BuildEnvironment(object): return cur def note_dependency(self, filename): + # type: (unicode) -> None """Add *filename* as a dependency of the current document. This means that the document will be rebuilt if this file changes. @@ -777,6 +840,7 @@ class BuildEnvironment(object): self.dependencies.setdefault(self.docname, set()).add(filename) def note_included(self, filename): + # type: (unicode) -> None """Add *filename* as a included from other document. This means the document is not orphaned. @@ -786,12 +850,14 @@ class BuildEnvironment(object): self.included.add(self.path2doc(filename)) def note_reread(self): + # type: () -> None """Add the current document to the list of documents that will automatically be re-read at the next build. 
""" self.reread_always.add(self.docname) def note_versionchange(self, type, version, node, lineno): + # type: (unicode, unicode, nodes.Node, int) -> None self.versionchanges.setdefault(version, []).append( (type, self.temp_data['docname'], lineno, self.ref_context.get('py:module'), @@ -800,6 +866,7 @@ class BuildEnvironment(object): # post-processing of read doctrees def process_dependencies(self, docname, doctree): + # type: (unicode, nodes.Node) -> None """Process docutils-generated dependency info.""" cwd = getcwd() frompath = path.join(path.normpath(self.srcdir), 'dummy') @@ -816,6 +883,7 @@ class BuildEnvironment(object): self.dependencies.setdefault(docname, set()).add(relpath) def process_downloads(self, docname, doctree): + # type: (unicode, nodes.Node) -> None """Process downloadable file paths. """ for node in doctree.traverse(addnodes.download_reference): targetname = node['reftarget'] @@ -829,9 +897,10 @@ class BuildEnvironment(object): node['filename'] = uniquename def process_images(self, docname, doctree): + # type: (unicode, nodes.Node) -> None """Process and rewrite image URIs.""" def collect_candidates(imgpath, candidates): - globbed = {} + globbed = {} # type: Dict[unicode, List[unicode]] for filename in glob(imgpath): new_imgpath = relative_path(path.join(self.srcdir, 'dummy'), filename) @@ -894,11 +963,13 @@ class BuildEnvironment(object): self.images.add_file(docname, imgpath) def process_metadata(self, docname, doctree): + # type: (unicode, nodes.Node) -> None """Process the docinfo part of the doctree as metadata. Keep processing minimal -- just return what docutils says. """ - self.metadata[docname] = md = {} + self.metadata[docname] = {} + md = self.metadata[docname] try: docinfo = doctree[0] except IndexError: @@ -927,6 +998,7 @@ class BuildEnvironment(object): del doctree[0] def create_title_from(self, docname, document): + # type: (unicode, nodes.Node) -> None """Add a title node to the document (just copy the first section title), and store that title in the environment. """ @@ -950,20 +1022,24 @@ class BuildEnvironment(object): self.longtitles[docname] = longtitlenode def note_toctree(self, docname, toctreenode): + # type: (unicode, addnodes.toctree) -> None """Note a TOC tree directive in a document and gather information about file relations from it. """ - self.toctree.note_toctree(docname, toctreenode) + self.toctree.note_toctree(docname, toctreenode) # type: ignore def get_toc_for(self, docname, builder): + # type: (unicode, Builder) -> addnodes.toctree """Return a TOC nodetree -- for use on the same page only!""" - return self.toctree.get_toc_for(docname, builder) + return self.toctree.get_toc_for(docname, builder) # type: ignore def get_toctree_for(self, docname, builder, collapse, **kwds): + # type: (unicode, Builder, bool, Any) -> addnodes.toctree """Return the global TOC nodetree.""" - return self.toctree.get_toctree_for(docname, builder, collapse, **kwds) + return self.toctree.get_toctree_for(docname, builder, collapse, **kwds) # type: ignore def get_domain(self, domainname): + # type: (unicode) -> Domain """Return the domain instance with the specified name. Raises an ExtensionError if the domain is not registered. 
@@ -976,6 +1052,7 @@ class BuildEnvironment(object): # --------- RESOLVING REFERENCES AND TOCTREES ------------------------------ def get_doctree(self, docname): + # type: (unicode) -> nodes.Node """Read the doctree for a file from the pickle and return it.""" doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree') with open(doctree_filename, 'rb') as f: @@ -987,6 +1064,7 @@ class BuildEnvironment(object): def get_and_resolve_doctree(self, docname, builder, doctree=None, prune_toctrees=True, includehidden=False): + # type: (unicode, Builder, nodes.Node, bool, bool) -> nodes.Node """Read the doctree from the pickle, resolve cross-references and toctrees and return it. """ @@ -1010,6 +1088,7 @@ class BuildEnvironment(object): def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0, titles_only=False, collapse=False, includehidden=False): + # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node """Resolve a *toctree* node into individual bullet lists with titles as items, returning None (if no containing titles are found) or a new node. @@ -1021,11 +1100,12 @@ class BuildEnvironment(object): If *collapse* is True, all branches not containing docname will be collapsed. """ - return self.toctree.resolve_toctree(docname, builder, toctree, prune, + return self.toctree.resolve_toctree(docname, builder, toctree, prune, # type: ignore maxdepth, titles_only, collapse, includehidden) def resolve_references(self, doctree, fromdocname, builder): + # type: (nodes.Node, unicode, Builder) -> None for node in doctree.traverse(addnodes.pending_xref): contnode = node[0].deepcopy() newnode = None @@ -1068,6 +1148,7 @@ class BuildEnvironment(object): builder.app.emit('doctree-resolved', doctree, fromdocname) def _warn_missing_reference(self, refdoc, typ, target, node, domain): + # type: (unicode, unicode, unicode, nodes.Node, Domain) -> None warn = node.get('refwarn') if self.config.nitpicky: warn = True @@ -1093,6 +1174,7 @@ class BuildEnvironment(object): self.warn_node(msg % {'target': target}, node, type='ref', subtype=typ) def _resolve_doc_reference(self, builder, refdoc, node, contnode): + # type: (Builder, unicode, nodes.Node, nodes.Node) -> nodes.Node # directly reference to document by source name; # can be absolute or relative docname = docname_join(refdoc, node['reftarget']) @@ -1110,9 +1192,10 @@ class BuildEnvironment(object): return newnode def _resolve_any_reference(self, builder, refdoc, node, contnode): + # type: (Builder, unicode, nodes.Node, nodes.Node) -> nodes.Node """Resolve reference generated by the "any" role.""" target = node['reftarget'] - results = [] + results = [] # type: List[Tuple[unicode, nodes.Node]] # first, try resolving as :doc: doc_ref = self._resolve_doc_reference(builder, refdoc, node, contnode) if doc_ref: @@ -1153,9 +1236,11 @@ class BuildEnvironment(object): def create_index(self, builder, group_entries=True, _fixre=re.compile(r'(.*) ([(][^()]*[)])')): - return self.indices.create_index(builder, group_entries=group_entries, _fixre=_fixre) + # type: (Builder, bool, Pattern) -> Any + return self.indices.create_index(builder, group_entries=group_entries, _fixre=_fixre) # type: ignore # NOQA def collect_relations(self): + # type: () -> Dict[unicode, List[unicode]] traversed = set() def traverse_toctree(parent, docname): @@ -1188,6 +1273,7 @@ class BuildEnvironment(object): return relations def check_consistency(self): + # type: () -> None """Do consistency checks.""" for docname in sorted(self.all_docs): 
if docname not in self.files_to_rebuild: diff --git a/sphinx/environment/managers/__init__.py b/sphinx/environment/managers/__init__.py index 963ec54b8..0822f1091 100644 --- a/sphinx/environment/managers/__init__.py +++ b/sphinx/environment/managers/__init__.py @@ -9,29 +9,42 @@ :license: BSD, see LICENSE for details. """ +if False: + # For type annotation + from typing import Any # NOQA + from docutils import nodes # NOQA + from sphinx.environment import BuildEnvironment # NOQA + class EnvironmentManager(object): """Base class for sphinx.environment managers.""" - name = None + name = None # type: unicode + env = None # type: BuildEnvironment def __init__(self, env): + # type: (BuildEnvironment) -> None self.env = env def attach(self, env): + # type: (BuildEnvironment) -> None self.env = env if self.name: setattr(env, self.name, self) def detach(self, env): + # type: (BuildEnvironment) -> None self.env = None if self.name: delattr(env, self.name) def clear_doc(self, docname): + # type: (unicode) -> None raise NotImplementedError def merge_other(self, docnames, other): + # type: (List[unicode], Any) -> None raise NotImplementedError def process_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None raise NotImplementedError diff --git a/sphinx/environment/managers/indexentries.py b/sphinx/environment/managers/indexentries.py index c35a161b4..8cf20f480 100644 --- a/sphinx/environment/managers/indexentries.py +++ b/sphinx/environment/managers/indexentries.py @@ -15,28 +15,38 @@ import string from itertools import groupby from six import text_type - from sphinx import addnodes from sphinx.util import iteritems, split_index_msg, split_into from sphinx.locale import _ from sphinx.environment.managers import EnvironmentManager +if False: + # For type annotation + from typing import Pattern, Tuple # NOQA + from docutils import nodes # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + class IndexEntries(EnvironmentManager): name = 'indices' def __init__(self, env): + # type: (BuildEnvironment) -> None super(IndexEntries, self).__init__(env) self.data = env.indexentries def clear_doc(self, docname): + # type: (unicode) -> None self.data.pop(docname, None) def merge_other(self, docnames, other): + # type: (List[unicode], BuildEnvironment) -> None for docname in docnames: self.data[docname] = other.indexentries[docname] def process_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None entries = self.data[docname] = [] for node in doctree.traverse(addnodes.index): try: @@ -55,10 +65,11 @@ class IndexEntries(EnvironmentManager): def create_index(self, builder, group_entries=True, _fixre=re.compile(r'(.*) ([(][^()]*[)])')): + # type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, List[unicode]]]]] # NOQA """Create the real index from the collected index entries.""" from sphinx.environment import NoUri - new = {} + new = {} # type: Dict[unicode, List] def add_entry(word, subword, main, link=True, dic=new, key=None): # Force the word to be unicode if it's a ASCII bytestring. 
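The ``if False:`` blocks introduced at the top of these modules exist purely for the type checker: the imports under them never execute, so they add no runtime dependency and cannot create import cycles, but mypy still analyses the guarded block and can resolve the names used in the type comments. A rough, self-contained illustration of the pattern (the class and its attributes are invented, not Sphinx code):

    if False:
        # For type annotation only; never executed at runtime
        from typing import Dict, List  # NOQA

    class Inventory(object):
        def __init__(self):
            # type: () -> None
            self.entries = {}  # type: Dict[str, List[str]]

        def add(self, docname, entry):
            # type: (str, str) -> None
            self.entries.setdefault(docname, []).append(entry)

Guarding with ``if False`` rather than ``typing.TYPE_CHECKING`` avoids importing ``typing`` at runtime at all, which matters on interpreters where that module is only available as an optional backport.
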
@@ -131,8 +142,8 @@ class IndexEntries(EnvironmentManager): # func() # (in module foo) # (in module bar) - oldkey = '' - oldsubitems = None + oldkey = '' # type: unicode + oldsubitems = None # type: Dict[unicode, List] i = 0 while i < len(newlist): key, (targets, subitems, _key) = newlist[i] diff --git a/sphinx/environment/managers/toctree.py b/sphinx/environment/managers/toctree.py index d4848a72c..195349d3e 100644 --- a/sphinx/environment/managers/toctree.py +++ b/sphinx/environment/managers/toctree.py @@ -10,6 +10,7 @@ """ from six import iteritems + from docutils import nodes from sphinx import addnodes @@ -18,11 +19,18 @@ from sphinx.util.nodes import clean_astext, process_only_nodes from sphinx.transforms import SphinxContentsFilter from sphinx.environment.managers import EnvironmentManager +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + class Toctree(EnvironmentManager): name = 'toctree' def __init__(self, env): + # type: (BuildEnvironment) -> None super(Toctree, self).__init__(env) self.tocs = env.tocs @@ -35,6 +43,7 @@ class Toctree(EnvironmentManager): self.numbered_toctrees = env.numbered_toctrees def clear_doc(self, docname): + # type: (unicode) -> None self.tocs.pop(docname, None) self.toc_secnumbers.pop(docname, None) self.toc_fignumbers.pop(docname, None) @@ -49,6 +58,7 @@ class Toctree(EnvironmentManager): del self.files_to_rebuild[subfn] def merge_other(self, docnames, other): + # type: (List[unicode], BuildEnvironment) -> None for docname in docnames: self.tocs[docname] = other.tocs[docname] self.toc_num_entries[docname] = other.toc_num_entries[docname] @@ -63,6 +73,7 @@ class Toctree(EnvironmentManager): self.files_to_rebuild.setdefault(subfn, set()).update(fnset & docnames) def process_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None """Build a TOC from the doctree and store it in the inventory.""" numentries = [0] # nonlocal again... @@ -132,6 +143,7 @@ class Toctree(EnvironmentManager): self.toc_num_entries[docname] = numentries[0] def note_toctree(self, docname, toctreenode): + # type: (unicode, addnodes.toctree) -> None """Note a TOC tree directive in a document and gather information about file relations from it. """ @@ -147,6 +159,7 @@ class Toctree(EnvironmentManager): self.toctree_includes.setdefault(docname, []).extend(includefiles) def get_toc_for(self, docname, builder): + # type: (unicode, Builder) -> None """Return a TOC nodetree -- for use on the same page only!""" tocdepth = self.env.metadata[docname].get('tocdepth', 0) try: @@ -162,6 +175,7 @@ class Toctree(EnvironmentManager): return toc def get_toctree_for(self, docname, builder, collapse, **kwds): + # type: (unicode, Builder, bool, Any) -> nodes.Node """Return the global TOC nodetree.""" doctree = self.env.get_doctree(self.env.config.master_doc) toctrees = [] @@ -184,6 +198,7 @@ class Toctree(EnvironmentManager): def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0, titles_only=False, collapse=False, includehidden=False): + # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node """Resolve a *toctree* node into individual bullet lists with titles as items, returning None (if no containing titles are found) or a new node. 
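Where the annotated code touches attributes that mypy cannot model (dynamically attached managers such as ``self.toctree``, or modules imported without stubs), the patch appends ``# type: ignore`` so that only the offending line is exempted rather than the whole file. A small sketch of the effect; the function below is hypothetical and not part of the patch:

    def first_section_title(document):
        # type: (object) -> str
        # ``object`` has no ``children`` attribute as far as mypy is concerned;
        # the trailing marker suppresses the error on this line only.
        return document.children[0].astext()  # type: ignore

The marker is deliberately narrow: every other line in such a function is still checked against the declared types.
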
@@ -387,11 +402,12 @@ class Toctree(EnvironmentManager): return newnode def get_toctree_ancestors(self, docname): + # type: (unicode) -> List[unicode] parent = {} for p, children in iteritems(self.toctree_includes): for child in children: parent[child] = p - ancestors = [] + ancestors = [] # type: List[unicode] d = docname while d in parent and d not in ancestors: ancestors.append(d) @@ -399,6 +415,7 @@ class Toctree(EnvironmentManager): return ancestors def _toctree_prune(self, node, depth, maxdepth, collapse=False): + # type: (nodes.Node, int, int, bool) -> None """Utility: Cut a TOC at a specified depth.""" for subnode in node.children[:]: if isinstance(subnode, (addnodes.compact_paragraph, @@ -420,11 +437,12 @@ class Toctree(EnvironmentManager): self._toctree_prune(subnode, depth+1, maxdepth, collapse) def assign_section_numbers(self): + # type: () -> List[unicode] """Assign a section number to each heading under a numbered toctree.""" # a list of all docnames whose section numbers changed rewrite_needed = [] - assigned = set() + assigned = set() # type: Set[unicode] old_secnumbers = self.toc_secnumbers self.toc_secnumbers = self.env.toc_secnumbers = {} @@ -488,14 +506,15 @@ class Toctree(EnvironmentManager): return rewrite_needed def assign_figure_numbers(self): + # type: () -> List[unicode] """Assign a figure number to each figure under a numbered toctree.""" rewrite_needed = [] - assigned = set() + assigned = set() # type: Set[unicode] old_fignumbers = self.toc_fignumbers self.toc_fignumbers = self.env.toc_fignumbers = {} - fignum_counter = {} + fignum_counter = {} # type: Dict[unicode, Dict[Tuple[int], int]] def get_section_number(docname, section): anchorname = '#' + section['ids'][0] @@ -540,7 +559,7 @@ class Toctree(EnvironmentManager): continue - figtype = self.env.domains['std'].get_figtype(subnode) + figtype = self.env.domains['std'].get_figtype(subnode) # type: ignore if figtype and subnode['ids']: register_fignumber(docname, secnum, figtype, subnode) From 8a06a42c311c58dcab1116e06b3afbc2541f49f1 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA Date: Mon, 7 Nov 2016 23:58:40 +0900 Subject: [PATCH 007/190] Add type-check annotations to sphinx.domains --- sphinx/domains/__init__.py | 62 ++-- sphinx/domains/c.py | 35 ++- sphinx/domains/cpp.py | 546 ++++++++++++++++++++++++++++++----- sphinx/domains/javascript.py | 23 +- sphinx/domains/python.py | 72 ++++- sphinx/domains/rst.py | 23 +- sphinx/domains/std.py | 84 ++++-- 7 files changed, 710 insertions(+), 135 deletions(-) diff --git a/sphinx/domains/__init__.py b/sphinx/domains/__init__.py index da7e5d9ae..a90ee84aa 100644 --- a/sphinx/domains/__init__.py +++ b/sphinx/domains/__init__.py @@ -17,6 +17,13 @@ from six import iteritems from sphinx.errors import SphinxError from sphinx.locale import _ +if False: + # For type annotation + from typing import Any, Callable, Iterable, Tuple, Type, Union # NOQA + from docutils import nodes # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + class ObjType(object): """ @@ -38,9 +45,10 @@ class ObjType(object): } def __init__(self, lname, *roles, **attrs): - self.lname = lname - self.roles = roles - self.attrs = self.known_attrs.copy() + # type: (unicode, Any, Any) -> None + self.lname = lname # type: unicode + self.roles = roles # type: Tuple + self.attrs = self.known_attrs.copy() # type: Dict self.attrs.update(attrs) @@ -59,17 +67,19 @@ class Index(object): domains using :meth:`~sphinx.application.Sphinx.add_index_to_domain()`. 
""" - name = None - localname = None - shortname = None + name = None # type: unicode + localname = None # type: unicode + shortname = None # type: unicode def __init__(self, domain): + # type: (Domain) -> None if self.name is None or self.localname is None: raise SphinxError('Index subclass %s has no valid name or localname' % self.__class__.__name__) self.domain = domain def generate(self, docnames=None): + # type: (List[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA """Return entries for the index given by *name*. If *docnames* is given, restrict to entries referring to these docnames. @@ -128,23 +138,26 @@ class Domain(object): #: domain label: longer, more descriptive (used in messages) label = '' #: type (usually directive) name -> ObjType instance - object_types = {} + object_types = {} # type: Dict[unicode, Any] #: directive name -> directive class - directives = {} + directives = {} # type: Dict[unicode, Any] #: role name -> role callable - roles = {} + roles = {} # type: Dict[unicode, Callable] #: a list of Index subclasses - indices = [] + indices = [] # type: List[Type[Index]] #: role name -> a warning message if reference is missing - dangling_warnings = {} + dangling_warnings = {} # type: Dict[unicode, unicode] #: data value for a fresh environment - initial_data = {} + initial_data = {} # type: Dict + #: data value + data = None # type: Dict #: data version, bump this when the format of `self.data` changes data_version = 0 def __init__(self, env): - self.env = env + # type: (BuildEnvironment) -> None + self.env = env # type: BuildEnvironment if self.name not in env.domaindata: assert isinstance(self.initial_data, dict) new_data = copy.deepcopy(self.initial_data) @@ -154,18 +167,19 @@ class Domain(object): self.data = env.domaindata[self.name] if self.data['version'] != self.data_version: raise IOError('data of %r domain out of date' % self.label) - self._role_cache = {} - self._directive_cache = {} - self._role2type = {} - self._type2role = {} + self._role_cache = {} # type: Dict[unicode, Callable] + self._directive_cache = {} # type: Dict[unicode, Callable] + self._role2type = {} # type: Dict[unicode, List[unicode]] + self._type2role = {} # type: Dict[unicode, unicode] for name, obj in iteritems(self.object_types): for rolename in obj.roles: self._role2type.setdefault(rolename, []).append(name) self._type2role[name] = obj.roles[0] if obj.roles else '' - self.objtypes_for_role = self._role2type.get - self.role_for_objtype = self._type2role.get + self.objtypes_for_role = self._role2type.get # type: Callable[[unicode], List[unicode]] # NOQA + self.role_for_objtype = self._type2role.get # type: Callable[[unicode], unicode] def role(self, name): + # type: (unicode) -> Callable """Return a role adapter function that always gives the registered role its full name ('domain:name') as the first argument. """ @@ -183,6 +197,7 @@ class Domain(object): return role_adapter def directive(self, name): + # type: (unicode) -> Callable """Return a directive adapter class that always gives the registered directive its full name ('domain:name') as ``self.name``. 
""" @@ -193,7 +208,7 @@ class Domain(object): fullname = '%s:%s' % (self.name, name) BaseDirective = self.directives[name] - class DirectiveAdapter(BaseDirective): + class DirectiveAdapter(BaseDirective): # type: ignore def run(self): self.name = fullname return BaseDirective.run(self) @@ -203,10 +218,12 @@ class Domain(object): # methods that should be overwritten def clear_doc(self, docname): + # type: (unicode) -> None """Remove traces of a document in the domain-specific inventories.""" pass def merge_domaindata(self, docnames, otherdata): + # type: (List[unicode], Dict) -> None """Merge in data regarding *docnames* from a different domaindata inventory (coming from a subprocess in parallel builds). """ @@ -215,11 +232,13 @@ class Domain(object): self.__class__) def process_doc(self, env, docname, document): + # type: (BuildEnvironment, unicode, nodes.Node) -> None """Process a document after it is read by the environment.""" pass def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA """Resolve the pending_xref *node* with the given *typ* and *target*. This method should return a new node, to replace the xref node, @@ -236,6 +255,7 @@ class Domain(object): pass def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA """Resolve the pending_xref *node* with the given *target*. The reference comes from an "any" or similar role, which means that we @@ -252,6 +272,7 @@ class Domain(object): raise NotImplementedError def get_objects(self): + # type: () -> Iterable[Tuple[unicode, unicode, unicode, unicode, unicode, int]] """Return an iterable of "object descriptions", which are tuples with five items: @@ -271,6 +292,7 @@ class Domain(object): return [] def get_type_name(self, type, primary=False): + # type: (ObjType, bool) -> unicode """Return full name for given ObjType.""" if primary: return type.lname diff --git a/sphinx/domains/c.py b/sphinx/domains/c.py index 43e869dbc..cf4c23d5d 100644 --- a/sphinx/domains/c.py +++ b/sphinx/domains/c.py @@ -22,6 +22,13 @@ from sphinx.directives import ObjectDescription from sphinx.util.nodes import make_refnode from sphinx.util.docfields import Field, TypedField +if False: + # For type annotation + from typing import Any, Iterator, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + # RE to split at word boundaries wsplit_re = re.compile(r'(\W+)') @@ -74,8 +81,9 @@ class CObject(ObjectDescription): )) def _parse_type(self, node, ctype): + # type: (nodes.Node, unicode) -> None # add cross-ref nodes for all words - for part in [_f for _f in wsplit_re.split(ctype) if _f]: + for part in [_f for _f in wsplit_re.split(ctype) if _f]: # type: ignore tnode = nodes.Text(part, part) if part[0] in string.ascii_letters+'_' and \ part not in self.stopwords: @@ -88,11 +96,12 @@ class CObject(ObjectDescription): node += tnode def _parse_arglist(self, arglist): + # type: (unicode) -> Iterator[unicode] while True: - m = c_funcptr_arg_sig_re.match(arglist) + m = c_funcptr_arg_sig_re.match(arglist) # type: ignore if m: yield m.group() - arglist = c_funcptr_arg_sig_re.sub('', arglist) + arglist = c_funcptr_arg_sig_re.sub('', arglist) # type: ignore if ',' in arglist: _, arglist = 
arglist.split(',', 1) else: @@ -106,11 +115,12 @@ class CObject(ObjectDescription): break def handle_signature(self, sig, signode): + # type: (unicode, addnodes.desc_signature) -> unicode """Transform a C signature into RST nodes.""" # first try the function pointer signature regex, it's more specific - m = c_funcptr_sig_re.match(sig) + m = c_funcptr_sig_re.match(sig) # type: ignore if m is None: - m = c_sig_re.match(sig) + m = c_sig_re.match(sig) # type: ignore if m is None: raise ValueError('no match') rettype, name, arglist, const = m.groups() @@ -151,7 +161,7 @@ class CObject(ObjectDescription): arg = arg.strip() param = addnodes.desc_parameter('', '', noemph=True) try: - m = c_funcptr_arg_sig_re.match(arg) + m = c_funcptr_arg_sig_re.match(arg) # type: ignore if m: self._parse_type(param, m.group(1) + '(') param += nodes.emphasis(m.group(2), m.group(2)) @@ -173,6 +183,7 @@ class CObject(ObjectDescription): return fullname def get_index_text(self, name): + # type: (unicode) -> unicode if self.objtype == 'function': return _('%s (C function)') % name elif self.objtype == 'member': @@ -187,6 +198,7 @@ class CObject(ObjectDescription): return '' def add_target_and_index(self, name, sig, signode): + # type: (unicode, unicode, addnodes.desc_signature) -> None # for C API items we add a prefix since names are usually not qualified # by a module name and so easily clash with e.g. section titles targetname = 'c.' + name @@ -209,6 +221,7 @@ class CObject(ObjectDescription): targetname, '', None)) def before_content(self): + # type: () -> None self.typename_set = False if self.name == 'c:type': if self.names: @@ -216,12 +229,14 @@ class CObject(ObjectDescription): self.typename_set = True def after_content(self): + # type: () -> None if self.typename_set: self.env.ref_context.pop('c:type', None) class CXRefRole(XRefRole): def process_link(self, env, refnode, has_explicit_title, title, target): + # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA if not has_explicit_title: target = target.lstrip('~') # only has a meaning for the title # if the first character is a tilde, don't display the module/class @@ -262,14 +277,16 @@ class CDomain(Domain): } initial_data = { 'objects': {}, # fullname -> docname, objtype - } + } # type: Dict[unicode, Dict[unicode, Tuple[unicode, Any]]] def clear_doc(self, docname): + # type: (unicode) -> None for fullname, (fn, _l) in list(self.data['objects'].items()): if fn == docname: del self.data['objects'][fullname] def merge_domaindata(self, docnames, otherdata): + # type: (List[unicode], Dict) -> None # XXX check duplicates for fullname, (fn, objtype) in otherdata['objects'].items(): if fn in docnames: @@ -277,6 +294,7 @@ class CDomain(Domain): def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA # strip pointer asterisk target = target.rstrip(' *') # becase TypedField can generate xrefs @@ -290,6 +308,7 @@ class CDomain(Domain): def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA # strip pointer asterisk target = target.rstrip(' *') if target not in self.data['objects']: @@ -300,9 +319,11 @@ class CDomain(Domain): contnode, target))] def get_objects(self): + # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]] for 
refname, (docname, type) in list(self.data['objects'].items()): yield (refname, refname, type, docname, 'c.' + refname, 1) def setup(app): + # type: (Sphinx) -> None app.add_domain(CDomain) diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index 6c12d6aca..5eeabcb11 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -13,6 +13,7 @@ import re from copy import deepcopy from six import iteritems, text_type + from docutils import nodes from sphinx import addnodes @@ -25,6 +26,14 @@ from sphinx.util.compat import Directive from sphinx.util.pycompat import UnicodeMixin from sphinx.util.docfields import Field, GroupedField +if False: + # For type annotation + from typing import Any, Iterator, Match, Pattern, Tuple, Union # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.config import Config # NOQA + from sphinx.environment import BuildEnvironment # NOQA + """ Important note on ids ---------------------------------------------------------------------------- @@ -317,7 +326,7 @@ _id_fundamental_v1 = { 'signed long': 'l', 'unsigned long': 'L', 'bool': 'b' -} +} # type: Dict[unicode, unicode] _id_shorthands_v1 = { 'std::string': 'ss', 'std::ostream': 'os', @@ -325,7 +334,7 @@ _id_shorthands_v1 = { 'std::iostream': 'ios', 'std::vector': 'v', 'std::map': 'm' -} +} # type: Dict[unicode, unicode] _id_operator_v1 = { 'new': 'new-operator', 'new[]': 'new-array-operator', @@ -374,7 +383,7 @@ _id_operator_v1 = { '->': 'pointer-operator', '()': 'call-operator', '[]': 'subscript-operator' -} +} # type: Dict[unicode, unicode] # ------------------------------------------------------------------------------ # Id v2 constants @@ -420,7 +429,7 @@ _id_fundamental_v2 = { 'auto': 'Da', 'decltype(auto)': 'Dc', 'std::nullptr_t': 'Dn' -} +} # type: Dict[unicode, unicode] _id_operator_v2 = { 'new': 'nw', 'new[]': 'na', @@ -469,43 +478,50 @@ _id_operator_v2 = { '->': 'pt', '()': 'cl', '[]': 'ix' -} +} # type: Dict[unicode, unicode] class NoOldIdError(UnicodeMixin, Exception): # Used to avoid implementing unneeded id generation for old id schmes. 
def __init__(self, description=""): + # type: (unicode) -> None self.description = description def __unicode__(self): + # type: () -> unicode return self.description class DefinitionError(UnicodeMixin, Exception): def __init__(self, description): + # type: (unicode) -> None self.description = description def __unicode__(self): + # type: () -> unicode return self.description class _DuplicateSymbolError(UnicodeMixin, Exception): def __init__(self, symbol, candSymbol): + # type: (Symbol, Symbol) -> None assert symbol assert candSymbol self.symbol = symbol self.candSymbol = candSymbol def __unicode__(self): + # type: () -> unicode return "Internal C++ duplicate symbol error:\n%s" % self.symbol.dump(0) class ASTBase(UnicodeMixin): def __eq__(self, other): + # type: (Any) -> bool if type(self) is not type(other): return False try: - for key, value in iteritems(self.__dict__): + for key, value in iteritems(self.__dict__): # type: ignore if value != getattr(other, key): return False except AttributeError: @@ -513,23 +529,28 @@ class ASTBase(UnicodeMixin): return True def __ne__(self, other): + # type: (Any) -> bool return not self.__eq__(other) - __hash__ = None + __hash__ = None # type: None def clone(self): + # type: () -> ASTBase """Clone a definition expression node.""" return deepcopy(self) def get_id_v1(self): + # type: () -> unicode """Return the v1 id for the node.""" raise NotImplementedError(repr(self)) def get_id_v2(self): + # type: () -> unicode """Return the v2 id for the node.""" raise NotImplementedError(repr(self)) def get_name(self): + # type: () -> unicode """Return the name. Returns either `None` or a node with a name you might call @@ -538,10 +559,12 @@ class ASTBase(UnicodeMixin): raise NotImplementedError(repr(self)) def prefix_nested_name(self, prefix): + # type: (unicode) -> unicode """Prefix a name node (a node returned by :meth:`get_name`).""" raise NotImplementedError(repr(self)) def __unicode__(self): + # type: () -> unicode raise NotImplementedError(repr(self)) def __repr__(self): @@ -549,29 +572,35 @@ class ASTBase(UnicodeMixin): def _verify_description_mode(mode): + # type: (unicode) -> None if mode not in ('lastIsName', 'noneIsName', 'markType', 'param'): raise Exception("Description mode '%s' is invalid." 
% mode) class ASTCPPAttribute(ASTBase): def __init__(self, arg): + # type: (unicode) -> None self.arg = arg def __unicode__(self): + # type: () -> unicode return "[[" + self.arg + "]]" def describe_signature(self, signode): + # type: (addnodes.desc_signature) -> None txt = text_type(self) signode.append(nodes.Text(txt, txt)) class ASTGnuAttribute(ASTBase): def __init__(self, name, args): + # type: (unicode, Any) -> None self.name = name self.args = args def __unicode__(self): - res = [self.name] + # type: () -> unicode + res = [self.name] # type: List[unicode] if self.args: res.append('(') res.append(text_type(self.args)) @@ -581,10 +610,12 @@ class ASTGnuAttribute(ASTBase): class ASTGnuAttributeList(ASTBase): def __init__(self, attrs): + # type: (List[Any]) -> None self.attrs = attrs def __unicode__(self): - res = ['__attribute__(('] + # type: () -> unicode + res = ['__attribute__(('] # type: List[unicode] first = True for attr in self.attrs: if not first: @@ -595,6 +626,7 @@ class ASTGnuAttributeList(ASTBase): return ''.join(res) def describe_signature(self, signode): + # type: (addnodes.desc_signature) -> None txt = text_type(self) signode.append(nodes.Text(txt, txt)) @@ -603,12 +635,15 @@ class ASTIdAttribute(ASTBase): """For simple attributes defined by the user.""" def __init__(self, id): + # type: (unicode) -> None self.id = id def __unicode__(self): + # type: () -> unicode return self.id def describe_signature(self, signode): + # type: (addnodes.desc_signature) -> None signode.append(nodes.Text(self.id, self.id)) @@ -616,29 +651,35 @@ class ASTParenAttribute(ASTBase): """For paren attributes defined by the user.""" def __init__(self, id, arg): + # type: (unicode, unicode) -> None self.id = id self.arg = arg def __unicode__(self): + # type: () -> unicode return self.id + '(' + self.arg + ')' def describe_signature(self, signode): + # type: (addnodes.desc_signature) -> None txt = text_type(self) signode.append(nodes.Text(txt, txt)) class ASTIdentifier(ASTBase): def __init__(self, identifier): + # type: (unicode) -> None assert identifier is not None self.identifier = identifier def get_id_v1(self): + # type: () -> unicode if self.identifier == 'size_t': return 's' else: return self.identifier def get_id_v2(self): + # type: () -> unicode if self.identifier == "std": return 'St' elif self.identifier[0] == "~": @@ -648,9 +689,11 @@ class ASTIdentifier(ASTBase): return text_type(len(self.identifier)) + self.identifier def __unicode__(self): + # type: () -> unicode return self.identifier def describe_signature(self, signode, mode, env, prefix, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None _verify_description_mode(mode) if mode == 'markType': targetText = prefix + self.identifier @@ -673,6 +716,7 @@ class ASTIdentifier(ASTBase): class ASTTemplateKeyParamPackIdDefault(ASTBase): def __init__(self, key, identifier, parameterPack, default): + # type: (unicode, Any, bool, Any) -> None assert key if parameterPack: assert default is None @@ -682,9 +726,11 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase): self.default = default def get_identifier(self): + # type: () -> unicode return self.identifier def get_id_v2(self): + # type: () -> unicode # this is not part of the normal name mangling in C++ res = [] if self.parameterPack: @@ -694,7 +740,8 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase): return ''.join(res) def __unicode__(self): - res = [self.key] + # type: () -> unicode + res = [self.key] # type: List[unicode] if self.parameterPack: if 
self.identifier: res.append(' ') @@ -709,6 +756,7 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase): return ''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None signode += nodes.Text(self.key) if self.parameterPack: if self.identifier: @@ -725,18 +773,22 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase): class ASTTemplateParamType(ASTBase): def __init__(self, data): + # type: (Any) -> None assert data self.data = data @property def name(self): + # type: () -> ASTNestedName id = self.get_identifier() return ASTNestedName([ASTNestedNameElement(id, None)], rooted=False) def get_identifier(self): + # type: () -> unicode return self.data.get_identifier() def get_id_v2(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode # this is not part of the normal name mangling in C++ if symbol: # the anchor will be our parent @@ -745,14 +797,17 @@ class ASTTemplateParamType(ASTBase): return self.data.get_id_v2() def __unicode__(self): + # type: () -> unicode return text_type(self.data) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None self.data.describe_signature(signode, mode, env, symbol) class ASTTemplateParamTemplateType(ASTBase): def __init__(self, nestedParams, data): + # type: (Any, Any) -> None assert nestedParams assert data self.nestedParams = nestedParams @@ -760,13 +815,16 @@ class ASTTemplateParamTemplateType(ASTBase): @property def name(self): + # type: () -> ASTNestedName id = self.get_identifier() return ASTNestedName([ASTNestedNameElement(id, None)], rooted=False) def get_identifier(self): + # type: () -> unicode return self.data.get_identifier() def get_id_v2(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode # this is not part of the normal name mangling in C++ if symbol: # the anchor will be our parent @@ -775,9 +833,11 @@ class ASTTemplateParamTemplateType(ASTBase): return self.nestedParams.get_id_v2() + self.data.get_id_v2() def __unicode__(self): + # type: () -> unicode return text_type(self.nestedParams) + text_type(self.data) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None self.nestedParams.describe_signature(signode, 'noneIsName', env, symbol) signode += nodes.Text(' ') self.data.describe_signature(signode, mode, env, symbol) @@ -785,15 +845,18 @@ class ASTTemplateParamTemplateType(ASTBase): class ASTTemplateParamNonType(ASTBase): def __init__(self, param): + # type: (Any) -> None assert param self.param = param @property def name(self): + # type: () -> ASTNestedName id = self.get_identifier() return ASTNestedName([ASTNestedNameElement(id, None)], rooted=False) def get_identifier(self): + # type: () -> unicode name = self.param.name if name: assert len(name.names) == 1 @@ -804,6 +867,7 @@ class ASTTemplateParamNonType(ASTBase): return None def get_id_v2(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode # this is not part of the normal name mangling in C++ if symbol: # the anchor will be our parent @@ -812,18 +876,22 @@ class ASTTemplateParamNonType(ASTBase): return '_' + self.param.get_id_v2() def __unicode__(self): + # type: () -> unicode return text_type(self.param) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None 
self.param.describe_signature(signode, mode, env, symbol) class ASTTemplateParams(ASTBase): def __init__(self, params): + # type: (Any) -> None assert params is not None self.params = params def get_id_v2(self): + # type: () -> unicode res = [] res.append("I") for param in self.params: @@ -832,6 +900,7 @@ class ASTTemplateParams(ASTBase): return ''.join(res) def __unicode__(self): + # type: () -> unicode res = [] res.append(u"template<") res.append(u", ".join(text_type(a) for a in self.params)) @@ -839,6 +908,7 @@ class ASTTemplateParams(ASTBase): return ''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None signode.sphinx_cpp_tagname = 'templateParams' signode += nodes.Text("template<") first = True @@ -852,13 +922,16 @@ class ASTTemplateParams(ASTBase): class ASTTemplateIntroductionParameter(ASTBase): def __init__(self, identifier, parameterPack): + # type: (Any, Any) -> None self.identifier = identifier self.parameterPack = parameterPack def get_identifier(self): + # type: () -> unicode return self.identifier def get_id_v2(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode # this is not part of the normal name mangling in C++ if symbol: # the anchor will be our parent @@ -870,6 +943,7 @@ class ASTTemplateIntroductionParameter(ASTBase): return '0' # we need to put something def get_id_v2_as_arg(self): + # type: () -> unicode # used for the implicit requires clause res = self.identifier.get_id_v2() if self.parameterPack: @@ -878,13 +952,15 @@ class ASTTemplateIntroductionParameter(ASTBase): return res def __unicode__(self): - res = [] + # type: () -> unicode + res = [] # type: List[unicode] if self.parameterPack: res.append('...') res.append(text_type(self.identifier)) return ''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None if self.parameterPack: signode += nodes.Text('...') self.identifier.describe_signature(signode, mode, env, '', symbol) @@ -892,6 +968,7 @@ class ASTTemplateIntroductionParameter(ASTBase): class ASTTemplateIntroduction(ASTBase): def __init__(self, concept, params): + # type: (Any, List[Any]) -> None assert len(params) > 0 self.concept = concept self.params = params @@ -899,6 +976,7 @@ class ASTTemplateIntroduction(ASTBase): # id_v1 does not exist def get_id_v2(self): + # type: () -> unicode # first do the same as a normal template parameter list res = [] res.append("I") @@ -916,6 +994,7 @@ class ASTTemplateIntroduction(ASTBase): return ''.join(res) def __unicode__(self): + # type: () -> unicode res = [] res.append(text_type(self.concept)) res.append('{') @@ -924,6 +1003,7 @@ class ASTTemplateIntroduction(ASTBase): return ''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None signode.sphinx_cpp_tagname = 'templateIntroduction' self.concept.describe_signature(signode, 'markType', env, symbol) signode += nodes.Text('{') @@ -938,6 +1018,7 @@ class ASTTemplateIntroduction(ASTBase): class ASTTemplateDeclarationPrefix(ASTBase): def __init__(self, templates): + # type: (List[Any]) -> None assert templates is not None assert len(templates) > 0 self.templates = templates @@ -945,6 +1026,7 @@ class ASTTemplateDeclarationPrefix(ASTBase): # id_v1 does not exist def get_id_v2(self): + # type: () -> unicode # this is not part of a normal name mangling system res = [] for t 
in self.templates: @@ -952,12 +1034,14 @@ class ASTTemplateDeclarationPrefix(ASTBase): return u''.join(res) def __unicode__(self): + # type: () -> unicode res = [] for t in self.templates: res.append(text_type(t)) return u''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) for t in self.templates: templateNode = addnodes.desc_signature_line() @@ -967,30 +1051,36 @@ class ASTTemplateDeclarationPrefix(ASTBase): class ASTOperatorBuildIn(ASTBase): def __init__(self, op): + # type: (unicode) -> None self.op = op def is_operator(self): + # type: () -> bool return True def get_id_v1(self): + # type: () -> unicode if self.op not in _id_operator_v1: raise Exception('Internal error: Build-in operator "%s" can not ' 'be mapped to an id.' % self.op) return _id_operator_v1[self.op] def get_id_v2(self): + # type: () -> unicode if self.op not in _id_operator_v2: raise Exception('Internal error: Build-in operator "%s" can not ' 'be mapped to an id.' % self.op) return _id_operator_v2[self.op] def __unicode__(self): + # type: () -> unicode if self.op in ('new', 'new[]', 'delete', 'delete[]'): return u'operator ' + self.op else: return u'operator' + self.op def describe_signature(self, signode, mode, env, prefix, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None _verify_description_mode(mode) identifier = text_type(self) if mode == 'lastIsName': @@ -1001,24 +1091,31 @@ class ASTOperatorBuildIn(ASTBase): class ASTOperatorType(ASTBase): def __init__(self, type): + # type: (Any) -> None self.type = type def is_operator(self): + # type: () -> bool return True def get_id_v1(self): + # type: () -> unicode return u'castto-%s-operator' % self.type.get_id_v1() def get_id_v2(self): + # type: () -> unicode return u'cv' + self.type.get_id_v2() def __unicode__(self): + # type: () -> unicode return u''.join(['operator ', text_type(self.type)]) def get_name_no_template(self): + # type: () -> unicode return text_type(self) def describe_signature(self, signode, mode, env, prefix, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None _verify_description_mode(mode) identifier = text_type(self) if mode == 'lastIsName': @@ -1029,21 +1126,27 @@ class ASTOperatorType(ASTBase): class ASTOperatorLiteral(ASTBase): def __init__(self, identifier): + # type: (Any) -> None self.identifier = identifier def is_operator(self): + # type: () -> bool return True def get_id_v1(self): + # type: () -> unicode raise NoOldIdError() def get_id_v2(self): + # type: () -> unicode return u'li' + self.identifier.get_id_v2() def __unicode__(self): + # type: () -> unicode return u'operator""' + text_type(self.identifier) def describe_signature(self, signode, mode, env, prefix, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None _verify_description_mode(mode) identifier = text_type(self) if mode == 'lastIsName': @@ -1054,38 +1157,46 @@ class ASTOperatorLiteral(ASTBase): class ASTTemplateArgConstant(ASTBase): def __init__(self, value): + # type: (Any) -> None self.value = value def __unicode__(self): + # type: () -> unicode return text_type(self.value) def get_id_v1(self): + # type: () -> unicode return text_type(self).replace(u' ', u'-') def get_id_v2(self): + # type: () -> unicode # TODO: doing this properly needs parsing of expressions, let's just # juse it verbatim for now return u'X' + 
text_type(self) + u'E' def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) signode += nodes.Text(text_type(self)) class ASTTemplateArgs(ASTBase): def __init__(self, args): + # type: (List[Any]) -> None assert args is not None assert len(args) > 0 self.args = args def get_id_v1(self): - res = [] + # type: () -> unicode + res = [] # type: List[unicode] res.append(':') res.append(u'.'.join(a.get_id_v1() for a in self.args)) res.append(':') return u''.join(res) def get_id_v2(self): + # type: () -> unicode res = [] res.append('I') for a in self.args: @@ -1094,10 +1205,12 @@ class ASTTemplateArgs(ASTBase): return u''.join(res) def __unicode__(self): + # type: () -> unicode res = ', '.join(text_type(a) for a in self.args) return '<' + res + '>' def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) signode += nodes.Text('<') first = True @@ -1111,31 +1224,37 @@ class ASTTemplateArgs(ASTBase): class ASTNestedNameElement(ASTBase): def __init__(self, identifier, templateArgs): + # type: (Any, Any) -> None self.identifier = identifier self.templateArgs = templateArgs def is_operator(self): + # type: () -> bool return False def get_id_v1(self): + # type: () -> unicode res = self.identifier.get_id_v1() if self.templateArgs: res += self.templateArgs.get_id_v1() return res def get_id_v2(self): + # type: () -> unicode res = self.identifier.get_id_v2() if self.templateArgs: res += self.templateArgs.get_id_v2() return res def __unicode__(self): + # type: () -> unicode res = text_type(self.identifier) if self.templateArgs: res += text_type(self.templateArgs) return res def describe_signature(self, signode, mode, env, prefix, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None self.identifier.describe_signature(signode, mode, env, prefix, symbol) if self.templateArgs: self.templateArgs.describe_signature(signode, mode, env, symbol) @@ -1143,15 +1262,18 @@ class ASTNestedNameElement(ASTBase): class ASTNestedName(ASTBase): def __init__(self, names, rooted): + # type: (List[Any], bool) -> None assert len(names) > 0 self.names = names self.rooted = rooted @property def name(self): + # type: () -> ASTNestedName return self def num_templates(self): + # type: () -> int count = 0 for n in self.names: if n.is_operator(): @@ -1161,6 +1283,7 @@ class ASTNestedName(ASTBase): return count def get_id_v1(self): + # type: () -> unicode tt = text_type(self) if tt in _id_shorthands_v1: return _id_shorthands_v1[tt] @@ -1168,7 +1291,8 @@ class ASTNestedName(ASTBase): return u'::'.join(n.get_id_v1() for n in self.names) def get_id_v2(self, modifiers=""): - res = [] + # type: (unicode) -> unicode + res = [] # type: List[unicode] if len(self.names) > 1 or len(modifiers) > 0: res.append('N') res.append(modifiers) @@ -1179,7 +1303,8 @@ class ASTNestedName(ASTBase): return u''.join(res) def __unicode__(self): - res = [] + # type: () -> unicode + res = [] # type: List[unicode] if self.rooted: res.append('') for n in self.names: @@ -1187,15 +1312,16 @@ class ASTNestedName(ASTBase): return '::'.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) # just print the name part, with template args, not template params if mode == 'lastIsName': - 
addname = [] + addname = [] # type: List[unicode] if self.rooted: addname.append('') for n in self.names[:-1]: addname.append(text_type(n)) - addname = '::'.join(addname) + addname = '::'.join(addname) # type: ignore if len(self.names) > 1: addname += '::' signode += addnodes.desc_addname(addname, addname) @@ -1209,7 +1335,7 @@ class ASTNestedName(ASTBase): # each element should be a pending xref targeting the complete # prefix. however, only the identifier part should be a link, such # that template args can be a link as well. - prefix = '' + prefix = '' # type: unicode first = True for name in self.names: if not first: @@ -1217,7 +1343,7 @@ class ASTNestedName(ASTBase): prefix += '::' first = False if name != '': - name.describe_signature(signode, mode, env, prefix, symbol) + name.describe_signature(signode, mode, env, prefix, symbol) # type: ignore prefix += text_type(name) else: raise Exception('Unknown description mode: %s' % mode) @@ -1225,12 +1351,15 @@ class ASTNestedName(ASTBase): class ASTTrailingTypeSpecFundamental(ASTBase): def __init__(self, name): + # type: (unicode) -> None self.name = name def __unicode__(self): + # type: () -> unicode return self.name def get_id_v1(self): + # type: () -> unicode res = [] for a in self.name.split(' '): if a in _id_fundamental_v1: @@ -1240,6 +1369,7 @@ class ASTTrailingTypeSpecFundamental(ASTBase): return u'-'.join(res) def get_id_v2(self): + # type: () -> unicode if self.name not in _id_fundamental_v2: raise Exception( 'Semi-internal error: Fundamental type "%s" can not be mapped ' @@ -1248,26 +1378,32 @@ class ASTTrailingTypeSpecFundamental(ASTBase): return _id_fundamental_v2[self.name] def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None signode += nodes.Text(text_type(self.name)) class ASTTrailingTypeSpecName(ASTBase): def __init__(self, prefix, nestedName): + # type: (unicode, Any) -> None self.prefix = prefix self.nestedName = nestedName @property def name(self): + # type: () -> Any return self.nestedName def get_id_v1(self): + # type: () -> unicode return self.nestedName.get_id_v1() def get_id_v2(self): + # type: () -> unicode return self.nestedName.get_id_v2() def __unicode__(self): - res = [] + # type: () -> unicode + res = [] # type: List[unicode] if self.prefix: res.append(self.prefix) res.append(' ') @@ -1275,6 +1411,7 @@ class ASTTrailingTypeSpecName(ASTBase): return u''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None if self.prefix: signode += addnodes.desc_annotation(self.prefix, self.prefix) signode += nodes.Text(' ') @@ -1283,28 +1420,33 @@ class ASTTrailingTypeSpecName(ASTBase): class ASTFunctinoParameter(ASTBase): def __init__(self, arg, ellipsis=False): + # type: (Any, bool) -> None self.arg = arg self.ellipsis = ellipsis def get_id_v1(self): + # type: () -> unicode if self.ellipsis: return 'z' else: return self.arg.get_id_v1() def get_id_v2(self): + # type: () -> unicode if self.ellipsis: return 'z' else: return self.arg.get_id_v2() def __unicode__(self): + # type: () -> unicode if self.ellipsis: return '...' 
else: return text_type(self.arg) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) if self.ellipsis: signode += nodes.Text('...') @@ -1315,6 +1457,7 @@ class ASTFunctinoParameter(ASTBase): class ASTParametersQualifiers(ASTBase): def __init__(self, args, volatile, const, refQual, exceptionSpec, override, final, initializer): + # type: (List[Any], bool, bool, unicode, unicode, bool, bool, unicode) -> None self.args = args self.volatile = volatile self.const = const @@ -1327,6 +1470,7 @@ class ASTParametersQualifiers(ASTBase): # Id v1 ------------------------------------------------------------------ def get_modifiers_id_v1(self): + # type: () -> unicode res = [] if self.volatile: res.append('V') @@ -1339,6 +1483,7 @@ class ASTParametersQualifiers(ASTBase): return u''.join(res) def get_param_id_v1(self): + # type: () -> unicode if len(self.args) == 0: return '' else: @@ -1347,6 +1492,7 @@ class ASTParametersQualifiers(ASTBase): # Id v2 ------------------------------------------------------------------ def get_modifiers_id_v2(self): + # type: () -> unicode res = [] if self.volatile: res.append('V') @@ -1359,13 +1505,15 @@ class ASTParametersQualifiers(ASTBase): return u''.join(res) def get_param_id_v2(self): + # type: () -> unicode if len(self.args) == 0: return 'v' else: return u''.join(a.get_id_v2() for a in self.args) def __unicode__(self): - res = [] + # type: () -> unicode + res = [] # type: List[unicode] res.append('(') first = True for a in self.args: @@ -1394,6 +1542,7 @@ class ASTParametersQualifiers(ASTBase): return u''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) paramlist = addnodes.desc_parameterlist() for arg in self.args: @@ -1431,6 +1580,7 @@ class ASTParametersQualifiers(ASTBase): class ASTDeclSpecsSimple(ASTBase): def __init__(self, storage, threadLocal, inline, virtual, explicit, constexpr, volatile, const, friend, attrs): + # type: (unicode, bool, bool, bool, bool, bool, bool, bool, bool, List[Any]) -> None self.storage = storage self.threadLocal = threadLocal self.inline = inline @@ -1443,6 +1593,7 @@ class ASTDeclSpecsSimple(ASTBase): self.attrs = attrs def mergeWith(self, other): + # type: (ASTDeclSpecsSimple) -> ASTDeclSpecsSimple if not other: return self return ASTDeclSpecsSimple(self.storage or other.storage, @@ -1457,7 +1608,8 @@ class ASTDeclSpecsSimple(ASTBase): self.attrs + other.attrs) def __unicode__(self): - res = [] + # type: () -> unicode + res = [] # type: List[unicode] res.extend(text_type(attr) for attr in self.attrs) if self.storage: res.append(self.storage) @@ -1480,6 +1632,7 @@ class ASTDeclSpecsSimple(ASTBase): return u' '.join(res) def describe_signature(self, modifiers): + # type: (List[nodes.Node]) -> None def _add(modifiers, text): if len(modifiers) > 0: modifiers.append(nodes.Text(' ')) @@ -1520,9 +1673,11 @@ class ASTDeclSpecs(ASTBase): @property def name(self): + # type: () -> unicode return self.trailingTypeSpec.name def get_id_v1(self): + # type: () -> unicode res = [] res.append(self.trailingTypeSpec.get_id_v1()) if self.allSpecs.volatile: @@ -1532,6 +1687,7 @@ class ASTDeclSpecs(ASTBase): return u''.join(res) def get_id_v2(self): + # type: () -> unicode res = [] if self.leftSpecs.volatile or self.rightSpecs.volatile: res.append('V') @@ -1541,7 +1697,8 @@ class ASTDeclSpecs(ASTBase): return 
u''.join(res) def __unicode__(self): - res = [] + # type: () -> unicode + res = [] # type: List[unicode] l = text_type(self.leftSpecs) if len(l) > 0: if len(res) > 0: @@ -1559,8 +1716,9 @@ class ASTDeclSpecs(ASTBase): return "".join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) - modifiers = [] + modifiers = [] # type: List[nodes.Node] def _add(modifiers, text): if len(modifiers) > 0: @@ -1586,15 +1744,19 @@ class ASTDeclSpecs(ASTBase): class ASTArray(ASTBase): def __init__(self, size): + # type: (unicode) -> None self.size = size def __unicode__(self): + # type: () -> unicode return u''.join(['[', text_type(self.size), ']']) def get_id_v1(self): + # type: () -> unicode return u'A' def get_id_v2(self): + # type: () -> unicode # TODO: this should maybe be done differently return u'A' + text_type(self.size) + u'_' @@ -1605,6 +1767,7 @@ class ASTArray(ASTBase): class ASTDeclaratorPtr(ASTBase): def __init__(self, next, volatile, const): + # type: (Any, bool, bool) -> None assert next self.next = next self.volatile = volatile @@ -1612,14 +1775,17 @@ class ASTDeclaratorPtr(ASTBase): @property def name(self): + # type: () -> unicode return self.next.name def require_space_after_declSpecs(self): + # type: () -> bool # TODO: if has paramPack, then False ? return True def __unicode__(self): - res = ['*'] + # type: () -> unicode + res = ['*'] # type: List[unicode] if self.volatile: res.append('volatile') if self.const: @@ -1635,12 +1801,15 @@ class ASTDeclaratorPtr(ASTBase): # Id v1 ------------------------------------------------------------------ def get_modifiers_id_v1(self): + # type: () -> unicode return self.next.get_modifiers_id_v1() def get_param_id_v1(self): + # type: () -> unicode return self.next.get_param_id_v1() def get_ptr_suffix_id_v1(self): + # type: () -> unicode res = 'P' if self.volatile: res += 'V' @@ -1651,13 +1820,16 @@ class ASTDeclaratorPtr(ASTBase): # Id v2 ------------------------------------------------------------------ def get_modifiers_id_v2(self): + # type: () -> unicode return self.next.get_modifiers_id_v2() def get_param_id_v2(self): + # type: () -> unicode return self.next.get_param_id_v2() def get_ptr_suffix_id_v2(self): - res = [self.next.get_ptr_suffix_id_v2()] + # type: () -> unicode + res = [self.next.get_ptr_suffix_id_v2()] # type: List[unicode] res.append('P') if self.volatile: res.append('V') @@ -1666,8 +1838,9 @@ class ASTDeclaratorPtr(ASTBase): return u''.join(res) def get_type_id_v2(self, returnTypeId): + # type: (unicode) -> unicode # ReturnType *next, so we are part of the return type of 'next - res = ['P'] + res = ['P'] # type: List[unicode] if self.volatile: res.append('V') if self.const: @@ -1678,9 +1851,11 @@ class ASTDeclaratorPtr(ASTBase): # ------------------------------------------------------------------------ def is_function_type(self): + # type: () -> bool return self.next.is_function_type() def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) signode += nodes.Text("*") @@ -1700,51 +1875,64 @@ class ASTDeclaratorPtr(ASTBase): class ASTDeclaratorRef(ASTBase): def __init__(self, next): + # type: (Any) -> None assert next self.next = next @property def name(self): + # type: () -> unicode return self.next.name def require_space_after_declSpecs(self): + # type: () -> bool return 
self.next.require_space_after_declSpecs() def __unicode__(self): + # type: () -> unicode return '&' + text_type(self.next) # Id v1 ------------------------------------------------------------------ def get_modifiers_id_v1(self): + # type: () -> unicode return self.next.get_modifiers_id_v1() def get_param_id_v1(self): # only the parameters (if any) + # type: () -> unicode return self.next.get_param_id_v1() def get_ptr_suffix_id_v1(self): + # type: () -> unicode return u'R' + self.next.get_ptr_suffix_id_v1() # Id v2 ------------------------------------------------------------------ def get_modifiers_id_v2(self): + # type: () -> unicode return self.next.get_modifiers_id_v2() def get_param_id_v2(self): # only the parameters (if any) + # type: () -> unicode return self.next.get_param_id_v2() def get_ptr_suffix_id_v2(self): + # type: () -> unicode return self.next.get_ptr_suffix_id_v2() + u'R' def get_type_id_v2(self, returnTypeId): + # type: (unicode) -> unicode # ReturnType &next, so we are part of the return type of 'next return self.next.get_type_id_v2(returnTypeId=u'R' + returnTypeId) # ------------------------------------------------------------------------ def is_function_type(self): + # type: () -> bool return self.next.is_function_type() def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) signode += nodes.Text("&") self.next.describe_signature(signode, mode, env, symbol) @@ -1752,17 +1940,21 @@ class ASTDeclaratorRef(ASTBase): class ASTDeclaratorParamPack(ASTBase): def __init__(self, next): + # type: (Any) -> None assert next self.next = next @property def name(self): + # type: () -> unicode return self.next.name def require_space_after_declSpecs(self): + # type: () -> bool return False def __unicode__(self): + # type: () -> unicode res = text_type(self.next) if self.next.name: res = ' ' + res @@ -1771,35 +1963,43 @@ class ASTDeclaratorParamPack(ASTBase): # Id v1 ------------------------------------------------------------------ def get_modifiers_id_v1(self): + # type: () -> unicode return self.next.get_modifiers_id_v1() def get_param_id_v1(self): # only the parameters (if any) + # type: () -> unicode return self.next.get_param_id_v1() def get_ptr_suffix_id_v1(self): + # type: () -> unicode return 'Dp' + self.next.get_ptr_suffix_id_v2() # Id v2 ------------------------------------------------------------------ def get_modifiers_id_v2(self): + # type: () -> unicode return self.next.get_modifiers_id_v2() def get_param_id_v2(self): # only the parameters (if any) return self.next.get_param_id_v2() def get_ptr_suffix_id_v2(self): + # type: () -> unicode return self.next.get_ptr_suffix_id_v2() + u'Dp' def get_type_id_v2(self, returnTypeId): + # type: (unicode) -> unicode # ReturnType... 
next, so we are part of the return type of 'next return self.next.get_type_id_v2(returnTypeId=u'Dp' + returnTypeId) # ------------------------------------------------------------------------ def is_function_type(self): + # type: () -> bool return self.next.is_function_type() def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) signode += nodes.Text("...") if self.next.name: @@ -1809,6 +2009,7 @@ class ASTDeclaratorParamPack(ASTBase): class ASTDeclaratorMemPtr(ASTBase): def __init__(self, className, const, volatile, next): + # type: (Any, bool, bool, Any) -> None assert className assert next self.className = className @@ -1818,12 +2019,15 @@ class ASTDeclaratorMemPtr(ASTBase): @property def name(self): + # type: () -> unicode return self.next.name def require_space_after_declSpecs(self): + # type: () -> bool return True def __unicode__(self): + # type: () -> unicode res = [] res.append(text_type(self.className)) res.append('::*') @@ -1839,29 +2043,36 @@ class ASTDeclaratorMemPtr(ASTBase): # Id v1 ------------------------------------------------------------------ def get_modifiers_id_v1(self): + # type: () -> unicode raise NoOldIdError() def get_param_id_v1(self): # only the parameters (if any) + # type: () -> unicode raise NoOldIdError() def get_ptr_suffix_id_v1(self): + # type: () -> unicode raise NoOldIdError() # Id v2 ------------------------------------------------------------------ def get_modifiers_id_v2(self): + # type: () -> unicode return self.next.get_modifiers_id_v2() def get_param_id_v2(self): # only the parameters (if any) + # type: () -> unicode return self.next.get_param_id_v2() def get_ptr_suffix_id_v2(self): + # type: () -> unicode raise NotImplementedError() return self.next.get_ptr_suffix_id_v2() + u'Dp' def get_type_id_v2(self, returnTypeId): + # type: (unicode) -> unicode # ReturnType name::* next, so we are part of the return type of next - nextReturnTypeId = '' + nextReturnTypeId = '' # type: unicode if self.volatile: nextReturnTypeId += 'V' if self.const: @@ -1874,9 +2085,11 @@ class ASTDeclaratorMemPtr(ASTBase): # ------------------------------------------------------------------------ def is_function_type(self): + # type: () -> bool return self.next.is_function_type() def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) self.className.describe_signature(signode, mode, env, symbol) signode += nodes.Text('::*') @@ -1897,6 +2110,7 @@ class ASTDeclaratorMemPtr(ASTBase): class ASTDeclaratorParen(ASTBase): def __init__(self, inner, next): + # type: (Any, Any) -> None assert inner assert next self.inner = inner @@ -1905,13 +2119,16 @@ class ASTDeclaratorParen(ASTBase): @property def name(self): + # type: () -> unicode return self.inner.name def require_space_after_declSpecs(self): + # type: () -> bool return True def __unicode__(self): - res = ['('] + # type: () -> unicode + res = ['('] # type: List[unicode] res.append(text_type(self.inner)) res.append(')') res.append(text_type(self.next)) @@ -1920,12 +2137,15 @@ class ASTDeclaratorParen(ASTBase): # Id v1 ------------------------------------------------------------------ def get_modifiers_id_v1(self): + # type: () -> unicode return self.inner.get_modifiers_id_v1() def get_param_id_v1(self): # only the parameters (if any) + # type: () -> unicode return self.inner.get_param_id_v1() def 
get_ptr_suffix_id_v1(self): + # type: () -> unicode raise NoOldIdError() # TODO: was this implemented before? return self.next.get_ptr_suffix_id_v2() + \ self.inner.get_ptr_suffix_id_v2() @@ -1933,16 +2153,20 @@ class ASTDeclaratorParen(ASTBase): # Id v2 ------------------------------------------------------------------ def get_modifiers_id_v2(self): + # type: () -> unicode return self.inner.get_modifiers_id_v2() def get_param_id_v2(self): # only the parameters (if any) + # type: () -> unicode return self.inner.get_param_id_v2() def get_ptr_suffix_id_v2(self): + # type: () -> unicode return self.inner.get_ptr_suffix_id_v2() + \ self.next.get_ptr_suffix_id_v2() def get_type_id_v2(self, returnTypeId): + # type: (unicode) -> unicode # ReturnType (inner)next, so 'inner' returns everything outside nextId = self.next.get_type_id_v2(returnTypeId) return self.inner.get_type_id_v2(returnTypeId=nextId) @@ -1950,9 +2174,11 @@ class ASTDeclaratorParen(ASTBase): # ------------------------------------------------------------------------ def is_function_type(self): + # type: () -> bool return self.inner.is_function_type() def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) signode += nodes.Text('(') self.inner.describe_signature(signode, mode, env, symbol) @@ -1962,17 +2188,20 @@ class ASTDeclaratorParen(ASTBase): class ASTDecleratorNameParamQual(ASTBase): def __init__(self, declId, arrayOps, paramQual): + # type: (Any, List[Any], Any) -> None self.declId = declId self.arrayOps = arrayOps self.paramQual = paramQual @property def name(self): + # type: () -> unicode return self.declId # Id v1 ------------------------------------------------------------------ def get_modifiers_id_v1(self): # only the modifiers for a function, e.g., + # type: () -> unicode # cv-qualifiers if self.paramQual: return self.paramQual.get_modifiers_id_v1() @@ -1980,17 +2209,20 @@ class ASTDecleratorNameParamQual(ASTBase): "This should only be called on a function: %s" % text_type(self)) def get_param_id_v1(self): # only the parameters (if any) + # type: () -> unicode if self.paramQual: return self.paramQual.get_param_id_v1() else: return '' def get_ptr_suffix_id_v1(self): # only the array specifiers + # type: () -> unicode return u''.join(a.get_id_v1() for a in self.arrayOps) # Id v2 ------------------------------------------------------------------ def get_modifiers_id_v2(self): # only the modifiers for a function, e.g., + # type: () -> unicode # cv-qualifiers if self.paramQual: return self.paramQual.get_modifiers_id_v2() @@ -1998,15 +2230,18 @@ class ASTDecleratorNameParamQual(ASTBase): "This should only be called on a function: %s" % text_type(self)) def get_param_id_v2(self): # only the parameters (if any) + # type: () -> unicode if self.paramQual: return self.paramQual.get_param_id_v2() else: return '' def get_ptr_suffix_id_v2(self): # only the array specifiers + # type: () -> unicode return u''.join(a.get_id_v2() for a in self.arrayOps) def get_type_id_v2(self, returnTypeId): + # type: (unicode) -> unicode res = [] # TOOD: can we actually have both array ops and paramQual? 
res.append(self.get_ptr_suffix_id_v2()) @@ -2023,12 +2258,15 @@ class ASTDecleratorNameParamQual(ASTBase): # ------------------------------------------------------------------------ def require_space_after_declSpecs(self): + # type: () -> bool return self.declId is not None def is_function_type(self): + # type: () -> bool return self.paramQual is not None def __unicode__(self): + # type: () -> unicode res = [] if self.declId: res.append(text_type(self.declId)) @@ -2039,6 +2277,7 @@ class ASTDecleratorNameParamQual(ASTBase): return u''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) if self.declId: self.declId.describe_signature(signode, mode, env, symbol) @@ -2050,18 +2289,22 @@ class ASTDecleratorNameParamQual(ASTBase): class ASTInitializer(ASTBase): def __init__(self, value): + # type: (unicode) -> None self.value = value def __unicode__(self): + # type: () -> unicode return u''.join([' = ', text_type(self.value)]) def describe_signature(self, signode, mode): + # type: (addnodes.desc_signature, unicode) -> None _verify_description_mode(mode) signode += nodes.Text(text_type(self)) class ASTType(ASTBase): def __init__(self, declSpecs, decl): + # type: (Any, Any) -> None assert declSpecs assert decl self.declSpecs = declSpecs @@ -2069,10 +2312,12 @@ class ASTType(ASTBase): @property def name(self): + # type: () -> unicode name = self.decl.name return name def get_id_v1(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode res = [] if objectType: # needs the name if objectType == 'function': # also modifiers @@ -2097,6 +2342,7 @@ class ASTType(ASTBase): return u''.join(res) def get_id_v2(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode res = [] if objectType: # needs the name if objectType == 'function': # also modifiers @@ -2117,6 +2363,7 @@ class ASTType(ASTBase): return u''.join(res) def __unicode__(self): + # type: () -> unicode res = [] declSpecs = text_type(self.declSpecs) res.append(declSpecs) @@ -2126,12 +2373,14 @@ class ASTType(ASTBase): return u''.join(res) def get_type_declaration_prefix(self): + # type: () -> unicode if self.declSpecs.trailingTypeSpec: return 'typedef' else: return 'type' def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) self.declSpecs.describe_signature(signode, 'markType', env, symbol) if (self.decl.require_space_after_declSpecs() and @@ -2142,14 +2391,17 @@ class ASTType(ASTBase): class ASTTypeWithInit(ASTBase): def __init__(self, type, init): + # type: (Any, Any) -> None self.type = type self.init = init @property def name(self): + # type: () -> unicode return self.type.name def get_id_v1(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode if objectType == 'member': return symbol.get_full_nested_name().get_id_v1() + u'__' \ + self.type.get_id_v1() @@ -2157,12 +2409,14 @@ class ASTTypeWithInit(ASTBase): return self.type.get_id_v1(objectType) def get_id_v2(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode if objectType == 'member': return symbol.get_full_nested_name().get_id_v2() else: return self.type.get_id_v2() def __unicode__(self): + # type: () -> unicode res = [] res.append(text_type(self.type)) if self.init: @@ -2170,6 +2424,7 @@ class ASTTypeWithInit(ASTBase): return u''.join(res) def describe_signature(self, 
signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) self.type.describe_signature(signode, mode, env, symbol=symbol) if self.init: @@ -2178,16 +2433,20 @@ class ASTTypeWithInit(ASTBase): class ASTTypeUsing(ASTBase): def __init__(self, name, type): + # type: (Any, Any) -> None self.name = name self.type = type def get_id_v1(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode raise NoOldIdError() def get_id_v2(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode return symbol.get_full_nested_name().get_id_v2() def __unicode__(self): + # type: () -> unicode res = [] res.append(text_type(self.name)) if self.type: @@ -2196,9 +2455,11 @@ class ASTTypeUsing(ASTBase): return u''.join(res) def get_type_declaration_prefix(self): + # type: () -> unicode return 'using' def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) self.name.describe_signature(signode, mode, env, symbol=symbol) if self.type: @@ -2208,21 +2469,26 @@ class ASTTypeUsing(ASTBase): class ASTConcept(ASTBase): def __init__(self, nestedName, isFunction, initializer): + # type: (Any, bool, Any) -> None self.nestedName = nestedName self.isFunction = isFunction # otherwise it's a variable concept self.initializer = initializer @property def name(self): + # type: () -> unicode return self.nestedName def get_id_v1(self, objectType=None, symbol=None): + # type: (unicode, Symbol) -> unicode raise NoOldIdError() - def get_id_v2(self, objectType, symbol): + def get_id_v2(self, objectType, symbol): # type: ignore + # type: (unicode, Symbol) -> unicode return symbol.get_full_nested_name().get_id_v2() def __unicode__(self): + # type: () -> unicode res = text_type(self.nestedName) if self.isFunction: res += "()" @@ -2231,6 +2497,7 @@ class ASTConcept(ASTBase): return res def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None signode += nodes.Text(text_type("bool ")) self.nestedName.describe_signature(signode, mode, env, symbol) if self.isFunction: @@ -2241,13 +2508,15 @@ class ASTConcept(ASTBase): class ASTBaseClass(ASTBase): def __init__(self, name, visibility, virtual, pack): + # type: (Any, unicode, bool, bool) -> None self.name = name self.visibility = visibility self.virtual = virtual self.pack = pack def __unicode__(self): - res = [] + # type: () -> unicode + res = [] # type: List[unicode] if self.visibility != 'private': res.append(self.visibility) res.append(' ') @@ -2259,6 +2528,7 @@ class ASTBaseClass(ASTBase): return u''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) if self.visibility != 'private': signode += addnodes.desc_annotation(self.visibility, @@ -2274,17 +2544,21 @@ class ASTBaseClass(ASTBase): class ASTClass(ASTBase): def __init__(self, name, final, bases): + # type: (Any, bool, List[Any]) -> None self.name = name self.final = final self.bases = bases - def get_id_v1(self, objectType, symbol): + def get_id_v1(self, objectType, symbol): # type: ignore + # type: (unicode, Symbol) -> unicode return symbol.get_full_nested_name().get_id_v1() - def get_id_v2(self, objectType, symbol): + def get_id_v2(self, objectType, symbol): # type: ignore + # type: (unicode, Symbol) -> unicode 
return symbol.get_full_nested_name().get_id_v2() def __unicode__(self): + # type: () -> unicode res = [] res.append(text_type(self.name)) if self.final: @@ -2300,6 +2574,7 @@ class ASTClass(ASTBase): return u''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) self.name.describe_signature(signode, mode, env, symbol=symbol) if self.final: @@ -2315,18 +2590,22 @@ class ASTClass(ASTBase): class ASTEnum(ASTBase): def __init__(self, name, scoped, underlyingType): + # type: (Any, unicode, Any) -> None self.name = name self.scoped = scoped self.underlyingType = underlyingType - def get_id_v1(self, objectType, symbol): + def get_id_v1(self, objectType, symbol): # type: ignore + # type: (unicode, Symbol) -> unicode raise NoOldIdError() - def get_id_v2(self, objectType, symbol): + def get_id_v2(self, objectType, symbol): # type: ignore + # type: (unicode, Symbol) -> unicode return symbol.get_full_nested_name().get_id_v2() def __unicode__(self): - res = [] + # type: () -> unicode + res = [] # type: List[unicode] if self.scoped: res.append(self.scoped) res.append(' ') @@ -2337,6 +2616,7 @@ class ASTEnum(ASTBase): return u''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) # self.scoped has been done by the CPPEnumObject self.name.describe_signature(signode, mode, env, symbol=symbol) @@ -2348,16 +2628,20 @@ class ASTEnum(ASTBase): class ASTEnumerator(ASTBase): def __init__(self, name, init): + # type: (Any, Any) -> None self.name = name self.init = init - def get_id_v1(self, objectType, symbol): + def get_id_v1(self, objectType, symbol): # type: ignore + # type: (unicode, Symbol) -> unicode raise NoOldIdError() - def get_id_v2(self, objectType, symbol): + def get_id_v2(self, objectType, symbol): # type: ignore + # type: (unicode, Symbol) -> unicode return symbol.get_full_nested_name().get_id_v2() def __unicode__(self): + # type: () -> unicode res = [] res.append(text_type(self.name)) if self.init: @@ -2365,6 +2649,7 @@ class ASTEnumerator(ASTBase): return u''.join(res) def describe_signature(self, signode, mode, env, symbol): + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) self.name.describe_signature(signode, mode, env, symbol=symbol) if self.init: @@ -2373,16 +2658,18 @@ class ASTEnumerator(ASTBase): class ASTDeclaration(ASTBase): def __init__(self, objectType, visibility, templatePrefix, declaration): + # type: (unicode, unicode, Any, Any) -> None self.objectType = objectType self.visibility = visibility self.templatePrefix = templatePrefix self.declaration = declaration - self.symbol = None + self.symbol = None # type: Symbol # set by CPPObject._add_enumerator_to_parent - self.enumeratorScopedSymbol = None + self.enumeratorScopedSymbol = None # type: Any def clone(self): + # type: () -> ASTDeclaration if self.templatePrefix: templatePrefixClone = self.templatePrefix.clone() else: @@ -2393,9 +2680,11 @@ class ASTDeclaration(ASTBase): @property def name(self): + # type: () -> unicode return self.declaration.name def get_id_v1(self): + # type: () -> unicode if self.templatePrefix: raise NoOldIdError() if self.objectType == 'enumerator' and self.enumeratorScopedSymbol: @@ -2403,6 +2692,7 @@ class ASTDeclaration(ASTBase): return self.declaration.get_id_v1(self.objectType, self.symbol) def 
get_id_v2(self, prefixed=True): + # type: (bool) -> unicode if self.objectType == 'enumerator' and self.enumeratorScopedSymbol: return self.enumeratorScopedSymbol.declaration.get_id_v2(prefixed) if prefixed: @@ -2415,10 +2705,12 @@ class ASTDeclaration(ASTBase): return u''.join(res) def get_newest_id(self): + # type: () -> unicode return self.get_id_v2() def __unicode__(self): - res = [] + # type: () -> unicode + res = [] # type: List[unicode] if self.visibility and self.visibility != "public": res.append(self.visibility) res.append(u' ') @@ -2428,6 +2720,7 @@ class ASTDeclaration(ASTBase): return u''.join(res) def describe_signature(self, signode, mode, env): + # type: (addnodes.desc_signature, unicode, BuildEnvironment) -> None _verify_description_mode(mode) # The caller of the domain added a desc_signature node. # Always enable multiline: @@ -2459,8 +2752,8 @@ class ASTDeclaration(ASTBase): mainDeclNode += addnodes.desc_annotation('class ', 'class ') elif self.objectType == 'enum': prefix = 'enum ' - if self.scoped: - prefix += self.scoped + if self.scoped: # type: ignore + prefix += self.scoped # type: ignore prefix += ' ' mainDeclNode += addnodes.desc_annotation(prefix, prefix) elif self.objectType == 'enumerator': @@ -2473,12 +2766,14 @@ class ASTDeclaration(ASTBase): class ASTNamespace(ASTBase): def __init__(self, nestedName, templatePrefix): + # type: (Any, Any) -> None self.nestedName = nestedName self.templatePrefix = templatePrefix class Symbol(object): def _assert_invariants(self): + # type: () -> None if not self.parent: # parent == None means global scope, so declaration means a parent assert not self.identifier @@ -2495,6 +2790,7 @@ class Symbol(object): def __init__(self, parent, identifier, templateParams, templateArgs, declaration, docname): + # type: (Any, Any, Any, Any, Any, unicode) -> None self.parent = parent self.identifier = identifier self.templateParams = templateParams # template @@ -2503,7 +2799,7 @@ class Symbol(object): self.docname = docname self._assert_invariants() - self.children = [] + self.children = [] # type: List[Any] if self.parent: self.parent.children.append(self) if self.declaration: @@ -2524,6 +2820,7 @@ class Symbol(object): self._add_symbols(nn, [], decl, docname) def _fill_empty(self, declaration, docname): + # type: (Any, unicode) -> None self._assert_invariants() assert not self.declaration assert not self.docname @@ -2535,6 +2832,7 @@ class Symbol(object): self._assert_invariants() def clear_doc(self, docname): + # type: (unicode) -> None newChildren = [] for sChild in self.children: sChild.clear_doc(docname) @@ -2550,12 +2848,14 @@ class Symbol(object): self.children = newChildren def get_all_symbols(self): + # type: () -> Iterator[Any] yield self for sChild in self.children: for s in sChild.get_all_symbols(): yield s def get_lookup_key(self): + # type: () -> List[Tuple[ASTNestedNameElement, Any]] if not self.parent: # specialise for the root return None @@ -2576,6 +2876,7 @@ class Symbol(object): return key def get_full_nested_name(self): + # type: () -> ASTNestedName names = [] for nne, templateParams in self.get_lookup_key(): names.append(nne) @@ -2584,6 +2885,7 @@ class Symbol(object): def _find_named_symbol(self, identifier, templateParams, templateArgs, operator, templateShorthand, matchSelf): + # type: (Any, Any, Any, Any, Any, bool) -> Symbol assert (identifier is None) != (operator is None) def matches(s): @@ -2624,6 +2926,7 @@ class Symbol(object): return None def _add_symbols(self, nestedName, templateDecls, declaration, 
docname): + # type: (Any, List[Any], Any, unicode) -> Symbol # This condition should be checked at the parser level. # Each template argument list must have a template parameter list. # But to declare a template there must be an additional template parameter list. @@ -2722,6 +3025,7 @@ class Symbol(object): return symbol def merge_with(self, other, docnames, env): + # type: (Any, List[unicode], BuildEnvironment) -> None assert other is not None for otherChild in other.children: if not otherChild.identifier: @@ -2765,6 +3069,7 @@ class Symbol(object): ourChild.merge_with(otherChild, docnames, env) def add_name(self, nestedName, templatePrefix=None): + # type: (unicode, Any) -> Symbol if templatePrefix: templateDecls = templatePrefix.templates else: @@ -2773,6 +3078,7 @@ class Symbol(object): declaration=None, docname=None) def add_declaration(self, declaration, docname): + # type: (Any, unicode) -> Symbol assert declaration assert docname nestedName = declaration.name @@ -2783,6 +3089,7 @@ class Symbol(object): return self._add_symbols(nestedName, templateDecls, declaration, docname) def find_identifier(self, identifier, matchSelf): + # type: (Any, bool) -> Symbol if matchSelf and self.identifier and self.identifier == identifier: return self for s in self.children: @@ -2791,6 +3098,7 @@ class Symbol(object): return None def direct_lookup(self, key): + # type: (List[Tuple[Any, Any]]) -> Symbol s = self for name, templateParams in key: if name.is_operator(): @@ -2810,6 +3118,7 @@ class Symbol(object): return s def find_name(self, nestedName, templateDecls, templateShorthand, matchSelf): + # type: (Any, Any, Any, bool) -> Symbol # templateShorthand: missing template parameter lists for templates is ok # TODO: unify this with the _add_symbols @@ -2885,7 +3194,8 @@ class Symbol(object): assert False # should have returned in the loop def to_string(self, indent): - res = ['\t'*indent] + # type: (int) -> unicode + res = ['\t'*indent] # type: List[unicode] if not self.parent: res.append('::') else: @@ -2910,6 +3220,7 @@ class Symbol(object): return ''.join(res) def dump(self, indent): + # type: (int) -> unicode res = [self.to_string(indent)] for c in self.children: res.append(c.dump(indent + 1)) @@ -2927,16 +3238,18 @@ class DefinitionParser(object): _prefix_keys = ('class', 'struct', 'enum', 'union', 'typename') def __init__(self, definition, warnEnv, config): + # type: (Any, Any, Config) -> None self.definition = definition.strip() self.pos = 0 self.end = len(self.definition) - self.last_match = None - self._previous_state = (0, None) + self.last_match = None # type: Match + self._previous_state = (0, None) # type: Tuple[int, Match] self.warnEnv = warnEnv self.config = config def _make_multi_error(self, errors, header): + # type: (List[Any], unicode) -> DefinitionError if len(errors) == 1: return DefinitionError(header + '\n' + errors[0][0].description) result = [header, '\n'] @@ -2956,23 +3269,27 @@ class DefinitionParser(object): return DefinitionError(''.join(result)) def status(self, msg): + # type: (unicode) -> unicode # for debugging indicator = '-' * self.pos + '^' print("%s\n%s\n%s" % (msg, self.definition, indicator)) def fail(self, msg): + # type: (unicode) -> None indicator = '-' * self.pos + '^' raise DefinitionError( 'Invalid definition: %s [error at %d]\n %s\n %s' % (msg, self.pos, self.definition, indicator)) def warn(self, msg): + # type: (unicode) -> None if self.warnEnv: self.warnEnv.warn(msg) else: print("Warning: %s" % msg) def match(self, regex): + # type: (Pattern) -> bool 
match = regex.match(self.definition, self.pos) if match is not None: self._previous_state = (self.pos, self.last_match) @@ -2982,9 +3299,11 @@ class DefinitionParser(object): return False def backout(self): + # type: () -> None self.pos, self.last_match = self._previous_state def skip_string(self, string): + # type: (unicode) -> bool strlen = len(string) if self.definition[self.pos:self.pos + strlen] == string: self.pos += strlen @@ -2992,18 +3311,22 @@ class DefinitionParser(object): return False def skip_word(self, word): + # type: (unicode) -> bool return self.match(re.compile(r'\b%s\b' % re.escape(word))) def skip_ws(self): + # type: (unicode) -> bool return self.match(_whitespace_re) def skip_word_and_ws(self, word): + # type: (unicode) -> bool if self.skip_word(word): self.skip_ws() return True return False def skip_string_and_ws(self, string): + # type: (unicode) -> bool if self.skip_string(string): self.skip_ws() return True @@ -3011,10 +3334,12 @@ class DefinitionParser(object): @property def eof(self): + # type: () -> bool return self.pos >= self.end @property def current_char(self): + # type: () -> unicode try: return self.definition[self.pos] except IndexError: @@ -3022,24 +3347,28 @@ class DefinitionParser(object): @property def matched_text(self): + # type: () -> unicode if self.last_match is not None: return self.last_match.group() def read_rest(self): + # type: () -> unicode rv = self.definition[self.pos:] self.pos = self.end return rv def assert_end(self): + # type: () -> None self.skip_ws() if not self.eof: self.fail('Expected end of definition.') def _parse_balanced_token_seq(self, end): + # type: (List[unicode]) -> unicode # TODO: add handling of string literals and similar - brackets = {'(': ')', '[': ']', '{': '}'} + brackets = {'(': ')', '[': ']', '{': '}'} # type: Dict[unicode, unicode] startPos = self.pos - symbols = [] + symbols = [] # type: List[unicode] while not self.eof: if len(symbols) == 0 and self.current_char in end: break @@ -3056,6 +3385,7 @@ class DefinitionParser(object): return self.definition[startPos:self.pos] def _parse_attribute(self): + # type: () -> Any self.skip_ws() # try C++11 style startPos = self.pos @@ -3115,6 +3445,7 @@ class DefinitionParser(object): return None def _parse_expression(self, end): + # type: (List[unicode]) -> unicode # Stupidly "parse" an expression. # 'end' should be a list of characters which ends the expression. 
assert end @@ -3124,8 +3455,8 @@ class DefinitionParser(object): value = self.matched_text else: # TODO: add handling of more bracket-like things, and quote handling - brackets = {'(': ')', '[': ']'} - symbols = [] + brackets = {'(': ')', '[': ']'} # type: Dict[unicode, unicode] + symbols = [] # type: List[unicode] while not self.eof: if (len(symbols) == 0 and self.current_char in end): break @@ -3141,6 +3472,7 @@ class DefinitionParser(object): return value.strip() def _parse_operator(self): + # type: () -> Any self.skip_ws() # adapted from the old code # thank god, a regular operator definition @@ -3173,11 +3505,12 @@ class DefinitionParser(object): return ASTOperatorType(type) def _parse_template_argument_list(self): + # type: () -> ASTTemplateArgs self.skip_ws() if not self.skip_string('<'): return None prevErrors = [] - templateArgs = [] + templateArgs = [] # type: List while 1: pos = self.pos parsedComma = False @@ -3216,6 +3549,7 @@ class DefinitionParser(object): return ASTTemplateArgs(templateArgs) def _parse_nested_name(self, memberPointer=False): + # type: (bool) -> ASTNestedName names = [] self.skip_ws() @@ -3240,7 +3574,7 @@ class DefinitionParser(object): self.fail("Expected identifier in nested name, " "got keyword: %s" % identifier) templateArgs = self._parse_template_argument_list() - identifier = ASTIdentifier(identifier) + identifier = ASTIdentifier(identifier) # type: ignore names.append(ASTNestedNameElement(identifier, templateArgs)) self.skip_ws() @@ -3251,6 +3585,7 @@ class DefinitionParser(object): return ASTNestedName(names, rooted) def _parse_trailing_type_spec(self): + # type: () -> Any # fundemental types self.skip_ws() for t in self._simple_fundemental_types: @@ -3296,6 +3631,7 @@ class DefinitionParser(object): return ASTTrailingTypeSpecName(prefix, nestedName) def _parse_parameters_and_qualifiers(self, paramMode): + # type: (unicode) -> ASTParametersQualifiers self.skip_ws() if not self.skip_string('('): if paramMode == 'function': @@ -3385,6 +3721,7 @@ class DefinitionParser(object): initializer) def _parse_decl_specs_simple(self, outer, typed): + # type: (unicode, bool) -> ASTDeclSpecsSimple """Just parse the simple ones.""" storage = None threadLocal = None @@ -3459,6 +3796,7 @@ class DefinitionParser(object): friend, attrs) def _parse_decl_specs(self, outer, typed=True): + # type: (unicode, bool) -> ASTDeclSpecs if outer: if outer not in ('type', 'member', 'function', 'templateParam'): raise Exception('Internal error, unknown outer "%s".' 
% outer) @@ -3486,6 +3824,7 @@ class DefinitionParser(object): return ASTDeclSpecs(outer, leftSpecs, rightSpecs, trailing) def _parse_declarator_name_param_qual(self, named, paramMode, typed): + # type: (Union[bool, unicode], unicode, bool) -> ASTDecleratorNameParamQual # now we should parse the name, and then suffixes if named == 'maybe': pos = self.pos @@ -3525,6 +3864,7 @@ class DefinitionParser(object): paramQual=paramQual) def _parse_declerator(self, named, paramMode, typed=True): + # type: (Union[bool, unicode], unicode, bool) -> Any # 'typed' here means 'parse return type stuff' if paramMode not in ('type', 'function', 'operatorCast'): raise Exception( @@ -3625,13 +3965,14 @@ class DefinitionParser(object): raise self._make_multi_error(prevErrors, header) def _parse_initializer(self, outer=None): + # type: (unicode) -> ASTInitializer self.skip_ws() # TODO: support paren and brace initialization for memberObject if not self.skip_string('='): return None else: if outer == 'member': - value = self.read_rest().strip() + value = self.read_rest().strip() # type: unicode elif outer == 'templateParam': value = self._parse_expression(end=[',', '>']) elif outer is None: # function parameter @@ -3642,6 +3983,7 @@ class DefinitionParser(object): return ASTInitializer(value) def _parse_type(self, named, outer=None): + # type: (Union[bool, unicode], unicode) -> ASTType """ named=False|'maybe'|True: 'maybe' is e.g., for function objects which doesn't need to name the arguments @@ -3725,6 +4067,7 @@ class DefinitionParser(object): return ASTType(declSpecs, decl) def _parse_type_with_init(self, named, outer): + # type: (Union[bool, unicode], unicode) -> ASTTypeWithInit if outer: assert outer in ('type', 'member', 'function', 'templateParam') type = self._parse_type(outer=outer, named=named) @@ -3732,6 +4075,7 @@ class DefinitionParser(object): return ASTTypeWithInit(type, init) def _parse_type_using(self): + # type: () -> ASTTypeUsing name = self._parse_nested_name() self.skip_ws() if not self.skip_string('='): @@ -3740,6 +4084,7 @@ class DefinitionParser(object): return ASTTypeUsing(name, type) def _parse_concept(self): + # type: () -> ASTConcept nestedName = self._parse_nested_name() isFunction = False @@ -3757,6 +4102,7 @@ class DefinitionParser(object): return ASTConcept(nestedName, isFunction, initializer) def _parse_class(self): + # type: () -> ASTClass name = self._parse_nested_name() self.skip_ws() final = self.skip_word_and_ws('final') @@ -3765,7 +4111,7 @@ class DefinitionParser(object): if self.skip_string(':'): while 1: self.skip_ws() - visibility = 'private' + visibility = 'private' # type: unicode virtual = False pack = False if self.skip_word_and_ws('virtual'): @@ -3787,7 +4133,8 @@ class DefinitionParser(object): return ASTClass(name, final, bases) def _parse_enum(self): - scoped = None # is set by CPPEnumObject + # type: () -> ASTEnum + scoped = None # type: unicode # is set by CPPEnumObject self.skip_ws() name = self._parse_nested_name() self.skip_ws() @@ -3797,6 +4144,7 @@ class DefinitionParser(object): return ASTEnum(name, scoped, underlyingType) def _parse_enumerator(self): + # type: () -> ASTEnumerator name = self._parse_nested_name() self.skip_ws() init = None @@ -3806,9 +4154,10 @@ class DefinitionParser(object): return ASTEnumerator(name, init) def _parse_template_parameter_list(self): + # type: () -> ASTTemplateParams # only: '<' parameter-list '>' # we assume that 'template' has just been parsed - templateParams = [] + templateParams = [] # type: List self.skip_ws() if 
not self.skip_string("<"): self.fail("Expected '<' after 'template'") @@ -3847,7 +4196,7 @@ class DefinitionParser(object): parameterPack, default) if nestedParams: # template type - param = ASTTemplateParamTemplateType(nestedParams, data) + param = ASTTemplateParamTemplateType(nestedParams, data) # type: Any else: # type param = ASTTemplateParamType(data) @@ -3875,6 +4224,7 @@ class DefinitionParser(object): raise self._make_multi_error(prevErrors, header) def _parse_template_introduction(self): + # type: () -> ASTTemplateIntroduction pos = self.pos try: concept = self._parse_nested_name() @@ -3899,7 +4249,7 @@ class DefinitionParser(object): if identifier in _keywords: self.fail("Expected identifier in template introduction list, " "got keyword: %s" % identifier) - identifier = ASTIdentifier(identifier) + identifier = ASTIdentifier(identifier) # type: ignore params.append(ASTTemplateIntroductionParameter(identifier, parameterPack)) self.skip_ws() @@ -3913,13 +4263,14 @@ class DefinitionParser(object): return ASTTemplateIntroduction(concept, params) def _parse_template_declaration_prefix(self, objectType): - templates = [] + # type: (unicode) -> ASTTemplateDeclarationPrefix + templates = [] # type: List while 1: self.skip_ws() # the saved position is only used to provide a better error message pos = self.pos if self.skip_word("template"): - params = self._parse_template_parameter_list() + params = self._parse_template_parameter_list() # type: Any else: params = self._parse_template_introduction() if not params: @@ -3937,6 +4288,7 @@ class DefinitionParser(object): def _check_template_consistency(self, nestedName, templatePrefix, fullSpecShorthand): + # type: (Any, Any, bool) -> ASTTemplateDeclarationPrefix numArgs = nestedName.num_templates() if not templatePrefix: numParams = 0 @@ -3952,7 +4304,7 @@ class DefinitionParser(object): msg = "Too many template argument lists compared to parameter" \ " lists. Argument lists: %d, Parameter lists: %d," \ " Extra empty parameters lists prepended: %d." \ - % (numArgs, numParams, numExtra) + % (numArgs, numParams, numExtra) # type: unicode msg += " Declaration:\n\t" if templatePrefix: msg += "%s\n\t" % text_type(templatePrefix) @@ -3968,12 +4320,13 @@ class DefinitionParser(object): return templatePrefix def parse_declaration(self, objectType): + # type: (unicode) -> ASTDeclaration if objectType not in ('type', 'concept', 'member', 'function', 'class', 'enum', 'enumerator'): raise Exception('Internal error, unknown objectType "%s".' 
% objectType) visibility = None templatePrefix = None - declaration = None + declaration = None # type: Any self.skip_ws() if self.match(_visibility_re): @@ -4021,6 +4374,7 @@ class DefinitionParser(object): templatePrefix, declaration) def parse_namespace_object(self): + # type: () -> ASTNamespace templatePrefix = self._parse_template_declaration_prefix(objectType="namespace") name = self._parse_nested_name() templatePrefix = self._check_template_consistency(name, templatePrefix, @@ -4030,6 +4384,7 @@ class DefinitionParser(object): return res def parse_xref_object(self): + # type: () -> ASTNamespace templatePrefix = self._parse_template_declaration_prefix(objectType="xref") name = self._parse_nested_name() templatePrefix = self._check_template_consistency(name, templatePrefix, @@ -4040,6 +4395,7 @@ class DefinitionParser(object): def _make_phony_error_name(): + # type: () -> ASTNestedName nne = ASTNestedNameElement(ASTIdentifier("PhonyNameDueToError"), None) return ASTNestedName([nne], rooted=False) @@ -4062,9 +4418,11 @@ class CPPObject(ObjectDescription): ] def warn(self, msg): + # type: (unicode) -> None self.state_machine.reporter.warning(msg, line=self.lineno) def _add_enumerator_to_parent(self, ast): + # type: (Any) -> None assert ast.objectType == 'enumerator' # find the parent, if it exists && is an enum # && it's unscoped, @@ -4106,6 +4464,7 @@ class CPPObject(ObjectDescription): docname=self.env.docname) def add_target_and_index(self, ast, sig, signode): + # type: (Any, unicode, addnodes.desc_signature) -> None # general note: name must be lstrip(':')'ed, to remove "::" try: id_v1 = ast.get_id_v1() @@ -4152,12 +4511,15 @@ class CPPObject(ObjectDescription): self.state.document.note_explicit_target(signode) def parse_definition(self, parser): + # type: (Any) -> Any raise NotImplementedError() def describe_signature(self, signode, ast, parentScope): + # type: (addnodes.desc_signature, Any, Any) -> None raise NotImplementedError() def handle_signature(self, sig, signode): + # type: (unicode, addnodes.desc_signature) -> Any if 'cpp:parent_symbol' not in self.env.ref_context: root = self.env.domaindata['cpp']['root_symbol'] self.env.ref_context['cpp:parent_symbol'] = root @@ -4191,75 +4553,94 @@ class CPPObject(ObjectDescription): return ast def before_content(self): + # type: () -> None lastSymbol = self.env.ref_context['cpp:last_symbol'] assert lastSymbol self.oldParentSymbol = self.env.ref_context['cpp:parent_symbol'] self.env.ref_context['cpp:parent_symbol'] = lastSymbol def after_content(self): + # type: () -> None self.env.ref_context['cpp:parent_symbol'] = self.oldParentSymbol class CPPTypeObject(CPPObject): def get_index_text(self, name): + # type: (unicode) -> unicode return _('%s (C++ type)') % name def parse_definition(self, parser): + # type: (Any) -> Any return parser.parse_declaration("type") - def describe_signature(self, signode, ast): + def describe_signature(self, signode, ast): # type: ignore + # type: (addnodes.desc_signature, Any) -> None ast.describe_signature(signode, 'lastIsName', self.env) class CPPConceptObject(CPPObject): def get_index_text(self, name): + # type: (unicode) -> unicode return _('%s (C++ concept)') % name def parse_definition(self, parser): + # type: (Any) -> Any return parser.parse_declaration("concept") - def describe_signature(self, signode, ast): + def describe_signature(self, signode, ast): # type: ignore + # type: (addnodes.desc_signature, Any) -> None ast.describe_signature(signode, 'lastIsName', self.env) class 
CPPMemberObject(CPPObject): def get_index_text(self, name): + # type: (unicode) -> unicode return _('%s (C++ member)') % name def parse_definition(self, parser): + # type: (Any) -> Any return parser.parse_declaration("member") - def describe_signature(self, signode, ast): + def describe_signature(self, signode, ast): # type: ignore + # type: (addnodes.desc_signature, Any) -> None ast.describe_signature(signode, 'lastIsName', self.env) class CPPFunctionObject(CPPObject): def get_index_text(self, name): + # type: (unicode) -> unicode return _('%s (C++ function)') % name def parse_definition(self, parser): + # type: (Any) -> Any return parser.parse_declaration("function") - def describe_signature(self, signode, ast): + def describe_signature(self, signode, ast): # type: ignore + # type: (addnodes.desc_signature, Any) -> None ast.describe_signature(signode, 'lastIsName', self.env) class CPPClassObject(CPPObject): def get_index_text(self, name): + # type: (unicode) -> unicode return _('%s (C++ class)') % name def parse_definition(self, parser): + # type: (Any) -> Any return parser.parse_declaration("class") - def describe_signature(self, signode, ast): + def describe_signature(self, signode, ast): # type: ignore + # type: (addnodes.desc_signature, Any) -> None ast.describe_signature(signode, 'lastIsName', self.env) class CPPEnumObject(CPPObject): def get_index_text(self, name): + # type: (unicode) -> unicode return _('%s (C++ enum)') % name def parse_definition(self, parser): + # type: (Any) -> Any ast = parser.parse_declaration("enum") # self.objtype is set by ObjectDescription in run() if self.objtype == "enum": @@ -4272,18 +4653,22 @@ class CPPEnumObject(CPPObject): assert False return ast - def describe_signature(self, signode, ast): + def describe_signature(self, signode, ast): # type: ignore + # type: (addnodes.desc_signature, Any) -> None ast.describe_signature(signode, 'lastIsName', self.env) class CPPEnumeratorObject(CPPObject): def get_index_text(self, name): + # type: (unicode) -> unicode return _('%s (C++ enumerator)') % name def parse_definition(self, parser): + # type: (Any) -> Any return parser.parse_declaration("enumerator") - def describe_signature(self, signode, ast): + def describe_signature(self, signode, ast): # type: ignore + # type: (addnodes.desc_signature, Any) -> None ast.describe_signature(signode, 'lastIsName', self.env) @@ -4297,17 +4682,19 @@ class CPPNamespaceObject(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def warn(self, msg): + # type: (unicode) -> None self.state_machine.reporter.warning(msg, line=self.lineno) def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env rootSymbol = env.domaindata['cpp']['root_symbol'] if self.arguments[0].strip() in ('NULL', '0', 'nullptr'): symbol = rootSymbol - stack = [] + stack = [] # type: List[Symbol] else: parser = DefinitionParser(self.arguments[0], self, env.config) try: @@ -4329,12 +4716,14 @@ class CPPNamespacePushObject(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def warn(self, msg): + # type: (unicode) -> None self.state_machine.reporter.warning(msg, line=self.lineno) def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env if self.arguments[0].strip() in ('NULL', '0', 'nullptr'): return @@ -4362,12 +4751,14 @@ class CPPNamespacePopObject(Directive): 
required_arguments = 0 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def warn(self, msg): + # type: (unicode) -> None self.state_machine.reporter.warning(msg, line=self.lineno) def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env stack = env.temp_data.get('cpp:namespace_stack', None) if not stack or len(stack) == 0: @@ -4386,6 +4777,7 @@ class CPPNamespacePopObject(Directive): class CPPXRefRole(XRefRole): def process_link(self, env, refnode, has_explicit_title, title, target): + # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA parent = env.ref_context.get('cpp:parent_symbol', None) if parent: refnode['cpp:parent_key'] = parent.get_lookup_key() @@ -4455,6 +4847,7 @@ class CPPDomain(Domain): } def clear_doc(self, docname): + # type: (unicode) -> None rootSymbol = self.data['root_symbol'] rootSymbol.clear_doc(docname) for name, nDocname in list(self.data['names'].items()): @@ -4462,12 +4855,14 @@ class CPPDomain(Domain): del self.data['names'][name] def process_doc(self, env, docname, document): + # type: (BuildEnvironment, unicode, nodes.Node) -> None # just for debugging # print(docname) # print(self.data['root_symbol'].dump(0)) pass def merge_domaindata(self, docnames, otherdata): + # type: (List[unicode], Dict) -> None self.data['root_symbol'].merge_with(otherdata['root_symbol'], docnames, self.env) ourNames = self.data['names'] @@ -4483,6 +4878,7 @@ class CPPDomain(Domain): def _resolve_xref_inner(self, env, fromdocname, builder, typ, target, node, contnode, emitWarnings=True): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node, bool) -> nodes.Node # NOQA class Warner(object): def warn(self, msg): if emitWarnings: @@ -4562,11 +4958,13 @@ class CPPDomain(Domain): def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA return self._resolve_xref_inner(env, fromdocname, builder, typ, target, node, contnode)[0] def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA node, objtype = self._resolve_xref_inner(env, fromdocname, builder, 'any', target, node, contnode, emitWarnings=False) @@ -4575,6 +4973,7 @@ class CPPDomain(Domain): return [] def get_objects(self): + # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]] rootSymbol = self.data['root_symbol'] for symbol in rootSymbol.get_all_symbols(): if symbol.declaration is None: @@ -4588,6 +4987,7 @@ class CPPDomain(Domain): def setup(app): + # type: (Sphinx) -> None app.add_domain(CPPDomain) app.add_config_value("cpp_index_common_prefix", [], 'env') app.add_config_value("cpp_id_attributes", [], 'env') diff --git a/sphinx/domains/javascript.py b/sphinx/domains/javascript.py index ade6e4224..5c2eead01 100644 --- a/sphinx/domains/javascript.py +++ b/sphinx/domains/javascript.py @@ -18,6 +18,14 @@ from sphinx.domains.python import _pseudo_parse_arglist from sphinx.util.nodes import make_refnode from sphinx.util.docfields import Field, GroupedField, TypedField +if False: + # For type annotation + from typing import Iterator, Tuple # NOQA + from docutils import nodes # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.builders import Builder # NOQA + from 
sphinx.environment import BuildEnvironment # NOQA + class JSObject(ObjectDescription): """ @@ -28,9 +36,10 @@ class JSObject(ObjectDescription): has_arguments = False #: what is displayed right before the documentation entry - display_prefix = None + display_prefix = None # type: unicode def handle_signature(self, sig, signode): + # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode] sig = sig.strip() if '(' in sig and sig[-1:] == ')': prefix, arglist = sig.split('(', 1) @@ -76,6 +85,7 @@ class JSObject(ObjectDescription): return fullname, nameprefix def add_target_and_index(self, name_obj, sig, signode): + # type: (Tuple[unicode, unicode], unicode, addnodes.desc_signature) -> None objectname = self.options.get( 'object', self.env.ref_context.get('js:object')) fullname = name_obj[0] @@ -100,6 +110,7 @@ class JSObject(ObjectDescription): '', None)) def get_index_text(self, objectname, name_obj): + # type: (unicode, Tuple[unicode, unicode]) -> unicode name, obj = name_obj if self.objtype == 'function': if not obj: @@ -139,6 +150,7 @@ class JSConstructor(JSCallable): class JSXRefRole(XRefRole): def process_link(self, env, refnode, has_explicit_title, title, target): + # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA # basically what sphinx.domains.python.PyXRefRole does refnode['js:object'] = env.ref_context.get('js:object') if not has_explicit_title: @@ -180,20 +192,23 @@ class JavaScriptDomain(Domain): } initial_data = { 'objects': {}, # fullname -> docname, objtype - } + } # type: Dict[unicode, Dict[unicode, Tuple[unicode, unicode]]] def clear_doc(self, docname): + # type: (unicode) -> None for fullname, (fn, _l) in list(self.data['objects'].items()): if fn == docname: del self.data['objects'][fullname] def merge_domaindata(self, docnames, otherdata): + # type: (List[unicode], Dict) -> None # XXX check duplicates for fullname, (fn, objtype) in otherdata['objects'].items(): if fn in docnames: self.data['objects'][fullname] = (fn, objtype) def find_obj(self, env, obj, name, typ, searchorder=0): + # type: (BuildEnvironment, unicode, unicode, unicode, int) -> Tuple[unicode, Tuple[unicode, unicode]] # NOQA if name[-2:] == '()': name = name[:-2] objects = self.data['objects'] @@ -212,6 +227,7 @@ class JavaScriptDomain(Domain): def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA objectname = node.get('js:object') searchorder = node.hasattr('refspecific') and 1 or 0 name, obj = self.find_obj(env, objectname, target, typ, searchorder) @@ -222,6 +238,7 @@ class JavaScriptDomain(Domain): def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA objectname = node.get('js:object') name, obj = self.find_obj(env, objectname, target, None, 1) if not obj: @@ -231,10 +248,12 @@ class JavaScriptDomain(Domain): name.replace('$', '_S_'), contnode, name))] def get_objects(self): + # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]] for refname, (docname, type) in list(self.data['objects'].items()): yield refname, refname, type, docname, \ refname.replace('$', '_S_'), 1 def setup(app): + # type: (Sphinx) -> None app.add_domain(JavaScriptDomain) diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py index d37e55fa3..4f0d0f1ae 100644 --- 
a/sphinx/domains/python.py +++ b/sphinx/domains/python.py @@ -12,6 +12,7 @@ import re from six import iteritems + from docutils import nodes from docutils.parsers.rst import directives @@ -24,6 +25,13 @@ from sphinx.util.nodes import make_refnode from sphinx.util.compat import Directive from sphinx.util.docfields import Field, GroupedField, TypedField +if False: + # For type annotation + from typing import Any, Iterator, Tuple, Union # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + # REs for Python signatures py_sig_re = re.compile( @@ -36,6 +44,7 @@ py_sig_re = re.compile( def _pseudo_parse_arglist(signode, arglist): + # type: (addnodes.desc_signature, unicode) -> None """"Parse" a list of arguments separated by commas. Arguments can have "optional" annotations given by enclosing them in @@ -87,7 +96,8 @@ def _pseudo_parse_arglist(signode, arglist): class PyXrefMixin(object): def make_xref(self, rolename, domain, target, innernode=nodes.emphasis, contnode=None): - result = super(PyXrefMixin, self).make_xref(rolename, domain, target, + # type: (unicode, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node + result = super(PyXrefMixin, self).make_xref(rolename, domain, target, # type: ignore innernode, contnode) result['refspecific'] = True if target.startswith(('.', '~')): @@ -103,6 +113,7 @@ class PyXrefMixin(object): def make_xrefs(self, rolename, domain, target, innernode=nodes.emphasis, contnode=None): + # type: (unicode, unicode, unicode, nodes.Node, nodes.Node) -> List[nodes.Node] delims = '(\s*[\[\]\(\),](?:\s*or\s)?\s*|\s+or\s+)' delims_re = re.compile(delims) sub_targets = re.split(delims, target) @@ -114,7 +125,7 @@ class PyXrefMixin(object): if split_contnode: contnode = nodes.Text(sub_target) - if delims_re.match(sub_target): + if delims_re.match(sub_target): # type: ignore results.append(contnode or innernode(sub_target, sub_target)) else: results.append(self.make_xref(rolename, domain, sub_target, @@ -165,18 +176,21 @@ class PyObject(ObjectDescription): ] def get_signature_prefix(self, sig): + # type: (unicode) -> unicode """May return a prefix to put before the object name in the signature. """ return '' def needs_arglist(self): + # type: () -> bool """May return true if an empty argument list is to be generated even if the document contains none. """ return False - def handle_signature(self, sig, signode): + def handle_signature(self, sig, signode): # type: ignore + # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode] """Transform a Python signature into RST nodes. Return (fully qualified name of the thing, classname if any). 
@@ -185,7 +199,7 @@ class PyObject(ObjectDescription): * it is stripped from the displayed name if present * it is added to the full name (return value) if not present """ - m = py_sig_re.match(sig) + m = py_sig_re.match(sig) # type: ignore if m is None: raise ValueError name_prefix, name, arglist, retann = m.groups() @@ -256,10 +270,12 @@ class PyObject(ObjectDescription): return fullname, name_prefix def get_index_text(self, modname, name): + # type: (unicode, unicode) -> unicode """Return the text for the index entry of the object.""" raise NotImplementedError('must be implemented in subclasses') def add_target_and_index(self, name_cls, sig, signode): + # type: (unicode, unicode, addnodes.desc_signature) -> None modname = self.options.get( 'module', self.env.ref_context.get('py:module')) fullname = (modname and modname + '.' or '') + name_cls[0] @@ -285,10 +301,12 @@ class PyObject(ObjectDescription): fullname, '', None)) def before_content(self): + # type: () -> None # needed for automatic qualification of members (reset in subclasses) self.clsname_set = False def after_content(self): + # type: () -> None if self.clsname_set: self.env.ref_context.pop('py:class', None) @@ -299,9 +317,11 @@ class PyModulelevel(PyObject): """ def needs_arglist(self): + # type: () -> bool return self.objtype == 'function' def get_index_text(self, modname, name_cls): + # type: (unicode, unicode) -> unicode if self.objtype == 'function': if not modname: return _('%s() (built-in function)') % name_cls[0] @@ -320,9 +340,11 @@ class PyClasslike(PyObject): """ def get_signature_prefix(self, sig): + # type: (unicode) -> unicode return self.objtype + ' ' def get_index_text(self, modname, name_cls): + # type: (unicode, unicode) -> unicode if self.objtype == 'class': if not modname: return _('%s (built-in class)') % name_cls[0] @@ -333,6 +355,7 @@ class PyClasslike(PyObject): return '' def before_content(self): + # type: () -> None PyObject.before_content(self) if self.names: self.env.ref_context['py:class'] = self.names[0][0] @@ -345,9 +368,11 @@ class PyClassmember(PyObject): """ def needs_arglist(self): + # type: () -> bool return self.objtype.endswith('method') def get_signature_prefix(self, sig): + # type: (unicode) -> unicode if self.objtype == 'staticmethod': return 'static ' elif self.objtype == 'classmethod': @@ -355,6 +380,7 @@ class PyClassmember(PyObject): return '' def get_index_text(self, modname, name_cls): + # type: (unicode, unicode) -> unicode name, cls = name_cls add_modules = self.env.config.add_module_names if self.objtype == 'method': @@ -411,6 +437,7 @@ class PyClassmember(PyObject): return '' def before_content(self): + # type: () -> None PyObject.before_content(self) lastname = self.names and self.names[-1][1] if lastname and not self.env.ref_context.get('py:class'): @@ -423,11 +450,13 @@ class PyDecoratorMixin(object): Mixin for decorator directives. """ def handle_signature(self, sig, signode): - ret = super(PyDecoratorMixin, self).handle_signature(sig, signode) + # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode] + ret = super(PyDecoratorMixin, self).handle_signature(sig, signode) # type: ignore signode.insert(0, addnodes.desc_addname('@', '@')) return ret def needs_arglist(self): + # type: () -> bool return False @@ -436,6 +465,7 @@ class PyDecoratorFunction(PyDecoratorMixin, PyModulelevel): Directive to mark functions meant to be used as decorators. 
""" def run(self): + # type: () -> List[nodes.Node] # a decorator function is a function after all self.name = 'py:function' return PyModulelevel.run(self) @@ -446,6 +476,7 @@ class PyDecoratorMethod(PyDecoratorMixin, PyClassmember): Directive to mark methods meant to be used as decorators. """ def run(self): + # type: () -> List[nodes.Node] self.name = 'py:method' return PyClassmember.run(self) @@ -467,6 +498,7 @@ class PyModule(Directive): } def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env modname = self.arguments[0].strip() noindex = 'noindex' in self.options @@ -502,9 +534,10 @@ class PyCurrentModule(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = False - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env modname = self.arguments[0].strip() if modname == 'None': @@ -516,6 +549,7 @@ class PyCurrentModule(Directive): class PyXRefRole(XRefRole): def process_link(self, env, refnode, has_explicit_title, title, target): + # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA refnode['py:module'] = env.ref_context.get('py:module') refnode['py:class'] = env.ref_context.get('py:class') if not has_explicit_title: @@ -546,9 +580,11 @@ class PythonModuleIndex(Index): shortname = l_('modules') def generate(self, docnames=None): - content = {} + # type: (List[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA + content = {} # type: Dict[unicode, List] # list of prefixes to ignore - ignores = self.domain.env.config['modindex_common_prefix'] + ignores = None # type: List[unicode] + ignores = self.domain.env.config['modindex_common_prefix'] # type: ignore ignores = sorted(ignores, key=len, reverse=True) # list of all modules, sorted by module name modules = sorted(iteritems(self.domain.data['modules']), @@ -601,9 +637,9 @@ class PythonModuleIndex(Index): collapse = len(modules) - num_toplevels < num_toplevels # sort by first letter - content = sorted(iteritems(content)) + sorted_content = sorted(iteritems(content)) - return content, collapse + return sorted_content, collapse class PythonDomain(Domain): @@ -620,7 +656,7 @@ class PythonDomain(Domain): 'staticmethod': ObjType(l_('static method'), 'meth', 'obj'), 'attribute': ObjType(l_('attribute'), 'attr', 'obj'), 'module': ObjType(l_('module'), 'mod', 'obj'), - } + } # type: Dict[unicode, ObjType] directives = { 'function': PyModulelevel, @@ -650,12 +686,13 @@ class PythonDomain(Domain): initial_data = { 'objects': {}, # fullname -> docname, objtype 'modules': {}, # modname -> docname, synopsis, platform, deprecated - } + } # type: Dict[unicode, Dict[unicode, Tuple[Any]]] indices = [ PythonModuleIndex, ] def clear_doc(self, docname): + # type: (unicode) -> None for fullname, (fn, _l) in list(self.data['objects'].items()): if fn == docname: del self.data['objects'][fullname] @@ -664,6 +701,7 @@ class PythonDomain(Domain): del self.data['modules'][modname] def merge_domaindata(self, docnames, otherdata): + # type: (List[unicode], Dict) -> None # XXX check duplicates? 
for fullname, (fn, objtype) in otherdata['objects'].items(): if fn in docnames: @@ -673,6 +711,7 @@ class PythonDomain(Domain): self.data['modules'][modname] = data def find_obj(self, env, modname, classname, name, type, searchmode=0): + # type: (BuildEnvironment, unicode, unicode, unicode, unicode, int) -> List[Tuple[unicode, Any]] # NOQA """Find a Python object for "name", perhaps using the given module and/or classname. Returns a list of (name, object entry) tuples. """ @@ -684,7 +723,7 @@ class PythonDomain(Domain): return [] objects = self.data['objects'] - matches = [] + matches = [] # type: List[Tuple[unicode, Any]] newname = None if searchmode == 1: @@ -737,6 +776,7 @@ class PythonDomain(Domain): def resolve_xref(self, env, fromdocname, builder, type, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA modname = node.get('py:module') clsname = node.get('py:class') searchmode = node.hasattr('refspecific') and 1 or 0 @@ -760,9 +800,10 @@ class PythonDomain(Domain): def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA modname = node.get('py:module') clsname = node.get('py:class') - results = [] + results = [] # type: List[Tuple[unicode, nodes.Node]] # always search in "refspecific" mode with the :any: role matches = self.find_obj(env, modname, clsname, target, None, 1) @@ -778,6 +819,7 @@ class PythonDomain(Domain): return results def _make_module_refnode(self, builder, fromdocname, name, contnode): + # type: (Builder, unicode, unicode, nodes.Node) -> nodes.Node # get additional info for modules docname, synopsis, platform, deprecated = self.data['modules'][name] title = name @@ -791,6 +833,7 @@ class PythonDomain(Domain): 'module-' + name, contnode, title) def get_objects(self): + # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]] for modname, info in iteritems(self.data['modules']): yield (modname, modname, 'module', info[0], 'module-' + modname, 0) for refname, (docname, type) in iteritems(self.data['objects']): @@ -799,4 +842,5 @@ class PythonDomain(Domain): def setup(app): + # type: (Sphinx) -> None app.add_domain(PythonDomain) diff --git a/sphinx/domains/rst.py b/sphinx/domains/rst.py index 526ae18a7..fa3353aa6 100644 --- a/sphinx/domains/rst.py +++ b/sphinx/domains/rst.py @@ -20,6 +20,14 @@ from sphinx.directives import ObjectDescription from sphinx.roles import XRefRole from sphinx.util.nodes import make_refnode +if False: + # For type annotation + from typing import Iterator, Tuple # NOQA + from docutils import nodes # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + dir_sig_re = re.compile(r'\.\. 
(.+?)::(.*)$') @@ -30,6 +38,7 @@ class ReSTMarkup(ObjectDescription): """ def add_target_and_index(self, name, sig, signode): + # type: (unicode, unicode, addnodes.desc_signature) -> None targetname = self.objtype + '-' + name if targetname not in self.state.document.ids: signode['names'].append(targetname) @@ -51,6 +60,7 @@ class ReSTMarkup(ObjectDescription): targetname, '', None)) def get_index_text(self, objectname, name): + # type: (unicode, unicode) -> unicode if self.objtype == 'directive': return _('%s (directive)') % name elif self.objtype == 'role': @@ -59,6 +69,7 @@ class ReSTMarkup(ObjectDescription): def parse_directive(d): + # type: (unicode) -> Tuple[unicode, unicode] """Parse a directive signature. Returns (directive, arguments) string tuple. If no arguments are given, @@ -68,7 +79,7 @@ def parse_directive(d): if not dir.startswith('.'): # Assume it is a directive without syntax return (dir, '') - m = dir_sig_re.match(dir) + m = dir_sig_re.match(dir) # type: ignore if not m: return (dir, '') parsed_dir, parsed_args = m.groups() @@ -80,6 +91,7 @@ class ReSTDirective(ReSTMarkup): Description of a reST directive. """ def handle_signature(self, sig, signode): + # type: (unicode, addnodes.desc_signature) -> unicode name, args = parse_directive(sig) desc_name = '.. %s::' % name signode += addnodes.desc_name(desc_name, desc_name) @@ -93,6 +105,7 @@ class ReSTRole(ReSTMarkup): Description of a reST role. """ def handle_signature(self, sig, signode): + # type: (unicode, addnodes.desc_signature) -> unicode signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig) return sig @@ -116,14 +129,16 @@ class ReSTDomain(Domain): } initial_data = { 'objects': {}, # fullname -> docname, objtype - } + } # type: Dict[unicode, Dict[unicode, Tuple[unicode, ObjType]]] def clear_doc(self, docname): + # type: (unicode) -> None for (typ, name), doc in list(self.data['objects'].items()): if doc == docname: del self.data['objects'][typ, name] def merge_domaindata(self, docnames, otherdata): + # type: (List[unicode], Dict) -> None # XXX check duplicates for (typ, name), doc in otherdata['objects'].items(): if doc in docnames: @@ -131,6 +146,7 @@ class ReSTDomain(Domain): def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA objects = self.data['objects'] objtypes = self.objtypes_for_role(typ) for objtype in objtypes: @@ -142,6 +158,7 @@ class ReSTDomain(Domain): def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[nodes.Node] # NOQA objects = self.data['objects'] results = [] for objtype in self.object_types: @@ -154,9 +171,11 @@ class ReSTDomain(Domain): return results def get_objects(self): + # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]] for (typ, name), docname in iteritems(self.data['objects']): yield name, name, typ, docname, typ + '-' + name, 1 def setup(app): + # type: (Sphinx) -> None app.add_domain(ReSTDomain) diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py index b7f2597d4..6044b5d59 100644 --- a/sphinx/domains/std.py +++ b/sphinx/domains/std.py @@ -12,7 +12,8 @@ import re import unicodedata -from six import iteritems +from six import PY3, iteritems + from docutils import nodes from docutils.parsers.rst import directives from docutils.statemachine import ViewList @@ -26,6 +27,21 @@ from sphinx.util import 
ws_re from sphinx.util.nodes import clean_astext, make_refnode from sphinx.util.compat import Directive +if False: + # For type annotation + from typing import Any, Callable, Dict, Iterator, List, Tuple, Type, Union # NOQA + from docutils.parsers.rst.states import Inliner # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + from sphinx.util.typing import Role # NOQA + + if PY3: + unicode = str + + RoleFunction = Callable[[unicode, unicode, unicode, int, Inliner, Dict, List[unicode]], + Tuple[List[nodes.Node], List[nodes.Node]]] + # RE for option descriptions option_desc_re = re.compile(r'((?:/|--|-|\+)?[-\.?@#_a-zA-Z0-9]+)(=?\s*.*)') @@ -38,9 +54,10 @@ class GenericObject(ObjectDescription): A generic x-ref directive registered with Sphinx.add_object_type(). """ indextemplate = '' - parse_node = None + parse_node = None # type: Callable[[GenericObject, BuildEnvironment, unicode, addnodes.desc_signature], unicode] # NOQA def handle_signature(self, sig, signode): + # type: (unicode, addnodes.desc_signature) -> unicode if self.parse_node: name = self.parse_node(self.env, sig, signode) else: @@ -51,6 +68,7 @@ class GenericObject(ObjectDescription): return name def add_target_and_index(self, name, sig, signode): + # type: (unicode, unicode, addnodes.desc_signature) -> None targetname = '%s-%s' % (self.objtype, name) signode['ids'].append(targetname) self.state.document.note_explicit_target(signode) @@ -78,6 +96,7 @@ class EnvVarXRefRole(XRefRole): """ def result_nodes(self, document, env, node, is_ref): + # type: (nodes.Node, BuildEnvironment, nodes.Node, bool) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA if not is_ref: return [node], [] varname = node['reftarget'] @@ -102,9 +121,10 @@ class Target(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env # normalize whitespace in fullname like XRefRole does fullname = ws_re.sub(' ', self.arguments[0].strip()) @@ -136,12 +156,13 @@ class Cmdoption(ObjectDescription): """ def handle_signature(self, sig, signode): + # type: (unicode, addnodes.desc_signature) -> unicode """Transform an option description into RST nodes.""" count = 0 firstname = '' for potential_option in sig.split(', '): potential_option = potential_option.strip() - m = option_desc_re.match(potential_option) + m = option_desc_re.match(potential_option) # type: ignore if not m: self.env.warn( self.env.docname, @@ -166,6 +187,7 @@ class Cmdoption(ObjectDescription): return firstname def add_target_and_index(self, firstname, sig, signode): + # type: (unicode, unicode, addnodes.desc_signature) -> None currprogram = self.env.ref_context.get('std:program') for optname in signode.get('allnames', []): targetname = optname.replace('/', '-') @@ -197,9 +219,10 @@ class Program(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env program = ws_re.sub('-', self.arguments[0].strip()) if program == 'None': @@ -211,17 +234,20 @@ class Program(Directive): class OptionXRefRole(XRefRole): def process_link(self, env, refnode, has_explicit_title, title, target): + # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] 
# NOQA refnode['std:program'] = env.ref_context.get('std:program') return title, target def split_term_classifiers(line): + # type: (unicode) -> List[Union[unicode, None]] # split line into a term and classifiers. if no classifier, None is used.. parts = re.split(' +: +', line) + [None] return parts def make_glossary_term(env, textnodes, index_key, source, lineno, new_id=None): + # type: (BuildEnvironment, List[nodes.Node], unicode, unicode, int, unicode) -> nodes.term # get a text-only representation of the term and register it # as a cross-reference target term = nodes.term('', '', *textnodes) @@ -265,6 +291,7 @@ class Glossary(Directive): } def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env node = addnodes.glossary() node.document = self.state.document @@ -275,7 +302,7 @@ class Glossary(Directive): # be* a definition list. # first, collect single entries - entries = [] + entries = [] # type: List[Tuple[List[Tuple[unicode, unicode, int]], ViewList]] in_definition = True was_empty = True messages = [] @@ -329,7 +356,7 @@ class Glossary(Directive): for terms, definition in entries: termtexts = [] termnodes = [] - system_messages = [] + system_messages = [] # type: List[unicode] for line, source, lineno in terms: parts = split_term_classifiers(line) # parse the term with inline markup @@ -365,9 +392,10 @@ class Glossary(Directive): def token_xrefs(text): + # type: (unicode) -> List[nodes.Node] retnodes = [] pos = 0 - for m in token_re.finditer(text): + for m in token_re.finditer(text): # type: ignore if m.start() > pos: txt = text[pos:m.start()] retnodes.append(nodes.Text(txt, txt)) @@ -390,13 +418,14 @@ class ProductionList(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env objects = env.domaindata['std']['objects'] node = addnodes.productionlist() - messages = [] + messages = [] # type: List[nodes.Node] i = 0 for rule in self.arguments[0].split('\n'): @@ -437,7 +466,7 @@ class StandardDomain(Domain): searchprio=-1), 'envvar': ObjType(l_('environment variable'), 'envvar'), 'cmdoption': ObjType(l_('program option'), 'option'), - } + } # type: Dict[unicode, ObjType] directives = { 'program': Program, @@ -446,7 +475,7 @@ class StandardDomain(Domain): 'envvar': EnvVar, 'glossary': Glossary, 'productionlist': ProductionList, - } + } # type: Dict[unicode, Type[Directive]] roles = { 'option': OptionXRefRole(warn_dangling=True), 'envvar': EnvVarXRefRole(), @@ -463,7 +492,7 @@ class StandardDomain(Domain): warn_dangling=True), # links to labels, without a different title 'keyword': XRefRole(warn_dangling=True), - } + } # type: Dict[unicode, Union[RoleFunction, XRefRole]] initial_data = { 'progoptions': {}, # (program, name) -> docname, labelid @@ -495,9 +524,10 @@ class StandardDomain(Domain): nodes.figure: ('figure', None), nodes.table: ('table', None), nodes.container: ('code-block', None), - } + } # type: Dict[nodes.Node, Tuple[unicode, Callable]] def clear_doc(self, docname): + # type: (unicode) -> None for key, (fn, _l) in list(self.data['progoptions'].items()): if fn == docname: del self.data['progoptions'][key] @@ -515,6 +545,7 @@ class StandardDomain(Domain): del self.data['anonlabels'][key] def merge_domaindata(self, docnames, otherdata): + # type: (List[unicode], Dict) -> None # XXX duplicates? 
for key, data in otherdata['progoptions'].items(): if data[0] in docnames: @@ -533,10 +564,12 @@ class StandardDomain(Domain): self.data['anonlabels'][key] = data def process_doc(self, env, docname, document): + # type: (BuildEnvironment, unicode, nodes.Node) -> None self.note_citations(env, docname, document) self.note_labels(env, docname, document) def note_citations(self, env, docname, document): + # type: (BuildEnvironment, unicode, nodes.Node) -> None for node in document.traverse(nodes.citation): label = node[0].astext() if label in self.data['citations']: @@ -546,6 +579,7 @@ class StandardDomain(Domain): self.data['citations'][label] = (docname, node['ids'][0]) def note_labels(self, env, docname, document): + # type: (BuildEnvironment, unicode, nodes.Node) -> None labels, anonlabels = self.data['labels'], self.data['anonlabels'] for name, explicit in iteritems(document.nametypes): if not explicit: @@ -585,6 +619,7 @@ class StandardDomain(Domain): def build_reference_node(self, fromdocname, builder, docname, labelid, sectname, rolename, **options): + # type: (unicode, Builder, unicode, unicode, unicode, unicode, Any) -> nodes.Node nodeclass = options.pop('nodeclass', nodes.reference) newnode = nodeclass('', '', internal=True, **options) innernode = nodes.inline(sectname, sectname) @@ -608,6 +643,7 @@ class StandardDomain(Domain): return newnode def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA if typ == 'ref': resolver = self._resolve_ref_xref elif typ == 'numref': @@ -624,6 +660,7 @@ class StandardDomain(Domain): return resolver(env, fromdocname, builder, typ, target, node, contnode) def _resolve_ref_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA if node['refexplicit']: # reference to anonymous label; the reference uses # the supplied link caption @@ -641,6 +678,7 @@ class StandardDomain(Domain): docname, labelid, sectname, 'ref') def _resolve_numref_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA if target in self.data['labels']: docname, labelid, figname = self.data['labels'].get(target, ('', '', '')) else: @@ -700,6 +738,7 @@ class StandardDomain(Domain): title=title) def _resolve_keyword_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA # keywords are oddballs: they are referenced by named labels docname, labelid, _ = self.data['labels'].get(target, ('', '', '')) if not docname: @@ -708,13 +747,14 @@ class StandardDomain(Domain): labelid, contnode) def _resolve_option_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA progname = node.get('std:program') target = target.strip() docname, labelid = self.data['progoptions'].get((progname, target), ('', '')) if not docname: commands = [] - while ws_re.search(target): - subcommand, target = ws_re.split(target, 1) + while ws_re.search(target): # type: ignore + subcommand, target = ws_re.split(target, 1) # type: ignore commands.append(subcommand) progname = "-".join(commands) @@ -729,6 +769,7 
@@ class StandardDomain(Domain): labelid, contnode) def _resolve_citation_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA from sphinx.environment import NoUri docname, labelid = self.data['citations'].get(target, ('', '')) @@ -751,6 +792,7 @@ class StandardDomain(Domain): raise def _resolve_obj_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA objtypes = self.objtypes_for_role(typ) or [] for objtype in objtypes: if (objtype, target) in self.data['objects']: @@ -764,7 +806,8 @@ class StandardDomain(Domain): labelid, contnode) def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): - results = [] + # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA + results = [] # type: List[Tuple[unicode, nodes.Node]] ltarget = target.lower() # :ref: lowercases its target automatically for role in ('ref', 'option'): # do not try "keyword" res = self.resolve_xref(env, fromdocname, builder, role, @@ -785,6 +828,7 @@ class StandardDomain(Domain): return results def get_objects(self): + # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]] # handle the special 'doc' reference here for doc in self.env.all_docs: yield (doc, clean_astext(self.env.titles[doc]), 'doc', doc, '', -1) @@ -802,13 +846,16 @@ class StandardDomain(Domain): yield (name, name, 'label', info[0], info[1], -1) def get_type_name(self, type, primary=False): + # type: (ObjType, bool) -> unicode # never prepend "Default" return type.lname def is_enumerable_node(self, node): + # type: (nodes.Node) -> bool return node.__class__ in self.enumerable_nodes def get_numfig_title(self, node): + # type: (nodes.Node) -> unicode """Get the title of enumerable nodes to refer them using its title""" if self.is_enumerable_node(node): _, title_getter = self.enumerable_nodes.get(node.__class__, (None, None)) @@ -822,6 +869,7 @@ class StandardDomain(Domain): return None def get_figtype(self, node): + # type: (nodes.Node) -> unicode """Get figure type of nodes.""" def has_child(node, cls): return any(isinstance(child, cls) for child in node) @@ -838,6 +886,7 @@ class StandardDomain(Domain): return figtype def get_fignumber(self, env, builder, figtype, docname, target_node): + # type: (BuildEnvironment, Builder, unicode, unicode, nodes.Node) -> Tuple[int, ...] 
if figtype == 'section': if builder.name == 'latex': return tuple() @@ -861,4 +910,5 @@ class StandardDomain(Domain): def setup(app): + # type: (Sphinx) -> None app.add_domain(StandardDomain) From 23b1c3d5f203972d1d7a254594f0e7ca4baf7b59 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA Date: Tue, 8 Nov 2016 17:47:52 +0900 Subject: [PATCH 008/190] Add type-check annotations to sphinx.builders --- sphinx/builders/__init__.py | 61 ++++++++++++++---- sphinx/builders/applehelp.py | 12 +++- sphinx/builders/changes.py | 29 ++++++--- sphinx/builders/devhelp.py | 18 +++++- sphinx/builders/epub.py | 79 ++++++++++++++++------- sphinx/builders/gettext.py | 53 ++++++++++++---- sphinx/builders/html.py | 119 +++++++++++++++++++++++++++-------- sphinx/builders/latex.py | 34 +++++++--- sphinx/builders/linkcheck.py | 47 ++++++++++---- sphinx/builders/manpage.py | 18 +++++- sphinx/builders/qthelp.py | 37 +++++++---- sphinx/builders/texinfo.py | 27 ++++++-- sphinx/builders/text.py | 2 + 13 files changed, 410 insertions(+), 126 deletions(-) diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index fe0c9c665..78ce7d89e 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -22,7 +22,7 @@ from docutils import nodes from sphinx.util import i18n, path_stabilize from sphinx.util.osutil import SEP, relative_uri from sphinx.util.i18n import find_catalog -from sphinx.util.console import bold, darkgreen +from sphinx.util.console import bold, darkgreen # type: ignore from sphinx.util.parallel import ParallelTasks, SerialTasks, make_chunks, \ parallel_available @@ -30,6 +30,15 @@ from sphinx.util.parallel import ParallelTasks, SerialTasks, make_chunks, \ from sphinx import roles # noqa from sphinx import directives # noqa +if False: + # For type annotation + from typing import Any, Callable, Iterable, Sequence, Tuple, Union # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.config import Config # NOQA + from sphinx.environment import BuildEnvironment # NOQA + from sphinx.util.i18n import CatalogInfo # NOQA + from sphinx.util.tags import Tags # NOQA + class Builder(object): """ @@ -47,7 +56,8 @@ class Builder(object): allow_parallel = False def __init__(self, app): - self.env = app.env + # type: (Sphinx) -> None + self.env = app.env # type: BuildEnvironment self.env.set_versioning_method(self.versioning_method, self.versioning_compare) self.srcdir = app.srcdir @@ -57,11 +67,11 @@ class Builder(object): if not path.isdir(self.doctreedir): os.makedirs(self.doctreedir) - self.app = app - self.warn = app.warn - self.info = app.info - self.config = app.config - self.tags = app.tags + self.app = app # type: Sphinx + self.warn = app.warn # type: Callable + self.info = app.info # type: Callable + self.config = app.config # type: Config + self.tags = app.tags # type: Tags self.tags.add(self.format) self.tags.add(self.name) self.tags.add("format_%s" % self.format) @@ -71,7 +81,7 @@ class Builder(object): self.old_status_iterator = app.old_status_iterator # images that need to be copied over (source -> dest) - self.images = {} + self.images = {} # type: Dict[unicode, unicode] # basename of images directory self.imagedir = "" # relative path to image directory from current docname (used at writing docs) @@ -79,7 +89,7 @@ class Builder(object): # these get set later self.parallel_ok = False - self.finish_tasks = None + self.finish_tasks = None # type: Any # load default translator class self.translator_class = app._translators.get(self.name) @@ -88,12 +98,14 @@ class 
Builder(object): # helper methods def init(self): + # type: () -> None """Load necessary templates and perform initialization. The default implementation does nothing. """ pass def create_template_bridge(self): + # type: () -> None """Return the template bridge configured.""" if self.config.template_bridge: self.templates = self.app.import_object( @@ -103,6 +115,7 @@ class Builder(object): self.templates = BuiltinTemplateLoader() def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode """Return the target URI for a document name. *typ* can be used to qualify the link characteristic for individual @@ -111,6 +124,7 @@ class Builder(object): raise NotImplementedError def get_relative_uri(self, from_, to, typ=None): + # type: (unicode, unicode, unicode) -> unicode """Return a relative URI between two source filenames. May raise environment.NoUri if there's no way to return a sensible URI. @@ -119,6 +133,7 @@ class Builder(object): self.get_target_uri(to, typ)) def get_outdated_docs(self): + # type: () -> Union[unicode, Iterable[unicode]] """Return an iterable of output files that are outdated, or a string describing what an update build will build. @@ -128,9 +143,10 @@ class Builder(object): """ raise NotImplementedError - supported_image_types = [] + supported_image_types = [] # type: List[unicode] def post_process_images(self, doctree): + # type: (nodes.Node) -> None """Pick the best candidate for all image URIs.""" for node in doctree.traverse(nodes.image): if '?' in node['candidates']: @@ -157,6 +173,7 @@ class Builder(object): # compile po methods def compile_catalogs(self, catalogs, message): + # type: (Set[CatalogInfo], unicode) -> None if not self.config.gettext_auto_build: return @@ -170,6 +187,7 @@ class Builder(object): catalog.write_mo(self.config.language) def compile_all_catalogs(self): + # type: () -> None catalogs = i18n.find_catalog_source_files( [path.join(self.srcdir, x) for x in self.config.locale_dirs], self.config.language, @@ -180,6 +198,7 @@ class Builder(object): self.compile_catalogs(catalogs, message) def compile_specific_catalogs(self, specified_files): + # type: (List[unicode]) -> None def to_domain(fpath): docname, _ = path.splitext(path_stabilize(fpath)) dom = find_catalog(docname, self.config.gettext_compact) @@ -196,6 +215,7 @@ class Builder(object): self.compile_catalogs(catalogs, message) def compile_update_catalogs(self): + # type: () -> None catalogs = i18n.find_catalog_source_files( [path.join(self.srcdir, x) for x in self.config.locale_dirs], self.config.language, @@ -207,16 +227,19 @@ class Builder(object): # build methods def build_all(self): + # type: () -> None """Build all source files.""" self.build(None, summary='all source files', method='all') def build_specific(self, filenames): + # type: (List[unicode]) -> None """Only rebuild as much as needed for changes in the *filenames*.""" # bring the filenames to the canonical format, that is, # relative to the source directory and without source_suffix. 
dirlen = len(self.srcdir) + 1 to_write = [] - suffixes = tuple(self.config.source_suffix) + suffixes = None # type: Tuple[unicode] + suffixes = tuple(self.config.source_suffix) # type: ignore for filename in filenames: filename = path.normpath(path.abspath(filename)) if not filename.startswith(self.srcdir): @@ -240,6 +263,7 @@ class Builder(object): 'line' % len(to_write)) def build_update(self): + # type: () -> None """Only rebuild what was changed or added since last build.""" to_build = self.get_outdated_docs() if isinstance(to_build, str): @@ -251,6 +275,7 @@ class Builder(object): 'out of date' % len(to_build)) def build(self, docnames, summary=None, method='update'): + # type: (Iterable[unicode], unicode, unicode) -> None """Main build method. First updates the environment, and then calls :meth:`write`. @@ -328,6 +353,7 @@ class Builder(object): self.finish_tasks.join() def write(self, build_docnames, updated_docnames, method='update'): + # type: (Iterable[unicode], Sequence[unicode], unicode) -> None if build_docnames is None or build_docnames == ['__all__']: # build_all build_docnames = self.env.found_docs @@ -349,7 +375,7 @@ class Builder(object): self.prepare_writing(docnames) self.info('done') - warnings = [] + warnings = [] # type: List[Tuple[Tuple, Dict]] self.env.set_warnfunc(lambda *args, **kwargs: warnings.append((args, kwargs))) if self.parallel_ok: # number of subprocesses is parallel-1 because the main process @@ -361,6 +387,7 @@ class Builder(object): self.env.set_warnfunc(self.warn) def _write_serial(self, docnames, warnings): + # type: (Sequence[unicode], List[Tuple[Tuple, Dict]]) -> None for docname in self.app.status_iterator( docnames, 'writing output... ', darkgreen, len(docnames)): doctree = self.env.get_and_resolve_doctree(docname, self) @@ -370,7 +397,9 @@ class Builder(object): self.warn(*warning, **kwargs) def _write_parallel(self, docnames, warnings, nproc): + # type: (Iterable[unicode], List[Tuple[Tuple, Dict]], int) -> None def write_process(docs): + # type: (List[Tuple[unicode, nodes.Node]]) -> List[Tuple[Tuple, Dict]] local_warnings = [] def warnfunc(*args, **kwargs): @@ -384,7 +413,7 @@ class Builder(object): warnings.extend(wlist) # warm up caches/compile templates using the first document - firstname, docnames = docnames[0], docnames[1:] + firstname, docnames = docnames[0], docnames[1:] # type: ignore doctree = self.env.get_and_resolve_doctree(firstname, self) self.write_doc_serialized(firstname, doctree) self.write_doc(firstname, doctree) @@ -409,20 +438,24 @@ class Builder(object): self.warn(*warning, **kwargs) def prepare_writing(self, docnames): + # type: (Set[unicode]) -> None """A place where you can add logic before :meth:`write_doc` is run""" raise NotImplementedError def write_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None """Where you actually write something to the filesystem.""" raise NotImplementedError def write_doc_serialized(self, docname, doctree): + # type: (unicode, nodes.Node) -> None """Handle parts of write_doc that must be called in the main process if parallel build is active. """ pass def finish(self): + # type: () -> None """Finish the building process. The default implementation does nothing. @@ -430,6 +463,7 @@ class Builder(object): pass def cleanup(self): + # type: () -> None """Cleanup any resources. The default implementation does nothing. 
@@ -437,6 +471,7 @@ class Builder(object): pass def get_builder_config(self, option, default): + # type: (unicode, unicode) -> Any """Return a builder specific option. This method allows customization of common builder settings by diff --git a/sphinx/builders/applehelp.py b/sphinx/builders/applehelp.py index 7db086953..c674204fc 100644 --- a/sphinx/builders/applehelp.py +++ b/sphinx/builders/applehelp.py @@ -19,7 +19,7 @@ import shlex from sphinx.builders.html import StandaloneHTMLBuilder from sphinx.config import string_classes from sphinx.util.osutil import copyfile, ensuredir, make_filename -from sphinx.util.console import bold +from sphinx.util.console import bold # type: ignore from sphinx.util.fileutil import copy_asset from sphinx.util.pycompat import htmlescape from sphinx.util.matching import Matcher @@ -28,10 +28,13 @@ from sphinx.errors import SphinxError import plistlib import subprocess +if False: + # For type annotation + from sphinx.application import Sphinx # NOQA # Use plistlib.dump in 3.4 and above try: - write_plist = plistlib.dump + write_plist = plistlib.dump # type: ignore except AttributeError: write_plist = plistlib.writePlist @@ -83,6 +86,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): search = False def init(self): + # type: () -> None super(AppleHelpBuilder, self).init() # the output files for HTML help must be .html only self.out_suffix = '.html' @@ -101,12 +105,14 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): self.config.applehelp_locale + '.lproj') def handle_finish(self): + # type: () -> None super(AppleHelpBuilder, self).handle_finish() self.finish_tasks.add_task(self.copy_localized_files) self.finish_tasks.add_task(self.build_helpbook) def copy_localized_files(self): + # type: () -> None source_dir = path.join(self.confdir, self.config.applehelp_locale + '.lproj') target_dir = self.outdir @@ -120,6 +126,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): self.info('done') def build_helpbook(self): + # type: () -> None contents_dir = path.join(self.bundle_path, 'Contents') resources_dir = path.join(contents_dir, 'Resources') language_dir = path.join(resources_dir, @@ -264,6 +271,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): def setup(app): + # type: (Sphinx) -> None app.setup_extension('sphinx.builders.html') app.add_builder(AppleHelpBuilder) diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py index 1bccb67d9..034722929 100644 --- a/sphinx/builders/changes.py +++ b/sphinx/builders/changes.py @@ -19,10 +19,15 @@ from sphinx.locale import _ from sphinx.theming import Theme from sphinx.builders import Builder from sphinx.util.osutil import ensuredir, os_path -from sphinx.util.console import bold +from sphinx.util.console import bold # type: ignore from sphinx.util.fileutil import copy_asset_file from sphinx.util.pycompat import htmlescape +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + class ChangesBuilder(Builder): """ @@ -31,6 +36,7 @@ class ChangesBuilder(Builder): name = 'changes' def init(self): + # type: () -> None self.create_template_bridge() Theme.init_themes(self.confdir, self.config.html_theme_path, warn=self.warn) @@ -38,19 +44,21 @@ class ChangesBuilder(Builder): self.templates.init(self, self.theme) def get_outdated_docs(self): + # type: () -> unicode return self.outdir typemap = { 'versionadded': 'added', 'versionchanged': 'changed', 'deprecated': 'deprecated', - } + } # type: Dict[unicode, unicode] def write(self, *ignored): + # type: (Any) 
-> None version = self.config.version - libchanges = {} - apichanges = [] - otherchanges = {} + libchanges = {} # type: Dict[unicode, List[Tuple[unicode, unicode, int]]] + apichanges = [] # type: List[Tuple[unicode, unicode, int]] + otherchanges = {} # type: Dict[Tuple[unicode, unicode], List[Tuple[unicode, unicode, int]]] # NOQA if version not in self.env.versionchanges: self.info(bold('no changes in version %s.' % version)) return @@ -101,9 +109,9 @@ class ChangesBuilder(Builder): 'show_copyright': self.config.html_show_copyright, 'show_sphinx': self.config.html_show_sphinx, } - with codecs.open(path.join(self.outdir, 'index.html'), 'w', 'utf8') as f: + with codecs.open(path.join(self.outdir, 'index.html'), 'w', 'utf8') as f: # type: ignore # NOQA f.write(self.templates.render('changes/frameset.html', ctx)) - with codecs.open(path.join(self.outdir, 'changes.html'), 'w', 'utf8') as f: + with codecs.open(path.join(self.outdir, 'changes.html'), 'w', 'utf8') as f: # type: ignore # NOQA f.write(self.templates.render('changes/versionchanges.html', ctx)) hltext = ['.. versionadded:: %s' % version, @@ -120,7 +128,7 @@ class ChangesBuilder(Builder): self.info(bold('copying source files...')) for docname in self.env.all_docs: - with codecs.open(self.env.doc2path(docname), 'r', + with codecs.open(self.env.doc2path(docname), 'r', # type: ignore self.env.config.source_encoding) as f: try: lines = f.readlines() @@ -129,7 +137,7 @@ class ChangesBuilder(Builder): continue targetfn = path.join(self.outdir, 'rst', os_path(docname)) + '.html' ensuredir(path.dirname(targetfn)) - with codecs.open(targetfn, 'w', 'utf-8') as f: + with codecs.open(targetfn, 'w', 'utf-8') as f: # type: ignore text = ''.join(hl(i+1, line) for (i, line) in enumerate(lines)) ctx = { 'filename': self.env.doc2path(docname, None), @@ -144,6 +152,7 @@ class ChangesBuilder(Builder): self.outdir) def hl(self, text, version): + # type: (unicode, unicode) -> unicode text = htmlescape(text) for directive in ['versionchanged', 'versionadded', 'deprecated']: text = text.replace('.. 
%s:: %s' % (directive, version), @@ -151,8 +160,10 @@ class ChangesBuilder(Builder): return text def finish(self): + # type: () -> None pass def setup(app): + # type: (Sphinx) -> None app.add_builder(ChangesBuilder) diff --git a/sphinx/builders/devhelp.py b/sphinx/builders/devhelp.py index fd6f3400e..f0b313e7f 100644 --- a/sphinx/builders/devhelp.py +++ b/sphinx/builders/devhelp.py @@ -25,7 +25,12 @@ from sphinx.builders.html import StandaloneHTMLBuilder try: import xml.etree.ElementTree as etree except ImportError: - import lxml.etree as etree + import lxml.etree as etree # type: ignore + +if False: + # For type annotation + from typing import Any # NOQA + from sphinx.application import Sphinx # NOQA class DevhelpBuilder(StandaloneHTMLBuilder): @@ -44,14 +49,17 @@ class DevhelpBuilder(StandaloneHTMLBuilder): embedded = True def init(self): + # type: () -> None StandaloneHTMLBuilder.init(self) self.out_suffix = '.html' self.link_suffix = '.html' def handle_finish(self): + # type: () -> None self.build_devhelp(self.outdir, self.config.devhelp_basename) def build_devhelp(self, outdir, outname): + # type: (unicode, unicode) -> None self.info('dumping devhelp index...') # Basic info @@ -69,6 +77,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder): self.config.master_doc, self, prune_toctrees=False) def write_toc(node, parent): + # type: (nodes.Node, nodes.Node) -> None if isinstance(node, addnodes.compact_paragraph) or \ isinstance(node, nodes.bullet_list): for subnode in node: @@ -82,6 +91,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder): parent.attrib['name'] = node.astext() def istoctree(node): + # type: (nodes.Node) -> bool return isinstance(node, addnodes.compact_paragraph) and \ 'toctree' in node @@ -93,6 +103,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder): index = self.env.create_index(self) def write_index(title, refs, subitems): + # type: (unicode, List[Any], Any) -> None if len(refs) == 0: pass elif len(refs) == 1: @@ -105,7 +116,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder): link=ref[1]) if subitems: - parent_title = re.sub(r'\s*\(.*\)\s*$', '', title) + parent_title = re.sub(r'\s*\(.*\)\s*$', '', title) # type: ignore for subitem in subitems: write_index("%s %s" % (parent_title, subitem[0]), subitem[1], []) @@ -116,11 +127,12 @@ class DevhelpBuilder(StandaloneHTMLBuilder): # Dump the XML file xmlfile = path.join(outdir, outname + '.devhelp.gz') - with gzip.open(xmlfile, 'w') as f: + with gzip.open(xmlfile, 'w') as f: # type: ignore tree.write(f, 'utf-8') def setup(app): + # type: (Sphinx) -> None app.setup_extension('sphinx.builders.html') app.add_builder(DevhelpBuilder) diff --git a/sphinx/builders/epub.py b/sphinx/builders/epub.py index b4b657468..f9abd53fb 100644 --- a/sphinx/builders/epub.py +++ b/sphinx/builders/epub.py @@ -31,7 +31,12 @@ from sphinx import addnodes from sphinx.builders.html import StandaloneHTMLBuilder from sphinx.util.osutil import ensuredir, copyfile, make_filename, EEXIST from sphinx.util.smartypants import sphinx_smarty_pants as ssp -from sphinx.util.console import brown +from sphinx.util.console import brown # type: ignore + +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.application import Sphinx # NOQA # (Fragment) templates from which the metainfo files content.opf, toc.ncx, @@ -159,7 +164,7 @@ MEDIA_TYPES = { '.otf': 'application/x-font-otf', '.ttf': 'application/x-font-ttf', '.woff': 'application/font-woff', -} +} # type: Dict[unicode, unicode] VECTOR_GRAPHICS_EXTENSIONS = ('.svg',) @@ -221,6 +226,7 @@ class 
EpubBuilder(StandaloneHTMLBuilder): refuri_re = REFURI_RE def init(self): + # type: () -> None StandaloneHTMLBuilder.init(self) # the output files for epub must be .html only self.out_suffix = '.xhtml' @@ -230,10 +236,12 @@ class EpubBuilder(StandaloneHTMLBuilder): self.use_index = self.get_builder_config('use_index', 'epub') def get_theme_config(self): + # type: () -> Tuple[unicode, Dict] return self.config.epub_theme, self.config.epub_theme_options # generic support functions def make_id(self, name, id_cache={}): + # type: (unicode, Dict[unicode, unicode]) -> unicode # id_cache is intentionally mutable """Return a unique id for name.""" id = id_cache.get(name) @@ -243,6 +251,7 @@ class EpubBuilder(StandaloneHTMLBuilder): return id def esc(self, name): + # type: (unicode) -> unicode """Replace all characters not allowed in text an attribute values.""" # Like cgi.escape, but also replace apostrophe name = name.replace('&', '&') @@ -253,6 +262,7 @@ class EpubBuilder(StandaloneHTMLBuilder): return name def get_refnodes(self, doctree, result): + # type: (nodes.Node, List[Dict[unicode, Any]]) -> List[Dict[unicode, Any]] """Collect section titles, their depth in the toc and the refuri.""" # XXX: is there a better way than checking the attribute # toctree-l[1-8] on the parent node? @@ -276,6 +286,7 @@ class EpubBuilder(StandaloneHTMLBuilder): return result def get_toc(self): + # type: () -> None """Get the total table of contents, containing the master_doc and pre and post files not managed by sphinx. """ @@ -291,6 +302,7 @@ class EpubBuilder(StandaloneHTMLBuilder): self.toc_add_files(self.refnodes) def toc_add_files(self, refnodes): + # type: (List[nodes.Node]) -> None """Add the master_doc, pre and post files to a list of refnodes. """ refnodes.insert(0, { @@ -313,10 +325,12 @@ class EpubBuilder(StandaloneHTMLBuilder): }) def fix_fragment(self, prefix, fragment): + # type: (unicode, unicode) -> unicode """Return a href/id attribute with colons replaced by hyphens.""" return prefix + fragment.replace(':', '-') def fix_ids(self, tree): + # type: (nodes.Node) -> None """Replace colons with hyphens in href and id attributes. Some readers crash because they interpret the part as a @@ -337,9 +351,11 @@ class EpubBuilder(StandaloneHTMLBuilder): node.attributes['ids'] = newids def add_visible_links(self, tree, show_urls='inline'): + # type: (nodes.Node, unicode) -> None """Add visible link targets for external links""" def make_footnote_ref(doc, label): + # type: (nodes.Node, unicode) -> nodes.footnote_reference """Create a footnote_reference node with children""" footnote_ref = nodes.footnote_reference('[#]_') footnote_ref.append(nodes.Text(label)) @@ -347,6 +363,7 @@ class EpubBuilder(StandaloneHTMLBuilder): return footnote_ref def make_footnote(doc, label, uri): + # type: (nodes.Node, unicode, unicode) -> nodes.footnote """Create a footnote node with children""" footnote = nodes.footnote(uri) para = nodes.paragraph() @@ -357,6 +374,7 @@ class EpubBuilder(StandaloneHTMLBuilder): return footnote def footnote_spot(tree): + # type: (nodes.Node) -> Tuple[nodes.Node, int] """Find or create a spot to place footnotes. The function returns the tuple (parent, index).""" @@ -406,6 +424,7 @@ class EpubBuilder(StandaloneHTMLBuilder): fn_idx += 1 def write_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None """Write one document file. 
This method is overwritten in order to fix fragment identifiers @@ -416,6 +435,7 @@ class EpubBuilder(StandaloneHTMLBuilder): return StandaloneHTMLBuilder.write_doc(self, docname, doctree) def fix_genindex(self, tree): + # type: (nodes.Node) -> None """Fix href attributes for genindex pages.""" # XXX: modifies tree inline # Logic modeled from themes/basic/genindex.html @@ -434,11 +454,13 @@ class EpubBuilder(StandaloneHTMLBuilder): self.fix_fragment(m.group(1), m.group(2))) def is_vector_graphics(self, filename): + # type: (unicode) -> bool """Does the filename extension indicate a vector graphic format?""" ext = path.splitext(filename)[-1] return ext in VECTOR_GRAPHICS_EXTENSIONS def copy_image_files_pil(self): + # type: () -> None """Copy images using the PIL. The method tries to read and write the files with the PIL, converting the format and resizing the image if necessary/possible. @@ -477,6 +499,7 @@ class EpubBuilder(StandaloneHTMLBuilder): (path.join(self.srcdir, src), err)) def copy_image_files(self): + # type: () -> None """Copy image files to destination directory. This overwritten method can use the PIL to convert image files. """ @@ -491,10 +514,12 @@ class EpubBuilder(StandaloneHTMLBuilder): super(EpubBuilder, self).copy_image_files() def copy_download_files(self): + # type: () -> None pass def handle_page(self, pagename, addctx, templatename='page.html', outfilename=None, event_arg=None): + # type: (unicode, Dict, unicode, unicode, Any) -> None """Create a rendered page. This method is overwritten for genindex pages in order to fix href link @@ -510,6 +535,7 @@ class EpubBuilder(StandaloneHTMLBuilder): # Finish by building the epub file def handle_finish(self): + # type: () -> None """Create the metainfo files and finally the epub.""" self.get_toc() self.build_mimetype(self.outdir, 'mimetype') @@ -519,12 +545,14 @@ class EpubBuilder(StandaloneHTMLBuilder): self.build_epub(self.outdir, self.config.epub_basename + '.epub') def build_mimetype(self, outdir, outname): + # type: (unicode, unicode) -> None """Write the metainfo file mimetype.""" self.info('writing %s file...' % outname) - with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: + with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: # type: ignore f.write(self.mimetype_template) def build_container(self, outdir, outname): + # type: (unicode, unicode) -> None """Write the metainfo file META-INF/cointainer.xml.""" self.info('writing %s file...' % outname) fn = path.join(outdir, outname) @@ -533,14 +561,15 @@ class EpubBuilder(StandaloneHTMLBuilder): except OSError as err: if err.errno != EEXIST: raise - with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: - f.write(self.container_template) + with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: # type: ignore + f.write(self.container_template) # type: ignore def content_metadata(self, files, spine, guide): + # type: (List[unicode], Any, Any) -> Dict[unicode, Any] """Create a dictionary with all metadata for the content.opf file properly escaped. 
""" - metadata = {} + metadata = {} # type: Dict[unicode, Any] metadata['title'] = self.esc(self.config.epub_title) metadata['author'] = self.esc(self.config.epub_author) metadata['uid'] = self.esc(self.config.epub_uid) @@ -556,6 +585,7 @@ class EpubBuilder(StandaloneHTMLBuilder): return metadata def build_content(self, outdir, outname): + # type: (unicode, unicode) -> None """Write the metainfo file content.opf It contains bibliographic data, a file list and the spine (the reading order). """ @@ -565,8 +595,8 @@ class EpubBuilder(StandaloneHTMLBuilder): if not outdir.endswith(os.sep): outdir += os.sep olen = len(outdir) - projectfiles = [] - self.files = [] + projectfiles = [] # type: List[unicode] + self.files = [] # type: List[unicode] self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf', 'toc.ncx', 'META-INF/container.xml', 'Thumbs.db', 'ehthumbs.db', '.DS_Store', @@ -679,16 +709,17 @@ class EpubBuilder(StandaloneHTMLBuilder): 'title': self.guide_titles['toc'], 'uri': self.esc(self.refnodes[0]['refuri']) }) - projectfiles = '\n'.join(projectfiles) - spine = '\n'.join(spine) - guide = '\n'.join(guide) + projectfiles = '\n'.join(projectfiles) # type: ignore + spine = '\n'.join(spine) # type: ignore + guide = '\n'.join(guide) # type: ignore # write the project file - with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: - f.write(content_tmpl % + with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: # type: ignore + f.write(content_tmpl % # type: ignore self.content_metadata(projectfiles, spine, guide)) def new_navpoint(self, node, level, incr=True): + # type: (nodes.Node, int, bool) -> unicode """Create a new entry in the toc from the node at given level.""" # XXX Modifies the node if incr: @@ -700,6 +731,7 @@ class EpubBuilder(StandaloneHTMLBuilder): return self.navpoint_template % node def insert_subnav(self, node, subnav): + # type: (nodes.Node, unicode) -> unicode """Insert nested navpoints for given node. The node and subnav are already rendered to text. @@ -709,6 +741,7 @@ class EpubBuilder(StandaloneHTMLBuilder): return '\n'.join(nlist) def build_navpoints(self, nodes): + # type: (nodes.Node) -> unicode """Create the toc navigation structure. Subelements of a node are nested inside the navpoint. For nested nodes @@ -752,10 +785,11 @@ class EpubBuilder(StandaloneHTMLBuilder): return '\n'.join(navlist) def toc_metadata(self, level, navpoints): + # type: (int, List[unicode]) -> Dict[unicode, Any] """Create a dictionary with all metadata for the toc.ncx file properly escaped. """ - metadata = {} + metadata = {} # type: Dict[unicode, Any] metadata['uid'] = self.config.epub_uid metadata['title'] = self.config.epub_title metadata['level'] = level @@ -763,6 +797,7 @@ class EpubBuilder(StandaloneHTMLBuilder): return metadata def build_toc(self, outdir, outname): + # type: (unicode, unicode) -> None """Write the metainfo file toc.ncx.""" self.info('writing %s file...' 
% outname) @@ -778,29 +813,31 @@ class EpubBuilder(StandaloneHTMLBuilder): navpoints = self.build_navpoints(refnodes) level = max(item['level'] for item in self.refnodes) level = min(level, self.config.epub_tocdepth) - with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: - f.write(self.toc_template % self.toc_metadata(level, navpoints)) + with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: # type: ignore + f.write(self.toc_template % self.toc_metadata(level, navpoints)) # type: ignore def build_epub(self, outdir, outname): + # type: (unicode, unicode) -> None """Write the epub file. It is a zip file with the mimetype file stored uncompressed as the first entry. """ self.info('writing %s file...' % outname) - projectfiles = ['META-INF/container.xml', 'content.opf', 'toc.ncx'] \ - + self.files - epub = zipfile.ZipFile(path.join(outdir, outname), 'w', + projectfiles = ['META-INF/container.xml', 'content.opf', 'toc.ncx'] # type: List[unicode] # NOQA + projectfiles.extend(self.files) + epub = zipfile.ZipFile(path.join(outdir, outname), 'w', # type: ignore zipfile.ZIP_DEFLATED) - epub.write(path.join(outdir, 'mimetype'), 'mimetype', + epub.write(path.join(outdir, 'mimetype'), 'mimetype', # type: ignore zipfile.ZIP_STORED) for file in projectfiles: fp = path.join(outdir, file) - epub.write(fp, file, zipfile.ZIP_DEFLATED) + epub.write(fp, file, zipfile.ZIP_DEFLATED) # type: ignore epub.close() def setup(app): + # type: (Sphinx) -> None app.setup_extension('sphinx.builders.html') app.add_builder(EpubBuilder) diff --git a/sphinx/builders/gettext.py b/sphinx/builders/gettext.py index e118cde99..dbe696a0b 100644 --- a/sphinx/builders/gettext.py +++ b/sphinx/builders/gettext.py @@ -26,9 +26,16 @@ from sphinx.util.tags import Tags from sphinx.util.nodes import extract_messages, traverse_translatable_index from sphinx.util.osutil import safe_relpath, ensuredir, canon_path from sphinx.util.i18n import find_catalog -from sphinx.util.console import darkgreen, purple, bold +from sphinx.util.console import darkgreen, purple, bold # type: ignore from sphinx.locale import pairindextypes +if False: + # For type annotation + from typing import Any, Iterable, Tuple # NOQA + from docutils import nodes # NOQA + from sphinx.util.i18n import CatalogInfo # NOQA + from sphinx.application import Sphinx # NOQA + POHEADER = r""" # SOME DESCRIPTIVE TITLE. # Copyright (C) %(copyright)s @@ -55,10 +62,14 @@ class Catalog(object): """Catalog of translatable messages.""" def __init__(self): - self.messages = [] # retain insertion order, a la OrderedDict - self.metadata = {} # msgid -> file, line, uid + # type: () -> None + self.messages = [] # type: List[unicode] + # retain insertion order, a la OrderedDict + self.metadata = {} # type: Dict[unicode, List[Tuple[unicode, int, unicode]]] + # msgid -> file, line, uid def add(self, msg, origin): + # type: (unicode, MsgOrigin) -> None if not hasattr(origin, 'uid'): # Nodes that are replicated like todo don't have a uid, # however i18n is also unnecessary. @@ -75,6 +86,7 @@ class MsgOrigin(object): """ def __init__(self, source, line): + # type: (unicode, int) -> None self.source = source self.line = line self.uid = uuid4().hex @@ -87,6 +99,7 @@ class I18nTags(Tags): always returns True value even if no tags are defined. 
""" def eval_condition(self, condition): + # type: (Any) -> bool return True @@ -99,27 +112,34 @@ class I18nBuilder(Builder): versioning_compare = None # be set by `gettext_uuid` def __init__(self, app): + # type: (Sphinx) -> None self.versioning_compare = app.env.config.gettext_uuid super(I18nBuilder, self).__init__(app) def init(self): + # type: () -> None Builder.init(self) self.tags = I18nTags() - self.catalogs = defaultdict(Catalog) + self.catalogs = defaultdict(Catalog) # type: defaultdict[unicode, Catalog] def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode return '' def get_outdated_docs(self): + # type: () -> Set[unicode] return self.env.found_docs def prepare_writing(self, docnames): + # type: (Set[unicode]) -> None return def compile_catalogs(self, catalogs, message): + # type: (Set[CatalogInfo], unicode) -> None return def write_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None catalog = self.catalogs[find_catalog(docname, self.config.gettext_compact)] @@ -153,13 +173,16 @@ if source_date_epoch is not None: class LocalTimeZone(tzinfo): def __init__(self, *args, **kw): - super(LocalTimeZone, self).__init__(*args, **kw) + # type: (Any, Any) -> None + super(LocalTimeZone, self).__init__(*args, **kw) # type: ignore self.tzdelta = tzdelta def utcoffset(self, dt): + # type: (datetime) -> timedelta return self.tzdelta def dst(self, dt): + # type: (datetime) -> timedelta return timedelta(0) @@ -173,11 +196,13 @@ class MessageCatalogBuilder(I18nBuilder): name = 'gettext' def init(self): + # type: () -> None I18nBuilder.init(self) self.create_template_bridge() self.templates.init(self) def _collect_templates(self): + # type: () -> Set[unicode] template_files = set() for template_path in self.config.templates_path: tmpl_abs_path = path.join(self.app.srcdir, template_path) @@ -189,6 +214,7 @@ class MessageCatalogBuilder(I18nBuilder): return template_files def _extract_from_template(self): + # type: () -> None files = self._collect_templates() self.info(bold('building [%s]: ' % self.name), nonl=1) self.info('targets for %d template files' % len(files)) @@ -197,23 +223,25 @@ class MessageCatalogBuilder(I18nBuilder): for template in self.app.status_iterator( files, 'reading templates... 
', purple, len(files)): - with open(template, 'r', encoding='utf-8') as f: + with open(template, 'r', encoding='utf-8') as f: # type: ignore context = f.read() for line, meth, msg in extract_translations(context): origin = MsgOrigin(template, line) self.catalogs['sphinx'].add(msg, origin) def build(self, docnames, summary=None, method='update'): + # type: (Iterable[unicode], unicode, unicode) -> None self._extract_from_template() I18nBuilder.build(self, docnames, summary, method) def finish(self): + # type: () -> None I18nBuilder.finish(self) data = dict( version = self.config.version, copyright = self.config.copyright, project = self.config.project, - ctime = datetime.fromtimestamp( + ctime = datetime.fromtimestamp( # type: ignore timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'), ) for textdomain, catalog in self.app.status_iterator( @@ -224,31 +252,32 @@ class MessageCatalogBuilder(I18nBuilder): ensuredir(path.join(self.outdir, path.dirname(textdomain))) pofn = path.join(self.outdir, textdomain + '.pot') - with open(pofn, 'w', encoding='utf-8') as pofile: - pofile.write(POHEADER % data) + with open(pofn, 'w', encoding='utf-8') as pofile: # type: ignore + pofile.write(POHEADER % data) # type: ignore for message in catalog.messages: positions = catalog.metadata[message] if self.config.gettext_location: # generate "#: file1:line1\n#: file2:line2 ..." - pofile.write("#: %s\n" % "\n#: ".join( + pofile.write("#: %s\n" % "\n#: ".join( # type: ignore "%s:%s" % (canon_path( safe_relpath(source, self.outdir)), line) for source, line, _ in positions)) if self.config.gettext_uuid: # generate "# uuid1\n# uuid2\n ..." - pofile.write("# %s\n" % "\n# ".join( + pofile.write("# %s\n" % "\n# ".join( # type: ignore uid for _, _, uid in positions)) # message contains *one* line of text ready for translation message = message.replace('\\', r'\\'). \ replace('"', r'\"'). \ replace('\n', '\\n"\n"') - pofile.write('msgid "%s"\nmsgstr ""\n\n' % message) + pofile.write('msgid "%s"\nmsgstr ""\n\n' % message) # type: ignore def setup(app): + # type: (Sphinx) -> None app.add_builder(MessageCatalogBuilder) app.add_config_value('gettext_compact', True, 'gettext') diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index e13d752d7..d5d522dd0 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -19,6 +19,7 @@ from hashlib import md5 from six import iteritems, text_type, string_types from six.moves import cPickle as pickle + from docutils import nodes from docutils.io import DocTreeInput, StringOutput from docutils.core import Publisher @@ -41,10 +42,16 @@ from sphinx.theming import Theme from sphinx.builders import Builder from sphinx.application import ENV_PICKLE_FILENAME from sphinx.highlighting import PygmentsBridge -from sphinx.util.console import bold, darkgreen, brown +from sphinx.util.console import bold, darkgreen, brown # type: ignore from sphinx.writers.html import HTMLWriter, HTMLTranslator, \ SmartyPantsHTMLTranslator +if False: + # For type annotation + from typing import Any, Iterable, Iterator, Tuple, Union # NOQA + from sphinx.domains import Domain, Index # NOQA + from sphinx.application import Sphinx # NOQA + #: the filename for the inventory of objects INVENTORY_FILENAME = 'objects.inv' #: the filename for the "last build" file (for serializing builders) @@ -52,6 +59,7 @@ LAST_BUILD_FILENAME = 'last_build' def get_stable_hash(obj): + # type: (Any) -> unicode """ Return a stable hash for a Python data structure. 
We can't just use the md5 of str(obj) since for example dictionary items are enumerated @@ -85,13 +93,17 @@ class StandaloneHTMLBuilder(Builder): allow_sharp_as_current_path = True embedded = False # for things like HTML help or Qt help: suppresses sidebar search = True # for things like HTML help and Apple help: suppress search + use_index = False download_support = True # enable download role # This is a class attribute because it is mutated by Sphinx.add_javascript. script_files = ['_static/jquery.js', '_static/underscore.js', - '_static/doctools.js'] + '_static/doctools.js'] # type: List[unicode] # Dito for this one. - css_files = [] + css_files = [] # type: List[unicode] + + imgpath = None # type: unicode + domain_indices = [] # type: List[Tuple[unicode, Index, unicode, bool]] default_sidebars = ['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'] @@ -100,15 +112,16 @@ class StandaloneHTMLBuilder(Builder): _publisher = None def init(self): + # type: () -> None # a hash of all config values that, if changed, cause a full rebuild - self.config_hash = '' - self.tags_hash = '' + self.config_hash = '' # type: unicode + self.tags_hash = '' # type: unicode # basename of images directory self.imagedir = '_images' # section numbers for headings in the currently visited document - self.secnumbers = {} + self.secnumbers = {} # type: Dict[unicode, Tuple[int, ...]] # currently written docname - self.current_docname = None + self.current_docname = None # type: unicode self.init_templates() self.init_highlighter() @@ -127,6 +140,7 @@ class StandaloneHTMLBuilder(Builder): self.use_index = self.get_builder_config('use_index', 'html') def _get_translations_js(self): + # type: () -> unicode candidates = [path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js'), path.join(sys.prefix, 'share/sphinx/locale', @@ -140,9 +154,11 @@ class StandaloneHTMLBuilder(Builder): return None def get_theme_config(self): + # type: () -> Tuple[unicode, Dict] return self.config.html_theme, self.config.html_theme_options def init_templates(self): + # type: () -> None Theme.init_themes(self.confdir, self.config.html_theme_path, warn=self.warn) themename, themeoptions = self.get_theme_config() @@ -152,6 +168,7 @@ class StandaloneHTMLBuilder(Builder): self.templates.init(self, self.theme) def init_highlighter(self): + # type: () -> None # determine Pygments style and create the highlighter if self.config.pygments_style is not None: style = self.config.pygments_style @@ -163,18 +180,20 @@ class StandaloneHTMLBuilder(Builder): self.config.trim_doctest_flags) def init_translator_class(self): + # type: () -> None if self.translator_class is None: if self.config.html_use_smartypants: self.translator_class = SmartyPantsHTMLTranslator else: self.translator_class = HTMLTranslator - def get_outdated_docs(self): + def get_outdated_docs(self): # type: ignore + # type: () -> Iterator[unicode] cfgdict = dict((name, self.config[name]) for (name, desc) in iteritems(self.config.values) if desc[1] == 'html') self.config_hash = get_stable_hash(cfgdict) - self.tags_hash = get_stable_hash(sorted(self.tags)) + self.tags_hash = get_stable_hash(sorted(self.tags)) # type: ignore old_config_hash = old_tags_hash = '' try: with open(path.join(self.outdir, '.buildinfo')) as fp: @@ -222,6 +241,7 @@ class StandaloneHTMLBuilder(Builder): pass def render_partial(self, node): + # type: (nodes.Nodes) -> Dict[unicode, unicode] """Utility: Render a lone doctree node.""" if node is None: return {'fragment': ''} @@ 
-247,6 +267,7 @@ class StandaloneHTMLBuilder(Builder): return pub.writer.parts def prepare_writing(self, docnames): + # type: (Iterable[unicode]) -> nodes.Node # create the search indexer self.indexer = None if self.search: @@ -272,6 +293,7 @@ class StandaloneHTMLBuilder(Builder): indices_config = self.config.html_domain_indices if indices_config: for domain_name in sorted(self.env.domains): + domain = None # type: Domain domain = self.env.domains[domain_name] for indexcls in domain.indices: indexname = '%s-%s' % (domain.name, indexcls.name) @@ -313,7 +335,7 @@ class StandaloneHTMLBuilder(Builder): rellinks = [] if self.use_index: rellinks.append(('genindex', _('General Index'), 'I', _('index'))) - for indexname, indexcls, content, collapse in self.domain_indices: + for indexname, indexcls, content, collapse in self.domain_indices: # type: ignore # if it has a short name if indexcls.shortname: rellinks.append((indexname, indexcls.localname, @@ -353,7 +375,7 @@ class StandaloneHTMLBuilder(Builder): parents = [], logo = logo, favicon = favicon, - ) + ) # type: Dict[unicode, Any] if self.theme: self.globalcontext.update( ('theme_' + key, val) for (key, val) in @@ -361,6 +383,7 @@ class StandaloneHTMLBuilder(Builder): self.globalcontext.update(self.config.html_context) def get_doc_context(self, docname, body, metatags): + # type: (unicode, unicode, Dict) -> Dict[unicode, Any] """Collect items for the template context of a page.""" # find out relations prev = next = None @@ -441,6 +464,7 @@ class StandaloneHTMLBuilder(Builder): ) def write_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None destination = StringOutput(encoding='utf-8') doctree.settings = self.docsettings @@ -458,6 +482,7 @@ class StandaloneHTMLBuilder(Builder): self.handle_page(docname, ctx, event_arg=doctree) def write_doc_serialized(self, docname, doctree): + # type: (unicode, nodes.Node) -> None self.imgpath = relative_uri(self.get_target_uri(docname), self.imagedir) self.post_process_images(doctree) title = self.env.longtitles.get(docname) @@ -465,6 +490,7 @@ class StandaloneHTMLBuilder(Builder): self.index_page(docname, doctree, title) def finish(self): + # type: () -> None self.finish_tasks.add_task(self.gen_indices) self.finish_tasks.add_task(self.gen_additional_pages) self.finish_tasks.add_task(self.copy_image_files) @@ -477,6 +503,7 @@ class StandaloneHTMLBuilder(Builder): self.handle_finish() def gen_indices(self): + # type: () -> None self.info(bold('generating indices...'), nonl=1) # the global general index @@ -489,6 +516,7 @@ class StandaloneHTMLBuilder(Builder): self.info() def gen_additional_pages(self): + # type: () -> None # pages from extensions for pagelist in self.app.emit('html-collect-pages'): for pagename, context, template in pagelist: @@ -515,6 +543,7 @@ class StandaloneHTMLBuilder(Builder): self.info() def write_genindex(self): + # type: () -> None # the total count of lines for each index letter, used to distribute # the entries into two columns genindex = self.env.create_index(self) @@ -544,6 +573,7 @@ class StandaloneHTMLBuilder(Builder): self.handle_page('genindex', genindexcontext, 'genindex.html') def write_domain_indices(self): + # type: () -> None for indexname, indexcls, content, collapse in self.domain_indices: indexcontext = dict( indextitle = indexcls.localname, @@ -554,6 +584,7 @@ class StandaloneHTMLBuilder(Builder): self.handle_page(indexname, indexcontext, 'domainindex.html') def copy_image_files(self): + # type: () -> None # copy image files if self.images: 
ensuredir(path.join(self.outdir, self.imagedir)) @@ -568,6 +599,7 @@ class StandaloneHTMLBuilder(Builder): (path.join(self.srcdir, src), err)) def copy_download_files(self): + # type: () -> None def to_relpath(f): return relative_path(self.srcdir, f) # copy downloadable files @@ -586,6 +618,7 @@ class StandaloneHTMLBuilder(Builder): (path.join(self.srcdir, src), err)) def copy_static_files(self): + # type: () -> None # copy static files self.info(bold('copying static files... '), nonl=True) ensuredir(path.join(self.outdir, '_static')) @@ -646,6 +679,7 @@ class StandaloneHTMLBuilder(Builder): self.info('done') def copy_extra_files(self): + # type: () -> None # copy html_extra_path files self.info(bold('copying extra files... '), nonl=True) excluded = Matcher(self.config.exclude_patterns) @@ -660,6 +694,7 @@ class StandaloneHTMLBuilder(Builder): self.info('done') def write_buildinfo(self): + # type: () -> None # write build info file with open(path.join(self.outdir, '.buildinfo'), 'w') as fp: fp.write('# Sphinx build info version 1\n' @@ -669,11 +704,13 @@ class StandaloneHTMLBuilder(Builder): (self.config_hash, self.tags_hash)) def cleanup(self): + # type: () -> None # clean up theme stuff if self.theme: self.theme.cleanup() def post_process_images(self, doctree): + # type: (nodes.Node) -> None """Pick the best candidate for an image and link down-scaled images to their high res version. """ @@ -699,15 +736,16 @@ class StandaloneHTMLBuilder(Builder): reference.append(node) def load_indexer(self, docnames): + # type: (Set[unicode]) -> None keep = set(self.env.all_docs) - set(docnames) try: searchindexfn = path.join(self.outdir, self.searchindex_filename) if self.indexer_dumps_unicode: - f = codecs.open(searchindexfn, 'r', encoding='utf-8') + f = codecs.open(searchindexfn, 'r', encoding='utf-8') # type: ignore else: - f = open(searchindexfn, 'rb') + f = open(searchindexfn, 'rb') # type: ignore with f: - self.indexer.load(f, self.indexer_format) + self.indexer.load(f, self.indexer_format) # type: ignore except (IOError, OSError, ValueError): if keep: self.warn('search index couldn\'t be loaded, but not all ' @@ -717,6 +755,7 @@ class StandaloneHTMLBuilder(Builder): self.indexer.prune(keep) def index_page(self, pagename, doctree, title): + # type: (unicode, nodes.Node, unicode) -> None # only index pages with title if self.indexer is not None and title: filename = self.env.doc2path(pagename, base=None) @@ -727,15 +766,18 @@ class StandaloneHTMLBuilder(Builder): self.indexer.feed(pagename, title, doctree) def _get_local_toctree(self, docname, collapse=True, **kwds): + # type: (unicode, bool, Any) -> unicode if 'includehidden' not in kwds: kwds['includehidden'] = False return self.render_partial(self.env.get_toctree_for( docname, self, collapse, **kwds))['fragment'] def get_outfilename(self, pagename): + # type: (unicode) -> unicode return path.join(self.outdir, os_path(pagename) + self.out_suffix) def add_sidebars(self, pagename, ctx): + # type: (unicode, Dict) -> None def has_wildcard(pattern): return any(char in pattern for char in '*?[') sidebars = None @@ -768,10 +810,12 @@ class StandaloneHTMLBuilder(Builder): # --------- these are overwritten by the serialization builder def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode return docname + self.link_suffix def handle_page(self, pagename, addctx, templatename='page.html', outfilename=None, event_arg=None): + # type: (unicode, Dict, unicode, unicode, Any) -> None ctx = self.globalcontext.copy() # 
current_page_name is backwards compatibility ctx['pagename'] = ctx['current_page_name'] = pagename @@ -828,7 +872,7 @@ class StandaloneHTMLBuilder(Builder): # outfilename's path is in general different from self.outdir ensuredir(path.dirname(outfilename)) try: - with codecs.open(outfilename, 'w', encoding, 'xmlcharrefreplace') as f: + with codecs.open(outfilename, 'w', encoding, 'xmlcharrefreplace') as f: # type: ignore # NOQA f.write(output) except (IOError, OSError) as err: self.warn("error writing file %s: %s" % (outfilename, err)) @@ -840,11 +884,13 @@ class StandaloneHTMLBuilder(Builder): copyfile(self.env.doc2path(pagename), source_name) def handle_finish(self): + # type: () -> None if self.indexer: self.finish_tasks.add_task(self.dump_search_index) self.finish_tasks.add_task(self.dump_inventory) def dump_inventory(self): + # type: () -> None self.info(bold('dumping object inventory... '), nonl=True) with open(path.join(self.outdir, INVENTORY_FILENAME), 'wb') as f: f.write((u'# Sphinx inventory version 2\n' @@ -871,6 +917,7 @@ class StandaloneHTMLBuilder(Builder): self.info('done') def dump_search_index(self): + # type: () -> None self.info( bold('dumping search index in %s ... ' % self.indexer.label()), nonl=True) @@ -879,11 +926,11 @@ class StandaloneHTMLBuilder(Builder): # first write to a temporary file, so that if dumping fails, # the existing index won't be overwritten if self.indexer_dumps_unicode: - f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8') + f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8') # type: ignore else: - f = open(searchindexfn + '.tmp', 'wb') + f = open(searchindexfn + '.tmp', 'wb') # type: ignore with f: - self.indexer.dump(f, self.indexer_format) + self.indexer.dump(f, self.indexer_format) # type: ignore movefile(searchindexfn + '.tmp', searchindexfn) self.info('done') @@ -897,6 +944,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder): name = 'dirhtml' def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode if docname == 'index': return '' if docname.endswith(SEP + 'index'): @@ -904,6 +952,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder): return docname + SEP def get_outfilename(self, pagename): + # type: (unicode) -> unicode if pagename == 'index' or pagename.endswith(SEP + 'index'): outfilename = path.join(self.outdir, os_path(pagename) + self.out_suffix) @@ -914,6 +963,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder): return outfilename def prepare_writing(self, docnames): + # type: (Iterable[unicode]) -> None StandaloneHTMLBuilder.prepare_writing(self, docnames) self.globalcontext['no_search_suffix'] = True @@ -926,10 +976,12 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): name = 'singlehtml' copysource = False - def get_outdated_docs(self): + def get_outdated_docs(self): # type: ignore + # type: () -> Union[unicode, List[unicode]] return 'all documents' def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode if docname in self.env.all_docs: # all references are on the same page... 
return self.config.master_doc + self.out_suffix + \ @@ -939,10 +991,12 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): return docname + self.out_suffix def get_relative_uri(self, from_, to, typ=None): + # type: (unicode, unicode, unicode) -> unicode # ignore source return self.get_target_uri(to, typ) def fix_refuris(self, tree): + # type: (nodes.Node) -> None # fix refuris with double anchor fname = self.config.master_doc + self.out_suffix for refnode in tree.traverse(nodes.reference): @@ -957,6 +1011,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): refnode['refuri'] = fname + refuri[hashindex:] def _get_local_toctree(self, docname, collapse=True, **kwds): + # type: (unicode, bool, Any) -> unicode if 'includehidden' not in kwds: kwds['includehidden'] = False toctree = self.env.get_toctree_for(docname, self, collapse, **kwds) @@ -964,6 +1019,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): return self.render_partial(toctree)['fragment'] def assemble_doctree(self): + # type: () -> nodes.Node master = self.config.master_doc tree = self.env.get_doctree(master) tree = inline_all_toctrees(self, set(), master, tree, darkgreen, [master]) @@ -973,6 +1029,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): return tree def assemble_toc_secnumbers(self): + # type: () -> Dict[unicode, Dict[Tuple[unicode, unicode], Tuple[int, ...]]] # Assemble toc_secnumbers to resolve section numbers on SingleHTML. # Merge all secnumbers to single secnumber. # @@ -990,6 +1047,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): return {self.config.master_doc: new_secnumbers} def assemble_toc_fignumbers(self): + # type: () -> Dict[unicode, Dict[Tuple[unicode, unicode], Dict[unicode, Tuple[int, ...]]]] # NOQA # Assemble toc_fignumbers to resolve figure numbers on SingleHTML. # Merge all fignumbers to single fignumber. # @@ -999,7 +1057,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): # # There are related codes in inline_all_toctres() and # HTMLTranslter#add_fignumber(). - new_fignumbers = {} + new_fignumbers = {} # type: Dict[Tuple[unicode, unicode], Dict[unicode, Tuple[int, ...]]] # NOQA # {u'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, u'bar': {'figure': {'id1': (3,)}}} for docname, fignumlist in iteritems(self.env.toc_fignumbers): for figtype, fignums in iteritems(fignumlist): @@ -1010,8 +1068,9 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): return {self.config.master_doc: new_fignumbers} def get_doc_context(self, docname, body, metatags): + # type: (unicode, unicode, Dict) -> Dict # no relation links... - toc = self.env.get_toctree_for(self.config.master_doc, self, False) + toc = self.env.get_toctree_for(self.config.master_doc, self, False) # type: Any # if there is no toctree, toc is None if toc: self.fix_refuris(toc) @@ -1036,6 +1095,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): ) def write(self, *ignored): + # type: (Any) -> None docnames = self.env.all_docs self.info(bold('preparing documents... '), nonl=True) @@ -1053,6 +1113,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): self.info('done') def finish(self): + # type: () -> None # no indices or search pages are supported self.info(bold('writing additional files...'), nonl=1) @@ -1083,18 +1144,19 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder): #: the serializing implementation to use. Set this to a module that #: implements a `dump`, `load`, `dumps` and `loads` functions #: (pickle, simplejson etc.) 
- implementation = None + implementation = None # type: Any implementation_dumps_unicode = False #: additional arguments for dump() additional_dump_args = () #: the filename for the global context file - globalcontext_filename = None + globalcontext_filename = None # type: unicode supported_image_types = ['image/svg+xml', 'image/png', 'image/gif', 'image/jpeg'] def init(self): + # type: () -> None self.config_hash = '' self.tags_hash = '' self.imagedir = '_images' @@ -1107,6 +1169,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder): self.use_index = self.get_builder_config('use_index', 'html') def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode if docname == 'index': return '' if docname.endswith(SEP + 'index'): @@ -1114,15 +1177,17 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder): return docname + SEP def dump_context(self, context, filename): + # type: (Dict, unicode) -> None if self.implementation_dumps_unicode: - f = codecs.open(filename, 'w', encoding='utf-8') + f = codecs.open(filename, 'w', encoding='utf-8') # type: ignore else: - f = open(filename, 'wb') + f = open(filename, 'wb') # type: ignore with f: self.implementation.dump(context, f, *self.additional_dump_args) def handle_page(self, pagename, ctx, templatename='page.html', outfilename=None, event_arg=None): + # type: (unicode, Dict, unicode, unicode, Any) -> None ctx['current_page_name'] = pagename self.add_sidebars(pagename, ctx) @@ -1146,6 +1211,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder): copyfile(self.env.doc2path(pagename), source_name) def handle_finish(self): + # type: () -> None # dump the global context outfilename = path.join(self.outdir, self.globalcontext_filename) self.dump_context(self.globalcontext, outfilename) @@ -1196,16 +1262,19 @@ class JSONHTMLBuilder(SerializingHTMLBuilder): searchindex_filename = 'searchindex.json' def init(self): + # type: () -> None SerializingHTMLBuilder.init(self) def validate_config_values(app): + # type: (Sphinx) -> None if app.config.html_translator_class: app.warn('html_translator_class is deprecated. 
' 'Use Sphinx.set_translator() API instead.') def setup(app): + # type: (Sphinx) -> None # builders app.add_builder(StandaloneHTMLBuilder) app.add_builder(DirectoryHTMLBuilder) diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index 0ef0d70d4..92eb39b74 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -13,6 +13,7 @@ import os from os import path from six import iteritems + from docutils import nodes from docutils.io import FileOutput from docutils.utils import new_document @@ -28,9 +29,14 @@ from sphinx.environment import NoUri from sphinx.util.nodes import inline_all_toctrees from sphinx.util.fileutil import copy_asset_file from sphinx.util.osutil import SEP, make_filename -from sphinx.util.console import bold, darkgreen +from sphinx.util.console import bold, darkgreen # type: ignore from sphinx.writers.latex import LaTeXWriter +if False: + # For type annotation + from typing import Any, Iterable, Tuple, Union # NOQA + from sphinx.application import Sphinx # NOQA + class LaTeXBuilder(Builder): """ @@ -41,44 +47,50 @@ class LaTeXBuilder(Builder): supported_image_types = ['application/pdf', 'image/png', 'image/jpeg'] def init(self): - self.docnames = [] - self.document_data = [] - self.usepackages = [] + # type: () -> None + self.docnames = [] # type: Iterable[unicode] + self.document_data = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode, bool]] # NOQA + self.usepackages = [] # type: List[unicode] texescape.init() def get_outdated_docs(self): + # type: () -> Union[unicode, List[unicode]] return 'all documents' # for now def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode if docname not in self.docnames: raise NoUri else: return '%' + docname def get_relative_uri(self, from_, to, typ=None): + # type: (unicode, unicode, unicode) -> unicode # ignore source path return self.get_target_uri(to, typ) def init_document_data(self): + # type: () -> None preliminary_document_data = [list(x) for x in self.config.latex_documents] if not preliminary_document_data: self.warn('no "latex_documents" config value found; no documents ' 'will be written') return # assign subdirs to titles - self.titles = [] + self.titles = [] # type: List[Tuple[unicode, unicode]] for entry in preliminary_document_data: docname = entry[0] if docname not in self.env.all_docs: self.warn('"latex_documents" config value references unknown ' 'document %s' % docname) continue - self.document_data.append(entry) + self.document_data.append(entry) # type: ignore if docname.endswith(SEP+'index'): docname = docname[:-5] self.titles.append((docname, entry[2])) def write_stylesheet(self): + # type: () -> None highlighter = highlighting.PygmentsBridge( 'latex', self.config.pygments_style, self.config.trim_doctest_flags) stylesheet = path.join(self.outdir, 'sphinxhighlight.sty') @@ -89,6 +101,7 @@ class LaTeXBuilder(Builder): f.write(highlighter.get_stylesheet()) def write(self, *ignored): + # type: (Any) -> None docwriter = LaTeXWriter(self) docsettings = OptionParser( defaults=self.env.settings, @@ -131,6 +144,7 @@ class LaTeXBuilder(Builder): self.info("done") def get_contentsname(self, indexfile): + # type: (unicode) -> unicode tree = self.env.get_doctree(indexfile) contentsname = None for toctree in tree.traverse(addnodes.toctree): @@ -141,6 +155,7 @@ class LaTeXBuilder(Builder): return contentsname def assemble_doctree(self, indexfile, toctree_only, appendices): + # type: (unicode, bool, List[unicode]) -> nodes.Node self.docnames = set([indexfile] + 
appendices) self.info(darkgreen(indexfile) + " ", nonl=1) tree = self.env.get_doctree(indexfile) @@ -184,6 +199,7 @@ class LaTeXBuilder(Builder): return largetree def finish(self): + # type: () -> None # copy image files if self.images: self.info(bold('copying images...'), nonl=1) @@ -220,17 +236,18 @@ class LaTeXBuilder(Builder): def validate_config_values(app): + # type: (Sphinx) -> None if app.config.latex_toplevel_sectioning not in (None, 'part', 'chapter', 'section'): app.warn('invalid latex_toplevel_sectioning, ignored: %s' % app.config.latex_toplevel_sectioning) - app.config.latex_toplevel_sectioning = None + app.config.latex_toplevel_sectioning = None # type: ignore if app.config.latex_use_parts: if app.config.latex_toplevel_sectioning: app.warn('latex_use_parts conflicts with latex_toplevel_sectioning, ignored.') else: app.warn('latex_use_parts is deprecated. Use latex_toplevel_sectioning instead.') - app.config.latex_toplevel_sectioning = 'parts' + app.config.latex_toplevel_sectioning = 'parts' # type: ignore if app.config.latex_use_modindex is not True: # changed by user app.warn('latex_use_modeindex is deprecated. Use latex_domain_indices instead.') @@ -269,6 +286,7 @@ def validate_config_values(app): def setup(app): + # type: (Sphinx) -> None app.add_builder(LaTeXBuilder) app.connect('builder-inited', validate_config_values) diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py index f49f4f9a3..0a3b2fe9d 100644 --- a/sphinx/builders/linkcheck.py +++ b/sphinx/builders/linkcheck.py @@ -16,9 +16,10 @@ import threading from os import path from requests.exceptions import HTTPError -from six.moves import queue +from six.moves import queue # type: ignore from six.moves.urllib.parse import unquote from six.moves.html_parser import HTMLParser + from docutils import nodes # 2015-06-25 barry@python.org. This exception was deprecated in Python 3.3 and @@ -26,28 +27,36 @@ from docutils import nodes # going to just remove it. If it doesn't exist, define an exception that will # never be caught but leaves the code in check_anchor() intact. try: - from six.moves.html_parser import HTMLParseError + from six.moves.html_parser import HTMLParseError # type: ignore except ImportError: - class HTMLParseError(Exception): + class HTMLParseError(Exception): # type: ignore pass from sphinx.builders import Builder from sphinx.util import encode_uri -from sphinx.util.console import purple, red, darkgreen, darkgray, \ - darkred, turquoise +from sphinx.util.console import ( # type: ignore + purple, red, darkgreen, darkgray, darkred, turquoise +) from sphinx.util.requests import requests, useragent_header, is_ssl_error +if False: + # For type annotation + from typing import Any, Tuple, Union # NOQA + from sphinx.application import Sphinx # NOQA + class AnchorCheckParser(HTMLParser): """Specialized HTML parser that looks for a specific anchor.""" def __init__(self, search_anchor): + # type: (unicode) -> None HTMLParser.__init__(self) self.search_anchor = search_anchor self.found = False def handle_starttag(self, tag, attrs): + # type: (Any, Dict[unicode, unicode]) -> None for key, value in attrs: if key in ('id', 'name') and value == self.search_anchor: self.found = True @@ -55,6 +64,7 @@ class AnchorCheckParser(HTMLParser): def check_anchor(response, anchor): + # type: (requests.Response, unicode) -> bool """Reads HTML data from a response object `response` searching for `anchor`. Returns True if anchor was found, False otherwise. 
""" @@ -81,12 +91,13 @@ class CheckExternalLinksBuilder(Builder): name = 'linkcheck' def init(self): + # type: () -> None self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore] self.anchors_ignore = [re.compile(x) for x in self.app.config.linkcheck_anchors_ignore] - self.good = set() - self.broken = {} - self.redirected = {} + self.good = set() # type: Set[unicode] + self.broken = {} # type: Dict[unicode, unicode] + self.redirected = {} # type: Dict[unicode, Tuple[unicode, int]] self.headers = dict(useragent_header) # set a timeout for non-responding servers socket.setdefaulttimeout(5.0) @@ -96,7 +107,7 @@ class CheckExternalLinksBuilder(Builder): # create queues and worker threads self.wqueue = queue.Queue() self.rqueue = queue.Queue() - self.workers = [] + self.workers = [] # type: List[threading.Thread] for i in range(self.app.config.linkcheck_workers): thread = threading.Thread(target=self.check_thread) thread.setDaemon(True) @@ -104,6 +115,7 @@ class CheckExternalLinksBuilder(Builder): self.workers.append(thread) def check_thread(self): + # type: () -> None kwargs = {} if self.app.config.linkcheck_timeout: kwargs['timeout'] = self.app.config.linkcheck_timeout @@ -111,6 +123,7 @@ class CheckExternalLinksBuilder(Builder): kwargs['allow_redirects'] = True def check_uri(): + # type: () -> Tuple[unicode, unicode, int] # split off anchor if '#' in uri: req_url, anchor = uri.split('#', 1) @@ -172,6 +185,7 @@ class CheckExternalLinksBuilder(Builder): return 'redirected', new_url, code def check(): + # type: () -> Tuple[unicode, unicode, int] # check for various conditions without bothering the network if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'ftp:')): return 'unchecked', '', 0 @@ -210,6 +224,7 @@ class CheckExternalLinksBuilder(Builder): self.rqueue.put((uri, docname, lineno, status, info, code)) def process_result(self, result): + # type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None uri, docname, lineno, status, info, code = result if status == 'unchecked': return @@ -247,15 +262,19 @@ class CheckExternalLinksBuilder(Builder): self.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info)) def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode return '' def get_outdated_docs(self): + # type: () -> Set[unicode] return self.env.found_docs def prepare_writing(self, docnames): + # type: (nodes.Node) -> None return def write_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None self.info() n = 0 for node in doctree.traverse(nodes.reference): @@ -279,17 +298,19 @@ class CheckExternalLinksBuilder(Builder): self.app.statuscode = 1 def write_entry(self, what, docname, line, uri): - output = codecs.open(path.join(self.outdir, 'output.txt'), 'a', 'utf-8') - output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None), - line, what, uri)) - output.close() + # type: (unicode, unicode, int, unicode) -> None + with codecs.open(path.join(self.outdir, 'output.txt'), 'a', 'utf-8') as output: # type: ignore # NOQA + output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None), + line, what, uri)) def finish(self): + # type: () -> None for worker in self.workers: self.wqueue.put((None, None, None), False) def setup(app): + # type: (Sphinx) -> None app.add_builder(CheckExternalLinksBuilder) app.add_config_value('linkcheck_ignore', [], None) diff --git a/sphinx/builders/manpage.py b/sphinx/builders/manpage.py index 248ed40b2..a208ac74e 100644 --- a/sphinx/builders/manpage.py +++ 
b/sphinx/builders/manpage.py @@ -12,6 +12,7 @@ from os import path from six import string_types + from docutils.io import FileOutput from docutils.frontend import OptionParser @@ -20,9 +21,14 @@ from sphinx.builders import Builder from sphinx.environment import NoUri from sphinx.util.nodes import inline_all_toctrees from sphinx.util.osutil import make_filename -from sphinx.util.console import bold, darkgreen +from sphinx.util.console import bold, darkgreen # type: ignore from sphinx.writers.manpage import ManualPageWriter +if False: + # For type annotation + from typing import Any, Union # NOQA + from sphinx.application import Sphinx # NOQA + class ManualPageBuilder(Builder): """ @@ -30,22 +36,26 @@ class ManualPageBuilder(Builder): """ name = 'man' format = 'man' - supported_image_types = [] + supported_image_types = [] # type: List[unicode] def init(self): + # type: () -> None if not self.config.man_pages: self.warn('no "man_pages" config value found; no manual pages ' 'will be written') def get_outdated_docs(self): + # type: () -> Union[unicode, List[unicode]] return 'all manpages' # for now def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode if typ == 'token': return '' raise NoUri def write(self, *ignored): + # type: (Any) -> None docwriter = ManualPageWriter(self) docsettings = OptionParser( defaults=self.env.settings, @@ -69,7 +79,7 @@ class ManualPageBuilder(Builder): encoding='utf-8') tree = self.env.get_doctree(docname) - docnames = set() + docnames = set() # type: Set[unicode] largetree = inline_all_toctrees(self, docnames, docname, tree, darkgreen, [docname]) self.info('} ', nonl=True) @@ -88,10 +98,12 @@ class ManualPageBuilder(Builder): self.info() def finish(self): + # type: () -> None pass def setup(app): + # type: (Sphinx) -> None app.add_builder(ManualPageBuilder) app.add_config_value('man_pages', diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py index c53b56657..18764b7fb 100644 --- a/sphinx/builders/qthelp.py +++ b/sphinx/builders/qthelp.py @@ -16,6 +16,7 @@ import posixpath from os import path from six import text_type + from docutils import nodes from sphinx import addnodes @@ -24,6 +25,11 @@ from sphinx.util import force_decode from sphinx.util.osutil import make_filename from sphinx.util.pycompat import htmlescape +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + _idpattern = re.compile( r'(?P.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$') @@ -115,6 +121,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): search = False def init(self): + # type: () -> None StandaloneHTMLBuilder.init(self) # the output files for HTML help must be .html only self.out_suffix = '.html' @@ -122,12 +129,15 @@ class QtHelpBuilder(StandaloneHTMLBuilder): # self.config.html_style = 'traditional.css' def get_theme_config(self): + # type: () -> Tuple[unicode, Dict] return self.config.qthelp_theme, self.config.qthelp_theme_options def handle_finish(self): + # type: () -> None self.build_qhp(self.outdir, self.config.qthelp_basename) def build_qhp(self, outdir, outname): + # type: (unicode, unicode) -> None self.info('writing project file...') # sections @@ -153,7 +163,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): new_sections.append(force_decode(section, None)) else: new_sections.append(section) - sections = u'\n'.join(new_sections) + sections = u'\n'.join(new_sections) # type: ignore # keywords keywords = [] @@ -161,7 +171,7 @@ class 
QtHelpBuilder(StandaloneHTMLBuilder): for (key, group) in index: for title, (refs, subitems, key_) in group: keywords.extend(self.build_keywords(title, refs, subitems)) - keywords = u'\n'.join(keywords) + keywords = u'\n'.join(keywords) # type: ignore # files if not outdir.endswith(os.sep): @@ -179,7 +189,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): filename = path.join(root, fn)[olen:] projectfiles.append(file_template % {'filename': htmlescape(filename)}) - projectfiles = '\n'.join(projectfiles) + projectfiles = '\n'.join(projectfiles) # type: ignore # it seems that the "namespace" may not contain non-alphanumeric # characters, and more than one successive dot, or leading/trailing @@ -190,8 +200,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder): nspace = nspace.lower() # write the project file - with codecs.open(path.join(outdir, outname+'.qhp'), 'w', 'utf-8') as f: - f.write(project_template % { + with codecs.open(path.join(outdir, outname+'.qhp'), 'w', 'utf-8') as f: # type: ignore + f.write(project_template % { # type: ignore 'outname': htmlescape(outname), 'title': htmlescape(self.config.html_title), 'version': htmlescape(self.config.version), @@ -207,14 +217,15 @@ class QtHelpBuilder(StandaloneHTMLBuilder): startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html') self.info('writing collection project file...') - with codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8') as f: - f.write(collection_template % { + with codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8') as f: # type: ignore # NOQA + f.write(collection_template % { # type: ignore 'outname': htmlescape(outname), 'title': htmlescape(self.config.html_short_title), 'homepage': htmlescape(homepage), 'startpage': htmlescape(startpage)}) def isdocnode(self, node): + # type: (nodes.Node) -> bool if not isinstance(node, nodes.list_item): return False if len(node.children) != 2: @@ -228,8 +239,9 @@ class QtHelpBuilder(StandaloneHTMLBuilder): return True def write_toc(self, node, indentlevel=4): + # type: (nodes.Node, int) -> List[unicode] # XXX this should return a Unicode string, not a bytestring - parts = [] + parts = [] # type: List[unicode] if self.isdocnode(node): refnode = node.children[0][0] link = refnode['refuri'] @@ -247,7 +259,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): link = node['refuri'] title = htmlescape(node.astext()).replace('"', '"') item = section_template % {'title': title, 'ref': link} - item = u' ' * 4 * indentlevel + item + item = u' ' * 4 * indentlevel + item # type: ignore parts.append(item.encode('ascii', 'xmlcharrefreplace')) elif isinstance(node, nodes.bullet_list): for subnode in node: @@ -259,7 +271,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder): return parts def keyword_item(self, name, ref): - matchobj = _idpattern.match(name) + # type: (unicode, Any) -> unicode + matchobj = _idpattern.match(name) # type: ignore if matchobj: groupdict = matchobj.groupdict() shortname = groupdict['title'] @@ -280,7 +293,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder): return item def build_keywords(self, title, refs, subitems): - keywords = [] + # type: (unicode, List[Any], Any) -> List[unicode] + keywords = [] # type: List[unicode] title = htmlescape(title) # if len(refs) == 0: # XXX @@ -304,6 +318,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): def setup(app): + # type: (Sphinx) -> None app.setup_extension('sphinx.builders.html') app.add_builder(QtHelpBuilder) diff --git a/sphinx/builders/texinfo.py b/sphinx/builders/texinfo.py index f070840b6..61b5d4e77 100644 --- 
a/sphinx/builders/texinfo.py +++ b/sphinx/builders/texinfo.py @@ -12,6 +12,7 @@ from os import path from six import iteritems + from docutils import nodes from docutils.io import FileOutput from docutils.utils import new_document @@ -23,9 +24,14 @@ from sphinx.builders import Builder from sphinx.environment import NoUri from sphinx.util.nodes import inline_all_toctrees from sphinx.util.osutil import SEP, copyfile, make_filename -from sphinx.util.console import bold, darkgreen +from sphinx.util.console import bold, darkgreen # type: ignore from sphinx.writers.texinfo import TexinfoWriter +if False: + # For type annotation + from sphinx.application import Sphinx # NOQA + from typing import Any, Iterable, Tuple, Union # NOQA + TEXINFO_MAKEFILE = '''\ # Makefile for Sphinx Texinfo output @@ -91,47 +97,53 @@ class TexinfoBuilder(Builder): 'image/gif'] def init(self): - self.docnames = [] - self.document_data = [] + # type: () -> None + self.docnames = [] # type: Iterable[unicode] + self.document_data = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode, unicode, unicode, bool]] # NOQA def get_outdated_docs(self): + # type: () -> Union[unicode, List[unicode]] return 'all documents' # for now def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode if docname not in self.docnames: raise NoUri else: return '%' + docname def get_relative_uri(self, from_, to, typ=None): + # type: (unicode, unicode, unicode) -> unicode # ignore source path return self.get_target_uri(to, typ) def init_document_data(self): + # type: () -> None preliminary_document_data = [list(x) for x in self.config.texinfo_documents] if not preliminary_document_data: self.warn('no "texinfo_documents" config value found; no documents ' 'will be written') return # assign subdirs to titles - self.titles = [] + self.titles = [] # type: List[Tuple[unicode, unicode]] for entry in preliminary_document_data: docname = entry[0] if docname not in self.env.all_docs: self.warn('"texinfo_documents" config value references unknown ' 'document %s' % docname) continue - self.document_data.append(entry) + self.document_data.append(entry) # type: ignore if docname.endswith(SEP+'index'): docname = docname[:-5] self.titles.append((docname, entry[2])) def write(self, *ignored): + # type: (Any) -> None self.init_document_data() for entry in self.document_data: docname, targetname, title, author = entry[:4] targetname += '.texi' - direntry = description = category = '' + direntry = description = category = '' # type: unicode if len(entry) > 6: direntry, description, category = entry[4:7] toctree_only = False @@ -164,6 +176,7 @@ class TexinfoBuilder(Builder): self.info("done") def assemble_doctree(self, indexfile, toctree_only, appendices): + # type: (unicode, bool, List[unicode]) -> nodes.Node self.docnames = set([indexfile] + appendices) self.info(darkgreen(indexfile) + " ", nonl=1) tree = self.env.get_doctree(indexfile) @@ -206,6 +219,7 @@ class TexinfoBuilder(Builder): return largetree def finish(self): + # type: () -> None # copy image files if self.images: self.info(bold('copying images...'), nonl=1) @@ -228,6 +242,7 @@ class TexinfoBuilder(Builder): def setup(app): + # type: (Sphinx) -> None app.add_builder(TexinfoBuilder) app.add_config_value('texinfo_documents', diff --git a/sphinx/builders/text.py b/sphinx/builders/text.py index 2daf8b043..e14ce215b 100644 --- a/sphinx/builders/text.py +++ b/sphinx/builders/text.py @@ -25,6 +25,8 @@ class TextBuilder(Builder): out_suffix = '.txt' allow_parallel = True + 
current_docname = None # type: unicode + def init(self): pass From 0d1875e2b5458e0f1f12aa4bfa538ca9c27fb908 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Fri, 11 Nov 2016 19:37:14 +0900 Subject: [PATCH 009/190] Add type-check annotations to sphinx.ext --- sphinx/ext/autodoc.py | 192 ++++++++++++++++++++++------- sphinx/ext/autosummary/__init__.py | 54 ++++++-- sphinx/ext/autosummary/generate.py | 42 +++++-- sphinx/ext/coverage.py | 30 +++-- sphinx/ext/doctest.py | 84 +++++++++---- sphinx/ext/graphviz.py | 25 +++- sphinx/ext/ifconfig.py | 14 ++- sphinx/ext/imgmath.py | 21 +++- sphinx/ext/inheritance_diagram.py | 39 ++++-- sphinx/ext/intersphinx.py | 65 +++++++--- sphinx/ext/linkcode.py | 9 +- sphinx/ext/mathbase.py | 36 +++++- sphinx/ext/napoleon/__init__.py | 17 ++- sphinx/ext/napoleon/docstring.py | 165 +++++++++++++++++-------- sphinx/ext/napoleon/iterators.py | 21 +++- sphinx/ext/pngmath.py | 21 +++- sphinx/ext/todo.py | 31 +++-- sphinx/ext/viewcode.py | 39 ++++-- 18 files changed, 684 insertions(+), 221 deletions(-) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 59e585678..9385c5c02 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -19,6 +19,7 @@ from types import FunctionType, BuiltinFunctionType, MethodType from six import PY2, iterkeys, iteritems, itervalues, text_type, class_types, \ string_types, StringIO + from docutils import nodes from docutils.utils import assemble_option_dict from docutils.statemachine import ViewList @@ -34,11 +35,18 @@ from sphinx.util.inspect import getargspec, isdescriptor, safe_getmembers, \ safe_getattr, object_description, is_builtin_class_method, isenumattribute from sphinx.util.docstrings import prepare_docstring +if False: + # For type annotation + from typing import Any, Callable, Iterator, Sequence, Tuple, Type, Union # NOQA + from types import ModuleType # NOQA + from docutils.utils import Reporter # NOQA + from sphinx.application import Sphinx # NOQA + try: if sys.version_info >= (3,): import typing else: - typing = None + typing = None # type: ignore except ImportError: typing = None @@ -56,28 +64,33 @@ py_ext_sig_re = re.compile( class DefDict(dict): """A dict that returns a default on nonexisting keys.""" def __init__(self, default): + # type: (Any) -> None dict.__init__(self) self.default = default def __getitem__(self, key): + # type: (Any) -> Any try: return dict.__getitem__(self, key) except KeyError: return self.default def __bool__(self): + # type: () -> bool # docutils check "if option_spec" return True __nonzero__ = __bool__ # for python2 compatibility def identity(x): + # type: (Any) -> Any return x class Options(dict): """A dict/attribute hybrid that returns None on nonexisting keys.""" def __getattr__(self, name): + # type: (unicode) -> Any try: return self[name.replace('_', '-')] except KeyError: @@ -90,22 +103,26 @@ class _MockModule(object): __path__ = '/dev/null' def __init__(self, *args, **kwargs): - self.__all__ = [] + # type: (Any, Any) -> None + self.__all__ = [] # type: List[str] def __call__(self, *args, **kwargs): + # type: (Any, Any) -> _MockModule if args and type(args[0]) in [FunctionType, MethodType]: # Appears to be a decorator, pass through unchanged return args[0] return _MockModule() def _append_submodule(self, submod): + # type: (str) -> None self.__all__.append(submod) @classmethod def __getattr__(cls, name): + # type: (unicode) -> Any if name[0] == name[0].upper(): # Not very good, we assume Uppercase names are classes... 
- mocktype = type(name, (), {}) + mocktype = type(name, (), {}) # type: ignore mocktype.__module__ = __name__ return mocktype else: @@ -113,15 +130,16 @@ class _MockModule(object): def mock_import(modname): + # type: (str) -> None if '.' in modname: pkg, _n, mods = modname.rpartition('.') mock_import(pkg) if isinstance(sys.modules[pkg], _MockModule): - sys.modules[pkg]._append_submodule(mods) + sys.modules[pkg]._append_submodule(mods) # type: ignore if modname not in sys.modules: mod = _MockModule() - sys.modules[modname] = mod + sys.modules[modname] = mod # type: ignore ALL = object() @@ -129,6 +147,7 @@ INSTANCEATTR = object() def members_option(arg): + # type: (Any) -> Union[object, List[unicode]] """Used to convert the :members: option to auto directives.""" if arg is None: return ALL @@ -136,6 +155,7 @@ def members_option(arg): def members_set_option(arg): + # type: (Any) -> Union[object, Set[unicode]] """Used to convert the :members: option to auto directives.""" if arg is None: return ALL @@ -146,6 +166,7 @@ SUPPRESS = object() def annotation_option(arg): + # type: (Any) -> Any if arg is None: # suppress showing the representation of the object return SUPPRESS @@ -154,6 +175,7 @@ def annotation_option(arg): def bool_option(arg): + # type: (Any) -> bool """Used to convert flag options to auto directives. (Instead of directives.flag(), which returns None). """ @@ -166,13 +188,16 @@ class AutodocReporter(object): and line number to a system message, as recorded in a ViewList. """ def __init__(self, viewlist, reporter): + # type: (ViewList, Reporter) -> None self.viewlist = viewlist self.reporter = reporter def __getattr__(self, name): + # type: (unicode) -> Any return getattr(self.reporter, name) def system_message(self, level, message, *children, **kwargs): + # type: (int, unicode, Any, Any) -> nodes.system_message if 'line' in kwargs and 'source' not in kwargs: try: source, line = self.viewlist.items[kwargs['line']] @@ -185,25 +210,31 @@ class AutodocReporter(object): *children, **kwargs) def debug(self, *args, **kwargs): + # type: (Any, Any) -> nodes.system_message if self.reporter.debug_flag: return self.system_message(0, *args, **kwargs) def info(self, *args, **kwargs): + # type: (Any, Any) -> nodes.system_message return self.system_message(1, *args, **kwargs) def warning(self, *args, **kwargs): + # type: (Any, Any) -> nodes.system_message return self.system_message(2, *args, **kwargs) def error(self, *args, **kwargs): + # type: (Any, Any) -> nodes.system_message return self.system_message(3, *args, **kwargs) def severe(self, *args, **kwargs): + # type: (Any, Any) -> nodes.system_message return self.system_message(4, *args, **kwargs) # Some useful event listener factories for autodoc-process-docstring. def cut_lines(pre, post=0, what=None): + # type: (int, int, unicode) -> Callable """Return a listener that removes the first *pre* and last *post* lines of every docstring. If *what* is a sequence of strings, only docstrings of a type in *what* will be processed. @@ -216,6 +247,7 @@ def cut_lines(pre, post=0, what=None): This can (and should) be used in place of :confval:`automodule_skip_lines`. 
""" def process(app, what_, name, obj, options, lines): + # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None if what and what_ not in what: return del lines[:pre] @@ -231,6 +263,7 @@ def cut_lines(pre, post=0, what=None): def between(marker, what=None, keepempty=False, exclude=False): + # type: (unicode, Sequence[unicode], bool, bool) -> Callable """Return a listener that either keeps, or if *exclude* is True excludes, lines between lines that match the *marker* regular expression. If no line matches, the resulting docstring would be empty, so no change will be made @@ -242,6 +275,7 @@ def between(marker, what=None, keepempty=False, exclude=False): marker_re = re.compile(marker) def process(app, what_, name, obj, options, lines): + # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None if what and what_ not in what: return deleted = 0 @@ -265,6 +299,7 @@ def between(marker, what=None, keepempty=False, exclude=False): def format_annotation(annotation): + # type: (Any) -> str """Return formatted representation of a type annotation. Show qualified names for types and additional details for types from @@ -272,18 +307,18 @@ def format_annotation(annotation): Displaying complex types from ``typing`` relies on its private API. """ - if typing and isinstance(annotation, typing.TypeVar): + if typing and isinstance(annotation, typing.TypeVar): # type: ignore return annotation.__name__ if annotation == Ellipsis: return '...' if not isinstance(annotation, type): return repr(annotation) - qualified_name = (annotation.__module__ + '.' + annotation.__qualname__ + qualified_name = (annotation.__module__ + '.' + annotation.__qualname__ # type: ignore if annotation else repr(annotation)) if annotation.__module__ == 'builtins': - return annotation.__qualname__ + return annotation.__qualname__ # type: ignore elif typing: if hasattr(typing, 'GenericMeta') and \ isinstance(annotation, typing.GenericMeta): @@ -344,6 +379,7 @@ def format_annotation(annotation): def formatargspec(function, args, varargs=None, varkw=None, defaults=None, kwonlyargs=(), kwonlydefaults={}, annotations={}): + # type: (Callable, Tuple[str, ...], str, str, Any, Tuple, Dict, Dict[str, Any]) -> str """Return a string representation of an ``inspect.FullArgSpec`` tuple. 
An enhanced version of ``inspect.formatargspec()`` that handles typing @@ -351,18 +387,20 @@ def formatargspec(function, args, varargs=None, varkw=None, defaults=None, """ def format_arg_with_annotation(name): + # type: (str) -> str if name in annotations: return '%s: %s' % (name, format_annotation(get_annotation(name))) return name def get_annotation(name): + # type: (str) -> str value = annotations[name] if isinstance(value, string_types): return introspected_hints.get(name, value) else: return value - introspected_hints = (typing.get_type_hints(function) + introspected_hints = (typing.get_type_hints(function) # type: ignore if typing and hasattr(function, '__code__') else {}) fd = StringIO() @@ -376,7 +414,7 @@ def formatargspec(function, args, varargs=None, varkw=None, defaults=None, arg_fd.write(format_arg_with_annotation(arg)) if defaults and i >= defaults_start: arg_fd.write(' = ' if arg in annotations else '=') - arg_fd.write(object_description(defaults[i - defaults_start])) + arg_fd.write(object_description(defaults[i - defaults_start])) # type: ignore formatted.append(arg_fd.getvalue()) if varargs: @@ -391,7 +429,7 @@ def formatargspec(function, args, varargs=None, varkw=None, defaults=None, arg_fd.write(format_arg_with_annotation(kwarg)) if kwonlydefaults and kwarg in kwonlydefaults: arg_fd.write(' = ' if kwarg in annotations else '=') - arg_fd.write(object_description(kwonlydefaults[kwarg])) + arg_fd.write(object_description(kwonlydefaults[kwarg])) # type: ignore formatted.append(arg_fd.getvalue()) if varkw: @@ -438,6 +476,7 @@ class Documenter(object): @staticmethod def get_attr(obj, name, *defargs): + # type: (Any, unicode, Any) -> Any """getattr() override for types such as Zope interfaces.""" for typ, func in iteritems(AutoDirective._special_attrgetters): if isinstance(obj, typ): @@ -446,10 +485,12 @@ class Documenter(object): @classmethod def can_document_member(cls, member, membername, isattr, parent): + # type: (Any, unicode, bool, Any) -> bool """Called to see if a member can be documented by this documenter.""" raise NotImplementedError('must be implemented in subclasses') def __init__(self, directive, name, indent=u''): + # type: (Directive, unicode, unicode) -> None self.directive = directive self.env = directive.env self.options = directive.genopt @@ -457,27 +498,29 @@ class Documenter(object): self.indent = indent # the module and object path within the module, and the fully # qualified name (all set after resolve_name succeeds) - self.modname = None - self.module = None - self.objpath = None - self.fullname = None + self.modname = None # type: str + self.module = None # type: ModuleType + self.objpath = None # type: List[unicode] + self.fullname = None # type: unicode # extra signature items (arguments and return annotation, # also set after resolve_name succeeds) - self.args = None - self.retann = None + self.args = None # type: unicode + self.retann = None # type: unicode # the object to document (set after import_object succeeds) - self.object = None - self.object_name = None + self.object = None # type: Any + self.object_name = None # type: unicode # the parent/owner of the object to document - self.parent = None + self.parent = None # type: Any # the module analyzer to get at attribute docs, or None - self.analyzer = None + self.analyzer = None # type: Any def add_line(self, line, source, *lineno): + # type: (unicode, unicode, int) -> None """Append one line of generated reST to the output.""" self.directive.result.append(self.indent + line, source, *lineno) 
def resolve_name(self, modname, parents, path, base): + # type: (str, Any, str, Any) -> Tuple[str, List[unicode]] """Resolve the module and name of the object to document given by the arguments and the current module/class. @@ -488,6 +531,7 @@ class Documenter(object): raise NotImplementedError('must be implemented in subclasses') def parse_name(self): + # type: () -> bool """Determine what module to import and what attribute to document. Returns True and sets *self.modname*, *self.objpath*, *self.fullname*, @@ -498,7 +542,7 @@ class Documenter(object): # an autogenerated one try: explicit_modname, path, base, args, retann = \ - py_ext_sig_re.match(self.name).groups() + py_ext_sig_re.match(self.name).groups() # type: ignore except AttributeError: self.directive.warn('invalid signature for auto%s (%r)' % (self.objtype, self.name)) @@ -512,8 +556,7 @@ class Documenter(object): modname = None parents = [] - self.modname, self.objpath = \ - self.resolve_name(modname, parents, path, base) + self.modname, self.objpath = self.resolve_name(modname, parents, path, base) if not self.modname: return False @@ -525,6 +568,7 @@ class Documenter(object): return True def import_object(self): + # type: () -> bool """Import the object given by *self.modname* and *self.objpath* and set it as *self.object*. @@ -568,13 +612,14 @@ class Documenter(object): errmsg += '; the following exception was raised:\n%s' % \ traceback.format_exc() if PY2: - errmsg = errmsg.decode('utf-8') + errmsg = errmsg.decode('utf-8') # type: ignore dbg(errmsg) self.directive.warn(errmsg) self.env.note_reread() return False def get_real_modname(self): + # type: () -> str """Get the real module name of an object to document. It can differ from the name of the module through which the object was @@ -583,6 +628,7 @@ class Documenter(object): return self.get_attr(self.object, '__module__', None) or self.modname def check_module(self): + # type: () -> bool """Check if *self.object* is really defined in the module given by *self.modname*. """ @@ -595,6 +641,7 @@ class Documenter(object): return True def format_args(self): + # type: () -> unicode """Format the argument signature of *self.object*. Should return None if the object does not have a signature. @@ -602,6 +649,7 @@ class Documenter(object): return None def format_name(self): + # type: () -> unicode """Format the name of *self.object*. This normally should be something that can be parsed by the generated @@ -613,13 +661,14 @@ class Documenter(object): return '.'.join(self.objpath) or self.modname def format_signature(self): + # type: () -> unicode """Format the signature (arguments and return annotation) of the object. Let the user process it via the ``autodoc-process-signature`` event. 
""" if self.args is not None: # signature given explicitly - args = "(%s)" % self.args + args = "(%s)" % self.args # type: unicode else: # try to introspect the signature try: @@ -643,6 +692,7 @@ class Documenter(object): return '' def add_directive_header(self, sig): + # type: (unicode) -> None """Add the directive header and options to the generated content.""" domain = getattr(self, 'domain', 'py') directive = getattr(self, 'directivetype', self.objtype) @@ -658,6 +708,7 @@ class Documenter(object): self.add_line(u' :module: %s' % self.modname, sourcename) def get_doc(self, encoding=None, ignore=1): + # type: (unicode, int) -> List[List[unicode]] """Decode and return lines of the docstring(s) for the object.""" docstring = self.get_attr(self.object, '__doc__', None) # make sure we have Unicode docstrings, then sanitize and split @@ -671,6 +722,7 @@ class Documenter(object): return [] def process_doc(self, docstrings): + # type: (List[List[unicode]]) -> Iterator[unicode] """Let the user process the docstrings before adding them.""" for docstringlines in docstrings: if self.env.app: @@ -682,6 +734,7 @@ class Documenter(object): yield line def get_sourcename(self): + # type: () -> unicode if self.analyzer: # prevent encoding errors when the file name is non-ASCII if not isinstance(self.analyzer.srcname, text_type): @@ -693,6 +746,7 @@ class Documenter(object): return u'docstring of %s' % self.fullname def add_content(self, more_content, no_docstring=False): + # type: (Any, bool) -> None """Add content from docstrings, attribute documentation and user.""" # set sourcename and add content from attribute documentation sourcename = self.get_sourcename() @@ -724,6 +778,7 @@ class Documenter(object): self.add_line(line, src[0], src[1]) def get_object_members(self, want_all): + # type: (bool) -> Tuple[bool, List[Tuple[unicode, object]]] """Return `(members_check_module, members)` where `members` is a list of `(membername, member)` pairs of the members of *self.object*. @@ -775,6 +830,7 @@ class Documenter(object): return False, sorted(members) def filter_members(self, members, want_all): + # type: (List[Tuple[unicode, Any]], bool) -> List[Tuple[unicode, Any, bool]] """Filter the given member list. Members are skipped if @@ -852,6 +908,7 @@ class Documenter(object): return ret def document_members(self, all_members=False): + # type: (bool) -> None """Generate reST for member documentation. If *all_members* is True, do all members, else those given by @@ -873,7 +930,7 @@ class Documenter(object): if membername not in self.options.exclude_members] # document non-skipped members - memberdocumenters = [] + memberdocumenters = [] # type: List[Tuple[Documenter, bool]] for (mname, member, isattr) in self.filter_members(members, want_all): classes = [cls for cls in itervalues(AutoDirective._registry) if cls.can_document_member(member, mname, isattr, self)] @@ -914,6 +971,7 @@ class Documenter(object): def generate(self, more_content=None, real_modname=None, check_module=False, all_members=False): + # type: (Any, str, bool, bool) -> None """Generate reST for the object given by *self.name*, and possibly for its members. 
@@ -1007,15 +1065,18 @@ class ModuleDocumenter(Documenter): @classmethod def can_document_member(cls, member, membername, isattr, parent): + # type: (Any, unicode, bool, Any) -> bool # don't document submodules automatically return False def resolve_name(self, modname, parents, path, base): + # type: (str, Any, str, Any) -> Tuple[str, List[unicode]] if modname is not None: self.directive.warn('"::" in automodule name doesn\'t make sense') return (path or '') + base, [] def parse_name(self): + # type: () -> bool ret = Documenter.parse_name(self) if self.args or self.retann: self.directive.warn('signature arguments or return annotation ' @@ -1023,6 +1084,7 @@ class ModuleDocumenter(Documenter): return ret def add_directive_header(self, sig): + # type: (unicode) -> None Documenter.add_directive_header(self, sig) sourcename = self.get_sourcename() @@ -1038,6 +1100,7 @@ class ModuleDocumenter(Documenter): self.add_line(u' :deprecated:', sourcename) def get_object_members(self, want_all): + # type: (bool) -> Tuple[bool, List[Tuple[unicode, object]]] if want_all: if not hasattr(self.object, '__all__'): # for implicit module members, check __module__ to avoid @@ -1074,6 +1137,7 @@ class ModuleLevelDocumenter(Documenter): classes, data/constants). """ def resolve_name(self, modname, parents, path, base): + # type: (str, Any, str, Any) -> Tuple[str, List[unicode]] if modname is None: if path: modname = path.rstrip('.') @@ -1094,6 +1158,7 @@ class ClassLevelDocumenter(Documenter): attributes). """ def resolve_name(self, modname, parents, path, base): + # type: (str, Any, str, Any) -> Tuple[str, List[unicode]] if modname is None: if path: mod_cls = path.rstrip('.') @@ -1127,6 +1192,7 @@ class DocstringSignatureMixin(object): """ def _find_signature(self, encoding=None): + # type: (unicode) -> Tuple[str, str] docstrings = self.get_doc(encoding) self._new_docstrings = docstrings[:] result = None @@ -1135,12 +1201,12 @@ class DocstringSignatureMixin(object): if not doclines: continue # match first line of docstring against signature RE - match = py_ext_sig_re.match(doclines[0]) + match = py_ext_sig_re.match(doclines[0]) # type: ignore if not match: continue exmod, path, base, args, retann = match.groups() # the base name must match ours - valid_names = [self.objpath[-1]] + valid_names = [self.objpath[-1]] # type: ignore if isinstance(self, ClassDocumenter): valid_names.append('__init__') if hasattr(self.object, '__mro__'): @@ -1155,19 +1221,21 @@ class DocstringSignatureMixin(object): return result def get_doc(self, encoding=None, ignore=1): + # type: (unicode, int) -> List[List[unicode]] lines = getattr(self, '_new_docstrings', None) if lines is not None: return lines - return Documenter.get_doc(self, encoding, ignore) + return Documenter.get_doc(self, encoding, ignore) # type: ignore def format_signature(self): - if self.args is None and self.env.config.autodoc_docstring_signature: + # type: () -> unicode + if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore # only act if a signature is not explicitly given already, and if # the feature is enabled result = self._find_signature() if result is not None: self.args, self.retann = result - return Documenter.format_signature(self) + return Documenter.format_signature(self) # type: ignore class DocstringStripSignatureMixin(DocstringSignatureMixin): @@ -1176,7 +1244,8 @@ class DocstringStripSignatureMixin(DocstringSignatureMixin): feature of stripping any function signature from the docstring. 
""" def format_signature(self): - if self.args is None and self.env.config.autodoc_docstring_signature: + # type: () -> unicode + if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore # only act if a signature is not explicitly given already, and if # the feature is enabled result = self._find_signature() @@ -1185,10 +1254,10 @@ class DocstringStripSignatureMixin(DocstringSignatureMixin): # DocstringSignatureMixin.format_signature. # Documenter.format_signature use self.args value to format. _args, self.retann = result - return Documenter.format_signature(self) + return Documenter.format_signature(self) # type: ignore -class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): +class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type: ignore """ Specialized Documenter subclass for functions. """ @@ -1197,9 +1266,11 @@ class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): @classmethod def can_document_member(cls, member, membername, isattr, parent): + # type: (Any, unicode, bool, Any) -> bool return isinstance(member, (FunctionType, BuiltinFunctionType)) def format_args(self): + # type: () -> unicode if inspect.isbuiltin(self.object) or \ inspect.ismethoddescriptor(self.object): # cannot introspect arguments of a C function or method @@ -1226,10 +1297,11 @@ class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): return args def document_members(self, all_members=False): + # type: (bool) -> None pass -class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): +class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type: ignore """ Specialized Documenter subclass for classes. """ @@ -1245,9 +1317,11 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): @classmethod def can_document_member(cls, member, membername, isattr, parent): + # type: (Any, unicode, bool, Any) -> bool return isinstance(member, class_types) def import_object(self): + # type: () -> Any ret = ModuleLevelDocumenter.import_object(self) # if the class is documented under another name, document it # as data/attribute @@ -1259,6 +1333,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): return ret def format_args(self): + # type: () -> unicode # for classes, the relevant signature is the __init__ method's initmeth = self.get_attr(self.object, '__init__', None) # classes without __init__ method, default __init__ or @@ -1278,12 +1353,14 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): return formatargspec(initmeth, *argspec) def format_signature(self): + # type: () -> unicode if self.doc_as_attr: return '' return DocstringSignatureMixin.format_signature(self) def add_directive_header(self, sig): + # type: (unicode) -> None if self.doc_as_attr: self.directivetype = 'attribute' Documenter.add_directive_header(self, sig) @@ -1301,6 +1378,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): sourcename) def get_doc(self, encoding=None, ignore=1): + # type: (unicode, int) -> List[List[unicode]] lines = getattr(self, '_new_docstrings', None) if lines is not None: return lines @@ -1346,6 +1424,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): return doc def add_content(self, more_content, no_docstring=False): + # type: (Any, bool) -> None if self.doc_as_attr: classname = safe_getattr(self.object, '__name__', None) if classname: @@ -1357,6 +1436,7 @@ class 
ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): ModuleLevelDocumenter.add_content(self, more_content) def document_members(self, all_members=False): + # type: (bool) -> None if self.doc_as_attr: return ModuleLevelDocumenter.document_members(self, all_members) @@ -1374,8 +1454,9 @@ class ExceptionDocumenter(ClassDocumenter): @classmethod def can_document_member(cls, member, membername, isattr, parent): + # type: (Any, unicode, bool, Any) -> bool return isinstance(member, class_types) and \ - issubclass(member, BaseException) + issubclass(member, BaseException) # type: ignore class DataDocumenter(ModuleLevelDocumenter): @@ -1390,9 +1471,11 @@ class DataDocumenter(ModuleLevelDocumenter): @classmethod def can_document_member(cls, member, membername, isattr, parent): + # type: (Any, unicode, bool, Any) -> bool return isinstance(parent, ModuleDocumenter) and isattr def add_directive_header(self, sig): + # type: (unicode) -> None ModuleLevelDocumenter.add_directive_header(self, sig) sourcename = self.get_sourcename() if not self.options.annotation: @@ -1409,10 +1492,11 @@ class DataDocumenter(ModuleLevelDocumenter): sourcename) def document_members(self, all_members=False): + # type: (bool) -> None pass -class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): +class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type: ignore """ Specialized Documenter subclass for methods (normal, static and class). """ @@ -1422,10 +1506,12 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): @classmethod def can_document_member(cls, member, membername, isattr, parent): + # type: (Any, unicode, bool, Any) -> bool return inspect.isroutine(member) and \ not isinstance(parent, ModuleDocumenter) def import_object(self): + # type: () -> Any ret = ClassLevelDocumenter.import_object(self) if not ret: return ret @@ -1433,11 +1519,11 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # to distinguish classmethod/staticmethod obj = self.parent.__dict__.get(self.object_name) - if isinstance(obj, classmethod): + if isinstance(obj, classmethod): # type: ignore self.directivetype = 'classmethod' # document class and static members before ordinary ones self.member_order = self.member_order - 1 - elif isinstance(obj, staticmethod): + elif isinstance(obj, staticmethod): # type: ignore self.directivetype = 'staticmethod' # document class and static members before ordinary ones self.member_order = self.member_order - 1 @@ -1446,6 +1532,7 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): return ret def format_args(self): + # type: () -> unicode if inspect.isbuiltin(self.object) or \ inspect.ismethoddescriptor(self.object): # can never get arguments of a C function or method @@ -1459,10 +1546,11 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): return args def document_members(self, all_members=False): + # type: (bool) -> None pass -class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter): +class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter): # type: ignore """ Specialized Documenter subclass for attributes. 
""" @@ -1479,6 +1567,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter): @classmethod def can_document_member(cls, member, membername, isattr, parent): + # type: (Any, unicode, bool, Any) -> bool isdatadesc = isdescriptor(member) and not \ isinstance(member, cls.method_types) and not \ type(member).__name__ in ("type", "method_descriptor", @@ -1488,9 +1577,11 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter): not isinstance(member, class_types)) def document_members(self, all_members=False): + # type: (bool) -> None pass def import_object(self): + # type: () -> Any ret = ClassLevelDocumenter.import_object(self) if isenumattribute(self.object): self.object = self.object.value @@ -1503,10 +1594,12 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter): return ret def get_real_modname(self): + # type: () -> str return self.get_attr(self.parent or self.object, '__module__', None) \ or self.modname def add_directive_header(self, sig): + # type: (unicode) -> None ClassLevelDocumenter.add_directive_header(self, sig) sourcename = self.get_sourcename() if not self.options.annotation: @@ -1524,6 +1617,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter): sourcename) def add_content(self, more_content, no_docstring=False): + # type: (Any, bool) -> None if not self._datadescriptor: # if it's not a data descriptor, its docstring is very probably the # wrong thing to display @@ -1545,10 +1639,12 @@ class InstanceAttributeDocumenter(AttributeDocumenter): @classmethod def can_document_member(cls, member, membername, isattr, parent): + # type: (Any, unicode, bool, Any) -> bool """This documents only INSTANCEATTR members.""" return isattr and (member is INSTANCEATTR) def import_object(self): + # type: () -> bool """Never import anything.""" # disguise as an attribute self.objtype = 'attribute' @@ -1556,6 +1652,7 @@ class InstanceAttributeDocumenter(AttributeDocumenter): return True def add_content(self, more_content, no_docstring=False): + # type: (Any, bool) -> None """Never try to get a docstring from the object.""" AttributeDocumenter.add_content(self, more_content, no_docstring=True) @@ -1576,10 +1673,10 @@ class AutoDirective(Directive): attributes of the parents. 
""" # a registry of objtype -> documenter class - _registry = {} + _registry = {} # type: Dict[unicode, Type[Documenter]] # a registry of type -> getattr function - _special_attrgetters = {} + _special_attrgetters = {} # type: Dict[Type, Callable] # flags that can be given in autodoc_default_flags _default_flags = set([ @@ -1597,13 +1694,16 @@ class AutoDirective(Directive): option_spec = DefDict(identity) def warn(self, msg): + # type: (unicode) -> None self.warnings.append(self.reporter.warning(msg, line=self.lineno)) def run(self): - self.filename_set = set() # a set of dependent filenames + # type: () -> List[nodes.Node] + self.filename_set = set() # type: Set[unicode] + # a set of dependent filenames self.reporter = self.state.document.reporter self.env = self.state.document.settings.env - self.warnings = [] + self.warnings = [] # type: List[unicode] self.result = ViewList() try: @@ -1667,6 +1767,7 @@ class AutoDirective(Directive): def add_documenter(cls): + # type: (Type[Documenter]) -> None """Register a new Documenter.""" if not issubclass(cls, Documenter): raise ExtensionError('autodoc documenter %r must be a subclass ' @@ -1679,6 +1780,7 @@ def add_documenter(cls): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_autodocumenter(ModuleDocumenter) app.add_autodocumenter(ClassDocumenter) app.add_autodocumenter(ExceptionDocumenter) diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index 030fec301..886623217 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -62,6 +62,7 @@ from six import string_types from types import ModuleType from six import text_type + from docutils.parsers.rst import directives from docutils.statemachine import ViewList from docutils import nodes @@ -73,6 +74,14 @@ from sphinx.util.compat import Directive from sphinx.pycode import ModuleAnalyzer, PycodeError from sphinx.ext.autodoc import Options +if False: + # For type annotation + from typing import Any, Tuple, Type, Union # NOQA + from docutils.utils import Inliner # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + from sphinx.ext.autodoc import Documenter # NOQA + # -- autosummary_toc node ------------------------------------------------------ @@ -81,6 +90,7 @@ class autosummary_toc(nodes.comment): def process_autosummary_toc(app, doctree): + # type: (Sphinx, nodes.Node) -> None """Insert items described in autosummary:: to the TOC tree, but do not generate the toctree:: list. """ @@ -105,11 +115,13 @@ def process_autosummary_toc(app, doctree): def autosummary_toc_visit_html(self, node): + # type: (nodes.NodeVisitor, autosummary_toc) -> None """Hide autosummary toctree list in HTML output.""" raise nodes.SkipNode def autosummary_noop(self, node): + # type: (nodes.NodeVisitor, nodes.Node) -> None pass @@ -120,6 +132,7 @@ class autosummary_table(nodes.comment): def autosummary_table_visit_html(self, node): + # type: (nodes.NodeVisitor, autosummary_table) -> None """Make the first column of the table non-breaking.""" try: tbody = node[0][0][-1] @@ -138,11 +151,12 @@ def autosummary_table_visit_html(self, node): # -- autodoc integration ------------------------------------------------------- class FakeDirective(object): - env = {} + env = {} # type: Dict genopt = Options() def get_documenter(obj, parent): + # type: (Any, Any) -> Type[Documenter] """Get an autodoc.Documenter class suitable for documenting the given object. 
@@ -198,13 +212,15 @@ class Autosummary(Directive): } def warn(self, msg): + # type: (unicode) -> None self.warnings.append(self.state.document.reporter.warning( msg, line=self.lineno)) def run(self): + # type: () -> List[nodes.Node] self.env = env = self.state.document.settings.env self.genopt = Options() - self.warnings = [] + self.warnings = [] # type: List[nodes.Node] self.result = ViewList() names = [x.strip().split()[0] for x in self.content @@ -237,6 +253,7 @@ class Autosummary(Directive): return self.warnings + nodes def get_items(self, names): + # type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode, unicode]] """Try to import the given names, and return a list of ``[(name, signature, summary_string, real_name), ...]``. """ @@ -244,7 +261,7 @@ class Autosummary(Directive): prefixes = get_import_prefixes_from_env(env) - items = [] + items = [] # type: List[Tuple[unicode, unicode, unicode, unicode]] max_item_chars = 50 @@ -334,6 +351,7 @@ class Autosummary(Directive): return items def get_table(self, items): + # type: (List[Tuple[unicode, unicode, unicode, unicode]]) -> List[Union[addnodes.tabular_col_spec, autosummary_table]] # NOQA """Generate a proper list of table nodes for autosummary:: directive. *items* is a list produced by :meth:`get_items`. @@ -352,6 +370,7 @@ class Autosummary(Directive): group.append(body) def append_row(*column_texts): + # type: (unicode) -> None row = nodes.row('') for text in column_texts: node = nodes.paragraph('') @@ -369,7 +388,7 @@ class Autosummary(Directive): for name, sig, summary, real_name in items: qualifier = 'obj' if 'nosignatures' not in self.options: - col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, rst.escape(sig)) + col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, rst.escape(sig)) # type: unicode # NOQA else: col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name) col2 = summary @@ -379,6 +398,7 @@ class Autosummary(Directive): def mangle_signature(sig, max_chars=30): + # type: (unicode, int) -> unicode """Reformat a function signature to a more compact form.""" s = re.sub(r"^\((.*)\)$", r"\1", sig).strip() @@ -388,12 +408,12 @@ def mangle_signature(sig, max_chars=30): s = re.sub(r"'[^']*'", "", s) # Parse the signature to arguments + options - args = [] - opts = [] + args = [] # type: List[unicode] + opts = [] # type: List[unicode] opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=") while s: - m = opt_re.search(s) + m = opt_re.search(s) # type: ignore if not m: # The rest are arguments args = s.split(', ') @@ -415,6 +435,7 @@ def mangle_signature(sig, max_chars=30): def limited_join(sep, items, max_chars=30, overflow_marker="..."): + # type: (unicode, List[unicode], int, unicode) -> unicode """Join a number of strings to one, limiting the length to *max_chars*. 
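
The ``mangle_signature`` hunk just above shows the other recurring idiom: a local that starts as an empty literal (``args = []``, ``opts = []``) tells the checker nothing about its element type, so the patch pins the type with a trailing comment at the point of assignment. A standalone sketch of that idiom (the function and sample input are made up):

    from typing import List, Tuple  # NOQA


    def split_options(pairs):
        # type: (List[Tuple[str, str]]) -> Tuple[List[str], List[str]]
        """Separate (name, value) pairs into bare names and name=value options."""
        names = []    # type: List[str]
        options = []  # type: List[str]
        for name, value in pairs:
            if value:
                options.append('%s=%s' % (name, value))
            else:
                names.append(name)
        return names, options


    print(split_options([('path', ''), ('encoding', 'utf-8')]))
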
If the string overflows this limit, replace the last fitting item by @@ -441,11 +462,12 @@ def limited_join(sep, items, max_chars=30, overflow_marker="..."): # -- Importing items ----------------------------------------------------------- def get_import_prefixes_from_env(env): + # type: (BuildEnvironment) -> List """ Obtain current Python import prefixes (for `import_by_name`) from ``document.env`` """ - prefixes = [None] + prefixes = [None] # type: List currmodule = env.ref_context.get('py:module') if currmodule: @@ -462,6 +484,7 @@ def get_import_prefixes_from_env(env): def import_by_name(name, prefixes=[None]): + # type: (unicode, List) -> Tuple[unicode, Any, Any, unicode] """Import a Python object that has the given *name*, under one of the *prefixes*. The first name that succeeds is used. """ @@ -480,6 +503,7 @@ def import_by_name(name, prefixes=[None]): def _import_by_name(name): + # type: (str) -> Tuple[Any, Any, unicode] """Import a Python object given its full name.""" try: name_parts = name.split('.') @@ -524,6 +548,7 @@ def _import_by_name(name): def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]): + # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA """Smart linking role. Expands to ':obj:`text`' if `text` is an object that can be imported; @@ -539,21 +564,24 @@ def autolink_role(typ, rawtext, etext, lineno, inliner, name, obj, parent, modname = import_by_name(pnode['reftarget'], prefixes) except ImportError: content = pnode[0] - r[0][0] = nodes.emphasis(rawtext, content[0].astext(), - classes=content['classes']) + r[0][0] = nodes.emphasis(rawtext, content[0].astext(), # type: ignore + classes=content['classes']) # type: ignore return r def get_rst_suffix(app): + # type: (Sphinx) -> unicode def get_supported_format(suffix): + # type: (unicode) -> Tuple[unicode] parser_class = app.config.source_parsers.get(suffix) if parser_class is None: return ('restructuredtext',) if isinstance(parser_class, string_types): - parser_class = import_object(parser_class, 'source parser') + parser_class = import_object(parser_class, 'source parser') # type: ignore return parser_class.supported - for suffix in app.config.source_suffix: + suffix = None # type: unicode + for suffix in app.config.source_suffix: # type: ignore if 'restructuredtext' in get_supported_format(suffix): return suffix @@ -561,6 +589,7 @@ def get_rst_suffix(app): def process_generate_options(app): + # type: (Sphinx) -> None genfiles = app.config.autosummary_generate if genfiles and not hasattr(genfiles, '__len__'): @@ -589,6 +618,7 @@ def process_generate_options(app): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] # I need autodoc app.setup_extension('sphinx.ext.autodoc') app.add_node(autosummary_toc, diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py index 8495da7b4..3e81a14a2 100644 --- a/sphinx/ext/autosummary/generate.py +++ b/sphinx/ext/autosummary/generate.py @@ -49,8 +49,16 @@ add_documenter(MethodDocumenter) add_documenter(AttributeDocumenter) add_documenter(InstanceAttributeDocumenter) +if False: + # For type annotation + from typing import Any, Callable, Tuple # NOQA + from sphinx import addnodes # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + def main(argv=sys.argv): + # type: (List[str]) -> None usage = """%prog [OPTIONS] SOURCEFILE ...""" p = optparse.OptionParser(usage.strip()) p.add_option("-o", 
"--output-dir", action="store", type="string", @@ -73,10 +81,12 @@ def main(argv=sys.argv): def _simple_info(msg): + # type: (unicode) -> None print(msg) def _simple_warn(msg): + # type: (unicode) -> None print('WARNING: ' + msg, file=sys.stderr) @@ -85,6 +95,7 @@ def _simple_warn(msg): def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', warn=_simple_warn, info=_simple_info, base_path=None, builder=None, template_dir=None): + # type: (List[unicode], unicode, unicode, Callable, Callable, unicode, Builder, unicode) -> None # NOQA showed_sources = list(sorted(sources)) if len(showed_sources) > 20: @@ -99,6 +110,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', sources = [os.path.join(base_path, filename) for filename in sources] # create our own templating environment + template_dirs = None # type: List[unicode] template_dirs = [os.path.join(package_dir, 'ext', 'autosummary', 'templates')] if builder is not None: @@ -154,7 +166,8 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', template = template_env.get_template('autosummary/base.rst') def get_members(obj, typ, include_public=[]): - items = [] + # type: (Any, unicode, List[unicode]) -> Tuple[List[unicode], List[unicode]] + items = [] # type: List[unicode] for name in dir(obj): try: documenter = get_documenter(safe_getattr(obj, name), @@ -167,7 +180,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', if x in include_public or not x.startswith('_')] return public, items - ns = {} + ns = {} # type: Dict[unicode, Any] if doc.objtype == 'module': ns['members'] = dir(obj) @@ -215,21 +228,23 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', # -- Finding documented entries in files --------------------------------------- def find_autosummary_in_files(filenames): + # type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode]] """Find out what items are documented in source/*.rst. See `find_autosummary_in_lines`. """ - documented = [] + documented = [] # type: List[Tuple[unicode, unicode, unicode]] for filename in filenames: - with codecs.open(filename, 'r', encoding='utf-8', + with codecs.open(filename, 'r', encoding='utf-8', # type: ignore errors='ignore') as f: lines = f.read().splitlines() - documented.extend(find_autosummary_in_lines(lines, + documented.extend(find_autosummary_in_lines(lines, # type: ignore filename=filename)) return documented def find_autosummary_in_docstring(name, module=None, filename=None): + # type: (unicode, Any, unicode) -> List[Tuple[unicode, unicode, unicode]] """Find out what items are documented in the given object's docstring. See `find_autosummary_in_lines`. @@ -249,6 +264,7 @@ def find_autosummary_in_docstring(name, module=None, filename=None): def find_autosummary_in_lines(lines, module=None, filename=None): + # type: (List[unicode], Any, unicode) -> List[Tuple[unicode, unicode, unicode]] """Find out what items appear in autosummary:: directives in the given lines. 
@@ -268,9 +284,9 @@ def find_autosummary_in_lines(lines, module=None, filename=None): toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$') - documented = [] + documented = [] # type: List[Tuple[unicode, unicode, unicode]] - toctree = None + toctree = None # type: unicode template = None current_module = module in_autosummary = False @@ -278,7 +294,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None): for line in lines: if in_autosummary: - m = toctree_arg_re.match(line) + m = toctree_arg_re.match(line) # type: ignore if m: toctree = m.group(1) if filename: @@ -286,7 +302,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None): toctree) continue - m = template_arg_re.match(line) + m = template_arg_re.match(line) # type: ignore if m: template = m.group(1).strip() continue @@ -294,7 +310,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None): if line.strip().startswith(':'): continue # skip options - m = autosummary_item_re.match(line) + m = autosummary_item_re.match(line) # type: ignore if m: name = m.group(1).strip() if name.startswith('~'): @@ -310,7 +326,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None): in_autosummary = False - m = autosummary_re.match(line) + m = autosummary_re.match(line) # type: ignore if m: in_autosummary = True base_indent = m.group(1) @@ -318,7 +334,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None): template = None continue - m = automodule_re.search(line) + m = automodule_re.search(line) # type: ignore if m: current_module = m.group(1).strip() # recurse into the automodule docstring @@ -326,7 +342,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None): current_module, filename=filename)) continue - m = module_re.match(line) + m = module_re.match(line) # type: ignore if m: current_module = m.group(2) continue diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py index c08b1e706..98681466c 100644 --- a/sphinx/ext/coverage.py +++ b/sphinx/ext/coverage.py @@ -22,14 +22,21 @@ import sphinx from sphinx.builders import Builder from sphinx.util.inspect import safe_getattr +if False: + # For type annotation + from typing import Any, Callable, IO, Pattern, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + # utility def write_header(f, text, char='-'): + # type:(IO, unicode, unicode) -> None f.write(text + '\n') f.write(char * len(text) + '\n') def compile_regex_list(name, exps, warnfunc): + # type: (unicode, unicode, Callable) -> List[Pattern] lst = [] for exp in exps: try: @@ -44,19 +51,20 @@ class CoverageBuilder(Builder): name = 'coverage' def init(self): - self.c_sourcefiles = [] + # type: () -> None + self.c_sourcefiles = [] # type: List[unicode] for pattern in self.config.coverage_c_path: pattern = path.join(self.srcdir, pattern) self.c_sourcefiles.extend(glob.glob(pattern)) - self.c_regexes = [] + self.c_regexes = [] # type: List[Tuple[unicode, Pattern]] for (name, exp) in self.config.coverage_c_regexes.items(): try: self.c_regexes.append((name, re.compile(exp))) except Exception: self.warn('invalid regex %r in coverage_c_regexes' % exp) - self.c_ignorexps = {} + self.c_ignorexps = {} # type: Dict[unicode, List[Pattern]] for (name, exps) in iteritems(self.config.coverage_ignore_c_items): self.c_ignorexps[name] = compile_regex_list( 'coverage_ignore_c_items', exps, self.warn) @@ -71,18 +79,21 @@ class CoverageBuilder(Builder): self.warn) def get_outdated_docs(self): + # type: () -> 
unicode return 'coverage overview' def write(self, *ignored): - self.py_undoc = {} + # type: (Any) -> None + self.py_undoc = {} # type: Dict[unicode, Dict[unicode, Any]] self.build_py_coverage() self.write_py_coverage() - self.c_undoc = {} + self.c_undoc = {} # type: Dict[unicode, Set[Tuple[unicode, unicode]]] self.build_c_coverage() self.write_c_coverage() def build_c_coverage(self): + # type: () -> None # Fetch all the info from the header files c_objects = self.env.domaindata['c']['objects'] for filename in self.c_sourcefiles: @@ -104,6 +115,7 @@ class CoverageBuilder(Builder): self.c_undoc[filename] = undoc def write_c_coverage(self): + # type: () -> None output_file = path.join(self.outdir, 'c.txt') with open(output_file, 'w') as op: if self.config.coverage_write_headline: @@ -117,6 +129,7 @@ class CoverageBuilder(Builder): op.write('\n') def build_py_coverage(self): + # type: () -> None objects = self.env.domaindata['py']['objects'] modules = self.env.domaindata['py']['modules'] @@ -140,7 +153,7 @@ class CoverageBuilder(Builder): continue funcs = [] - classes = {} + classes = {} # type: Dict[unicode, List[unicode]] for name, obj in inspect.getmembers(mod): # diverse module attributes are ignored: @@ -177,7 +190,7 @@ class CoverageBuilder(Builder): classes[name] = [] continue - attrs = [] + attrs = [] # type: List[unicode] for attr_name in dir(obj): if attr_name not in obj.__dict__: @@ -207,6 +220,7 @@ class CoverageBuilder(Builder): self.py_undoc[mod_name] = {'funcs': funcs, 'classes': classes} def write_py_coverage(self): + # type: () -> None output_file = path.join(self.outdir, 'python.txt') failed = [] with open(output_file, 'w') as op: @@ -242,6 +256,7 @@ class CoverageBuilder(Builder): op.writelines(' * %s -- %s\n' % x for x in failed) def finish(self): + # type: () -> None # dump the coverage data to a pickle file too picklepath = path.join(self.outdir, 'undoc.pickle') with open(picklepath, 'wb') as dumpfile: @@ -249,6 +264,7 @@ class CoverageBuilder(Builder): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(CoverageBuilder) app.add_config_value('coverage_ignore_modules', [], False) app.add_config_value('coverage_ignore_functions', [], False) diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index 244762b69..31ccb22d9 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -19,6 +19,7 @@ from os import path import doctest from six import itervalues, StringIO, binary_type, text_type, PY2 + from docutils import nodes from docutils.parsers.rst import directives @@ -27,14 +28,20 @@ from sphinx.builders import Builder from sphinx.util import force_decode from sphinx.util.nodes import set_source_info from sphinx.util.compat import Directive -from sphinx.util.console import bold +from sphinx.util.console import bold # type: ignore from sphinx.util.osutil import fs_encoding +if False: + # For type annotation + from typing import Any, Callable, IO, Iterable, Sequence, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + blankline_re = re.compile(r'^\s*<BLANKLINE>', re.MULTILINE) doctestopt_re = re.compile(r'#\s*doctest:.+$', re.MULTILINE) if PY2: def doctest_encode(text, encoding): + # type: (str, unicode) -> unicode if isinstance(text, text_type): text = text.encode(encoding) if text.startswith(codecs.BOM_UTF8): @@ -42,6 +49,7 @@ if PY2: return text else: def doctest_encode(text, encoding): + # type: (unicode, unicode) -> unicode return text @@ -58,6 +66,7 @@ class TestDirective(Directive): final_argument_whitespace = True def 
run(self): + # type: () -> List[nodes.Node] # use ordinary docutils nodes for test code: they get special attributes # so that our builder recognizes them, and the other builders are happy. code = '\n'.join(self.content) @@ -92,20 +101,20 @@ class TestDirective(Directive): option_strings = self.options['options'].replace(',', ' ').split() for option in option_strings: if (option[0] not in '+-' or option[1:] not in - doctest.OPTIONFLAGS_BY_NAME): + doctest.OPTIONFLAGS_BY_NAME): # type: ignore # XXX warn? continue - flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]] + flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]] # type: ignore node['options'][flag] = (option[0] == '+') return [node] class TestsetupDirective(TestDirective): - option_spec = {} + option_spec = {} # type: Dict class TestcleanupDirective(TestDirective): - option_spec = {} + option_spec = {} # type: Dict class DoctestDirective(TestDirective): @@ -128,19 +137,21 @@ class TestoutputDirective(TestDirective): } -parser = doctest.DocTestParser() +parser = doctest.DocTestParser() # type: ignore # helper classes class TestGroup(object): def __init__(self, name): + # type: (unicode) -> None self.name = name - self.setup = [] - self.tests = [] - self.cleanup = [] + self.setup = [] # type: List[TestCode] + self.tests = [] # type: List[List[TestCode]] + self.cleanup = [] # type: List[TestCode] def add_code(self, code, prepend=False): + # type: (TestCode, bool) -> None if code.type == 'testsetup': if prepend: self.setup.insert(0, code) @@ -158,30 +169,34 @@ class TestGroup(object): else: raise RuntimeError('invalid TestCode type') - def __repr__(self): + def __repr__(self): # type: ignore + # type: () -> unicode return 'TestGroup(name=%r, setup=%r, cleanup=%r, tests=%r)' % ( self.name, self.setup, self.cleanup, self.tests) class TestCode(object): def __init__(self, code, type, lineno, options=None): + # type: (unicode, unicode, int, Dict) -> None self.code = code self.type = type self.lineno = lineno self.options = options or {} - def __repr__(self): + def __repr__(self): # type: ignore + # type: () -> unicode return 'TestCode(%r, %r, %r, options=%r)' % ( self.code, self.type, self.lineno, self.options) -class SphinxDocTestRunner(doctest.DocTestRunner): +class SphinxDocTestRunner(doctest.DocTestRunner): # type: ignore def summarize(self, out, verbose=None): + # type: (Callable, bool) -> Tuple[int, int] string_io = StringIO() old_stdout = sys.stdout sys.stdout = string_io try: - res = doctest.DocTestRunner.summarize(self, verbose) + res = doctest.DocTestRunner.summarize(self, verbose) # type: ignore finally: sys.stdout = old_stdout out(string_io.getvalue()) @@ -189,6 +204,7 @@ class SphinxDocTestRunner(doctest.DocTestRunner): def _DocTestRunner__patched_linecache_getlines(self, filename, module_globals=None): + # type: (unicode, Any) -> Any # this is overridden from DocTestRunner adding the try-except below m = self._DocTestRunner__LINECACHE_FILENAME_RE.match(filename) if m and m.group('name') == self.test.name: @@ -213,6 +229,7 @@ class DocTestBuilder(Builder): name = 'doctest' def init(self): + # type: () -> None # default options self.opt = self.config.doctest_default_flags @@ -221,7 +238,7 @@ class DocTestBuilder(Builder): # for doctest examples but unusable for multi-statement code such # as setup code -- to be able to use doctest error reporting with # that code nevertheless, we monkey-patch the "compile" it uses. 
- doctest.compile = self.compile + doctest.compile = self.compile # type: ignore sys.path[0:0] = self.config.doctest_path @@ -236,7 +253,8 @@ class DocTestBuilder(Builder): date = time.strftime('%Y-%m-%d %H:%M:%S') - self.outfile = codecs.open(path.join(self.outdir, 'output.txt'), + self.outfile = None # type: IO + self.outfile = codecs.open(path.join(self.outdir, 'output.txt'), # type: ignore 'w', encoding='utf-8') self.outfile.write('''\ Results of doctest builder run on %s @@ -244,10 +262,12 @@ Results of doctest builder run on %s ''' % (date, '='*len(date))) def _out(self, text): + # type: (unicode) -> None self.info(text, nonl=True) self.outfile.write(text) def _warn_out(self, text): + # type: (unicode) -> None if self.app.quiet or self.app.warningiserror: self.warn(text) else: @@ -257,14 +277,18 @@ Results of doctest builder run on %s self.outfile.write(text) def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode return '' def get_outdated_docs(self): + # type: () -> Set[unicode] return self.env.found_docs def finish(self): + # type: () -> None # write executive summary def s(v): + # type: (int) -> unicode return v != 1 and 's' or '' repl = (self.total_tries, s(self.total_tries), self.total_failures, s(self.total_failures), @@ -284,6 +308,7 @@ Doctest summary self.app.statuscode = 1 def write(self, build_docnames, updated_docnames, method='update'): + # type: (Iterable[unicode], Sequence[unicode], unicode) -> None if build_docnames is None: build_docnames = sorted(self.env.all_docs) @@ -294,7 +319,8 @@ Doctest summary self.test_doc(docname, doctree) def test_doc(self, docname, doctree): - groups = {} + # type: (unicode, nodes.Node) -> None + groups = {} # type: Dict[unicode, TestGroup] add_to_all_groups = [] self.setup_runner = SphinxDocTestRunner(verbose=False, optionflags=self.opt) @@ -308,11 +334,13 @@ Doctest summary if self.config.doctest_test_doctest_blocks: def condition(node): + # type: (nodes.Node) -> bool return (isinstance(node, (nodes.literal_block, nodes.comment)) and 'testnodetype' in node) or \ isinstance(node, nodes.doctest_block) else: def condition(node): + # type: (nodes.Node) -> bool return isinstance(node, (nodes.literal_block, nodes.comment)) \ and 'testnodetype' in node for node in doctree.traverse(condition): @@ -366,26 +394,29 @@ Doctest summary self.cleanup_tries += res_t def compile(self, code, name, type, flags, dont_inherit): + # type: (unicode, unicode, unicode, Any, bool) -> Any return compile(code, name, self.type, flags, dont_inherit) def test_group(self, group, filename): + # type: (TestGroup, unicode) -> None if PY2: filename_str = filename.encode(fs_encoding) else: filename_str = filename - ns = {} + ns = {} # type: Dict def run_setup_cleanup(runner, testcodes, what): + # type: (Any, List[TestCode], Any) -> bool examples = [] for testcode in testcodes: - examples.append(doctest.Example( - doctest_encode(testcode.code, self.env.config.source_encoding), '', + examples.append(doctest.Example( # type: ignore + doctest_encode(testcode.code, self.env.config.source_encoding), '', # type: ignore # NOQA lineno=testcode.lineno)) if not examples: return True # simulate a doctest with the code - sim_doctest = doctest.DocTest(examples, {}, + sim_doctest = doctest.DocTest(examples, {}, # type: ignore '%s (%s code)' % (group.name, what), filename_str, 0, None) sim_doctest.globs = ns @@ -407,7 +438,7 @@ Doctest summary # ordinary doctests (code/output interleaved) try: test = parser.get_doctest( - doctest_encode(code[0].code, 
self.env.config.source_encoding), {}, + doctest_encode(code[0].code, self.env.config.source_encoding), {}, # type: ignore # NOQA group.name, filename_str, code[0].lineno) except Exception: self.warn('ignoring invalid doctest code: %r' % @@ -427,19 +458,19 @@ Doctest summary output = code[1] and code[1].code or '' options = code[1] and code[1].options or {} # disable <BLANKLINE> processing as it is not needed - options[doctest.DONT_ACCEPT_BLANKLINE] = True + options[doctest.DONT_ACCEPT_BLANKLINE] = True # type: ignore # find out if we're testing an exception m = parser._EXCEPTION_RE.match(output) if m: exc_msg = m.group('msg') else: exc_msg = None - example = doctest.Example( - doctest_encode(code[0].code, self.env.config.source_encoding), output, + example = doctest.Example( # type: ignore + doctest_encode(code[0].code, self.env.config.source_encoding), output, # type: ignore # NOQA exc_msg=exc_msg, lineno=code[0].lineno, options=options) - test = doctest.DocTest([example], {}, group.name, + test = doctest.DocTest([example], {}, group.name, # type: ignore filename_str, code[0].lineno, None) self.type = 'exec' # multiple statements again # DocTest.__init__ copies the globs namespace, which we don't want @@ -452,6 +483,7 @@ Doctest summary def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_directive('testsetup', TestsetupDirective) app.add_directive('testcleanup', TestcleanupDirective) app.add_directive('doctest', DoctestDirective) @@ -465,6 +497,6 @@ def setup(app): app.add_config_value('doctest_global_cleanup', '', False) app.add_config_value( 'doctest_default_flags', - doctest.DONT_ACCEPT_TRUE_FOR_1 | doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL, + doctest.DONT_ACCEPT_TRUE_FOR_1 | doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL, # type: ignore # NOQA False) return {'version': sphinx.__display_version__, 'parallel_read_safe': True} diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py index 47c8dcfff..0c29777dd 100644 --- a/sphinx/ext/graphviz.py +++ b/sphinx/ext/graphviz.py @@ -18,6 +18,7 @@ from subprocess import Popen, PIPE from hashlib import sha1 from six import text_type + from docutils import nodes from docutils.parsers.rst import directives from docutils.statemachine import ViewList @@ -29,6 +30,11 @@ from sphinx.util.i18n import search_image_for_language from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL from sphinx.util.compat import Directive +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + mapname_re = re.compile(r'<map id="(.*?)"') @@ -42,6 +48,7 @@ class graphviz(nodes.General, nodes.Inline, nodes.Element): def figure_wrapper(directive, node, caption): + # type: (Directive, nodes.Node, unicode) -> nodes.figure figure_node = nodes.figure('', node) if 'align' in node: figure_node['align'] = node.attributes.pop('align') @@ -58,6 +65,7 @@ def figure_wrapper(directive, node, caption): def align_spec(argument): + # type: (Any) -> bool return directives.choice(argument, ('left', 'center', 'right')) @@ -79,6 +87,7 @@ class Graphviz(Directive): } def run(self): + # type: () -> List[nodes.Node] if self.arguments: document = self.state.document if self.content: @@ -140,6 +149,7 @@ class GraphvizSimple(Directive): } def run(self): + # type: () -> List[nodes.Node] node = graphviz() node['code'] = '%s %s {\n%s\n}\n' % \ (self.name, self.arguments[0], '\n'.join(self.content)) @@ -162,6 +172,7 @@ class GraphvizSimple(Directive): def render_dot(self, code, options, format, 
prefix='graphviz'): + # type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode] """Render graphviz code into a PNG or PDF output file.""" graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot) hashkey = (code + str(options) + str(graphviz_dot) + @@ -221,6 +232,7 @@ def render_dot(self, code, options, format, prefix='graphviz'): def warn_for_deprecated_option(self, node): + # type: (nodes.NodeVisitor, graphviz) -> None if hasattr(self.builder, '_graphviz_warned_inline'): return @@ -231,6 +243,7 @@ def warn_for_deprecated_option(self, node): def render_dot_html(self, node, code, options, prefix='graphviz', imgcls=None, alt=None): + # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA format = self.builder.config.graphviz_output_format try: if format not in ('png', 'svg'): @@ -263,7 +276,7 @@ def render_dot_html(self, node, code, options, prefix='graphviz', (fname, alt, imgcss)) else: # has a map: get the name of the map and connect the parts - mapname = mapname_re.match(imgmap[0].decode('utf-8')).group(1) + mapname = mapname_re.match(imgmap[0].decode('utf-8')).group(1) # type: ignore self.body.append('<img src="%s" alt="%s" usemap="#%s" %s/>\n' % (fname, alt, mapname, imgcss)) self.body.extend([item.decode('utf-8') for item in imgmap]) @@ -274,11 +287,13 @@ def render_dot_html(self, node, code, options, prefix='graphviz', def html_visit_graphviz(self, node): + # type: (nodes.NodeVisitor, graphviz) -> None warn_for_deprecated_option(self, node) render_dot_html(self, node, node['code'], node['options']) def render_dot_latex(self, node, code, options, prefix='graphviz'): + # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None try: fname, outfn = render_dot(self, code, options, 'pdf', prefix) except GraphvizError as exc: @@ -292,7 +307,7 @@ def render_dot_latex(self, node, code, options, prefix='graphviz'): para_separator = '\n' if fname is not None: - post = None + post = None # type: unicode if not is_inline and 'align' in node: if node['align'] == 'left': self.body.append('{') @@ -309,11 +324,13 @@ def render_dot_latex(self, node, code, options, prefix='graphviz'): def latex_visit_graphviz(self, node): + # type: (nodes.NodeVisitor, graphviz) -> None warn_for_deprecated_option(self, node) render_dot_latex(self, node, node['code'], node['options']) def render_dot_texinfo(self, node, code, options, prefix='graphviz'): + # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None try: fname, outfn = render_dot(self, code, options, 'png', prefix) except GraphvizError as exc: @@ -325,11 +342,13 @@ def render_dot_texinfo(self, node, code, options, prefix='graphviz'): def texinfo_visit_graphviz(self, node): + # type: (nodes.NodeVisitor, graphviz) -> None warn_for_deprecated_option(self, node) render_dot_texinfo(self, node, node['code'], node['options']) def text_visit_graphviz(self, node): + # type: (nodes.NodeVisitor, graphviz) -> None warn_for_deprecated_option(self, node) if 'alt' in node.attributes: self.add_text(_('[graph: %s]') % node['alt']) @@ -339,6 +358,7 @@ def text_visit_graphviz(self, node): def man_visit_graphviz(self, node): + # type: (nodes.NodeVisitor, graphviz) -> None warn_for_deprecated_option(self, node) if 'alt' in node.attributes: self.body.append(_('[graph: %s]') % node['alt']) @@ -348,6 +368,7 @@ def man_visit_graphviz(self, node): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_node(graphviz, html=(html_visit_graphviz, 
None), latex=(latex_visit_graphviz, None), diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py index 74580fb4a..923e2d080 100644 --- a/sphinx/ext/ifconfig.py +++ b/sphinx/ext/ifconfig.py @@ -26,6 +26,11 @@ import sphinx from sphinx.util.nodes import set_source_info from sphinx.util.compat import Directive +if False: + # For type annotation + from typing import Any # NOQA + from sphinx.application import Sphinx # NOQA + class ifconfig(nodes.Element): pass @@ -37,9 +42,10 @@ class IfConfig(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] node = ifconfig() node.document = self.state.document set_source_info(self, node) @@ -50,16 +56,17 @@ class IfConfig(Directive): def process_ifconfig_nodes(app, doctree, docname): + # type: (Sphinx, nodes.Node, unicode) -> None ns = dict((k, app.config[k]) for k in app.config.values) ns.update(app.config.__dict__.copy()) ns['builder'] = app.builder.name for node in doctree.traverse(ifconfig): try: - res = eval(node['expr'], ns) + res = eval(node['expr'], ns) # type: ignore except Exception as err: # handle exceptions in a clean fashion from traceback import format_exception_only - msg = ''.join(format_exception_only(err.__class__, err)) + msg = ''.join(format_exception_only(err.__class__, err)) # type: ignore newnode = doctree.reporter.error('Exception occured in ' 'ifconfig expression: \n%s' % msg, base_node=node) @@ -72,6 +79,7 @@ def process_ifconfig_nodes(app, doctree, docname): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_node(ifconfig) app.add_directive('ifconfig', IfConfig) app.connect('doctree-resolved', process_ifconfig_nodes) diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py index e5b8b26c5..9b75e7ee3 100644 --- a/sphinx/ext/imgmath.py +++ b/sphinx/ext/imgmath.py @@ -19,6 +19,7 @@ from subprocess import Popen, PIPE from hashlib import sha1 from six import text_type + from docutils import nodes import sphinx @@ -29,11 +30,18 @@ from sphinx.util.osutil import ensuredir, ENOENT, cd from sphinx.util.pycompat import sys_encoding from sphinx.ext.mathbase import setup_math as mathbase_setup, wrap_displaymath +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.ext.mathbase import math as math_node, displaymath # NOQA + class MathExtError(SphinxError): category = 'Math extension error' def __init__(self, msg, stderr=None, stdout=None): + # type: (unicode, unicode, unicode) -> None if stderr: msg += '\n[stderr]\n' + stderr.decode(sys_encoding, 'replace') if stdout: @@ -72,6 +80,7 @@ depth_re = re.compile(br'\[\d+ depth=(-?\d+)\]') def render_math(self, math): + # type: (nodes.NodeVisitor, unicode) -> Tuple[unicode, int] """Render the LaTeX math expression *math* using latex and dvipng or dvisvgm. 
@@ -116,9 +125,8 @@ def render_math(self, math): else: tempdir = self.builder._imgmath_tempdir - tf = codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8') - tf.write(latex) - tf.close() + with codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8') as tf: # type: ignore + tf.write(latex) # build latex command; old versions of latex don't have the # --output-directory option, so we have to manually chdir to the @@ -199,23 +207,26 @@ def render_math(self, math): def cleanup_tempdir(app, exc): + # type: (Sphinx, Exception) -> None if exc: return if not hasattr(app.builder, '_imgmath_tempdir'): return try: - shutil.rmtree(app.builder._mathpng_tempdir) + shutil.rmtree(app.builder._mathpng_tempdir) # type: ignore except Exception: pass def get_tooltip(self, node): + # type: (nodes.NodeVisitor, math_node) -> unicode if self.builder.config.imgmath_add_tooltips: return ' alt="%s"' % self.encode(node['latex']).strip() return '' def html_visit_math(self, node): + # type: (nodes.NodeVisitor, math_node) -> None try: fname, depth = render_math(self, '$'+node['latex']+'$') except MathExtError as exc: @@ -238,6 +249,7 @@ def html_visit_math(self, node): def html_visit_displaymath(self, node): + # type: (nodes.NodeVisitor, displaymath) -> None if node['nowrap']: latex = node['latex'] else: @@ -268,6 +280,7 @@ def html_visit_displaymath(self, node): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] try: mathbase_setup(app, (html_visit_math, None), (html_visit_displaymath, None)) except ExtensionError: diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py index 11af67dc5..341780473 100644 --- a/sphinx/ext/inheritance_diagram.py +++ b/sphinx/ext/inheritance_diagram.py @@ -42,10 +42,10 @@ import inspect try: from hashlib import md5 except ImportError: - from md5 import md5 + from md5 import md5 # type: ignore from six import text_type -from six.moves import builtins +from six.moves import builtins # type: ignore from docutils import nodes from docutils.parsers.rst import directives @@ -57,6 +57,12 @@ from sphinx.pycode import ModuleAnalyzer from sphinx.util import force_decode from sphinx.util.compat import Directive +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + class_sig_re = re.compile(r'''^([\w.]*\.)? # module names (\w+) \s* $ # class/final module name @@ -75,6 +81,7 @@ class InheritanceGraph(object): """ def __init__(self, class_names, currmodule, show_builtins=False, private_bases=False, parts=0): + # type: (unicode, str, bool, bool, int) -> None """*class_names* is a list of child classes to show bases from. 
If *show_builtins* is True, then Python builtins will be shown @@ -89,9 +96,10 @@ class InheritanceGraph(object): 'inheritance diagram') def _import_class_or_module(self, name, currmodule): + # type: (unicode, str) -> Any """Import a class using its fully-qualified *name*.""" try: - path, base = class_sig_re.match(name).groups() + path, base = class_sig_re.match(name).groups() # type: ignore except (AttributeError, ValueError): raise InheritanceException('Invalid class or module %r specified ' 'for inheritance diagram' % name) @@ -126,7 +134,7 @@ class InheritanceGraph(object): return [todoc] elif inspect.ismodule(todoc): classes = [] - for cls in todoc.__dict__.values(): + for cls in todoc.__dict__.values(): # type: ignore if inspect.isclass(cls) and cls.__module__ == todoc.__name__: classes.append(cls) return classes @@ -134,13 +142,15 @@ class InheritanceGraph(object): 'not a class or module' % name) def _import_classes(self, class_names, currmodule): + # type: (unicode, str) -> List[Any] """Import a list of classes.""" - classes = [] + classes = [] # type: List[Any] for name in class_names: classes.extend(self._import_class_or_module(name, currmodule)) return classes def _class_info(self, classes, show_builtins, private_bases, parts): + # type: (List[Any], bool, bool, int) -> List[Tuple[unicode, unicode, List[unicode], unicode]] # NOQA """Return name and bases for all classes that are ancestors of *classes*. @@ -151,6 +161,7 @@ class InheritanceGraph(object): py_builtins = vars(builtins).values() def recurse(cls): + # type: (Any) -> None if not show_builtins and cls in py_builtins: return if not private_bases and cls.__name__.startswith('_'): @@ -172,7 +183,7 @@ class InheritanceGraph(object): except Exception: # might raise AttributeError for strange classes pass - baselist = [] + baselist = [] # type: List[unicode] all_classes[cls] = (nodename, fullname, baselist, tooltip) for base in cls.__bases__: if not show_builtins and base in py_builtins: @@ -189,6 +200,7 @@ class InheritanceGraph(object): return list(all_classes.values()) def class_name(self, cls, parts=0): + # type: (Any, int) -> unicode """Given a class object, return a fully-qualified name. This works for things I've tested in matplotlib so far, but may not be @@ -205,8 +217,9 @@ class InheritanceGraph(object): return '.'.join(name_parts[-parts:]) def get_all_class_names(self): + # type: () -> List[unicode] """Get all of the class names involved in the graph.""" - return [fullname for (_, fullname, _, _) in self.class_info] + return [fullname for (_, fullname, _, _) in self.class_info] # type: ignore # These are the default attrs for graphviz default_graph_attrs = { @@ -227,13 +240,16 @@ class InheritanceGraph(object): } def _format_node_attrs(self, attrs): + # type: (Dict) -> unicode return ','.join(['%s=%s' % x for x in sorted(attrs.items())]) def _format_graph_attrs(self, attrs): + # type: (Dict) -> unicode return ''.join(['%s=%s;\n' % x for x in sorted(attrs.items())]) def generate_dot(self, name, urls={}, env=None, graph_attrs={}, node_attrs={}, edge_attrs={}): + # type: (unicode, Dict, BuildEnvironment, Dict, Dict, Dict) -> unicode """Generate a graphviz dot graph from the classes that were passed in to __init__. 
@@ -255,7 +271,7 @@ class InheritanceGraph(object): n_attrs.update(env.config.inheritance_node_attrs) e_attrs.update(env.config.inheritance_edge_attrs) - res = [] + res = [] # type: List[unicode] res.append('digraph %s {\n' % name) res.append(self._format_graph_attrs(g_attrs)) @@ -301,6 +317,7 @@ class InheritanceDiagram(Directive): } def run(self): + # type: () -> List[nodes.Node] node = inheritance_diagram() node.document = self.state.document env = self.state.document.settings.env @@ -340,11 +357,13 @@ class InheritanceDiagram(Directive): def get_graph_hash(node): + # type: (inheritance_diagram) -> unicode encoded = (node['content'] + str(node['parts'])).encode('utf-8') return md5(encoded).hexdigest()[-10:] def html_visit_inheritance_diagram(self, node): + # type: (nodes.NodeVisitor, inheritance_diagram) -> None """ Output the graph for HTML. This will insert a PNG with clickable image map. @@ -377,6 +396,7 @@ def html_visit_inheritance_diagram(self, node): def latex_visit_inheritance_diagram(self, node): + # type: (nodes.NodeVisitor, inheritance_diagram) -> None """ Output the graph for LaTeX. This will insert a PDF. """ @@ -392,6 +412,7 @@ def latex_visit_inheritance_diagram(self, node): def texinfo_visit_inheritance_diagram(self, node): + # type: (nodes.NodeVisitor, inheritance_diagram) -> None """ Output the graph for Texinfo. This will insert a PNG. """ @@ -407,10 +428,12 @@ def texinfo_visit_inheritance_diagram(self, node): def skip(self, node): + # type: (nodes.NodeVisitor, inheritance_diagram) -> None raise nodes.SkipNode def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.setup_extension('sphinx.ext.graphviz') app.add_node( inheritance_diagram, diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py index 4ef7e4b9b..df561204e 100644 --- a/sphinx/ext/intersphinx.py +++ b/sphinx/ext/intersphinx.py @@ -33,8 +33,9 @@ import posixpath from os import path import re -from six import iteritems, string_types +from six import PY3, iteritems, string_types from six.moves.urllib.parse import urlsplit, urlunsplit + from docutils import nodes from docutils.utils import relative_path @@ -43,13 +44,25 @@ from sphinx.locale import _ from sphinx.builders.html import INVENTORY_FILENAME from sphinx.util.requests import requests, useragent_header +if False: + # For type annotation + from typing import Any, Callable, Dict, IO, Iterator, Tuple, Union # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + + if PY3: + unicode = str + + Inventory = Dict[unicode, Dict[unicode, Tuple[unicode, unicode, unicode, unicode]]] + UTF8StreamReader = codecs.lookup('utf-8')[2] def read_inventory_v1(f, uri, join): + # type: (IO, unicode, Callable) -> Inventory f = UTF8StreamReader(f) - invdata = {} + invdata = {} # type: Inventory line = next(f) projname = line.rstrip()[11:] line = next(f) @@ -69,7 +82,8 @@ def read_inventory_v1(f, uri, join): def read_inventory_v2(f, uri, join, bufsize=16*1024): - invdata = {} + # type: (IO, unicode, Callable, int) -> Inventory + invdata = {} # type: Inventory line = f.readline() projname = line.rstrip()[11:].decode('utf-8') line = f.readline() @@ -79,12 +93,14 @@ def read_inventory_v2(f, uri, join, bufsize=16*1024): raise ValueError def read_chunks(): + # type: () -> Iterator[bytes] decompressor = zlib.decompressobj() for chunk in iter(lambda: f.read(bufsize), b''): yield decompressor.decompress(chunk) yield decompressor.flush() def split_lines(iter): + # type: (Iterator[bytes]) -> Iterator[unicode] buf = 
b'' for chunk in iter: buf += chunk @@ -117,6 +133,7 @@ def read_inventory_v2(f, uri, join, bufsize=16*1024): def read_inventory(f, uri, join, bufsize=16*1024): + # type: (IO, unicode, Callable, int) -> Inventory line = f.readline().rstrip().decode('utf-8') if line == '# Sphinx inventory version 1': return read_inventory_v1(f, uri, join) @@ -125,6 +142,7 @@ def read_inventory(f, uri, join, bufsize=16*1024): def _strip_basic_auth(url): + # type: (unicode) -> unicode """Returns *url* with basic auth credentials removed. Also returns the basic auth username and password if they're present in *url*. @@ -146,6 +164,7 @@ def _strip_basic_auth(url): def _read_from_url(url, timeout=None): + # type: (unicode, int) -> IO """Reads data from *url* with an HTTP *GET*. This function supports fetching from resources which use basic HTTP auth as @@ -168,6 +187,7 @@ def _read_from_url(url, timeout=None): def _get_safe_url(url): + # type: (unicode) -> unicode """Gets version of *url* with basic auth passwords obscured. This function returns results suitable for printing and logging. @@ -193,6 +213,7 @@ def _get_safe_url(url): def fetch_inventory(app, uri, inv): + # type: (Sphinx, unicode, Any) -> Any """Fetch, parse and return an intersphinx inventory file.""" # both *uri* (base URI of the links to generate) and *inv* (actual # location of the inventory file) can be local or remote URIs @@ -211,7 +232,7 @@ def fetch_inventory(app, uri, inv): return try: if hasattr(f, 'url'): - newinv = f.url + newinv = f.url # type: ignore if inv != newinv: app.info('intersphinx inventory has moved: %s -> %s' % (inv, newinv)) @@ -231,17 +252,22 @@ def fetch_inventory(app, uri, inv): def load_mappings(app): + # type: (Sphinx) -> None """Load all intersphinx mappings into the environment.""" now = int(time.time()) cache_time = now - app.config.intersphinx_cache_limit * 86400 env = app.builder.env if not hasattr(env, 'intersphinx_cache'): - env.intersphinx_cache = {} - env.intersphinx_inventory = {} - env.intersphinx_named_inventory = {} - cache = env.intersphinx_cache + env.intersphinx_cache = {} # type: ignore + env.intersphinx_inventory = {} # type: ignore + env.intersphinx_named_inventory = {} # type: ignore + cache = env.intersphinx_cache # type: ignore update = False for key, value in iteritems(app.config.intersphinx_mapping): + name = None # type: unicode + uri = None # type: unicode + inv = None # type: Union[unicode, Tuple[unicode, ...]] + if isinstance(value, tuple): # new format name, (uri, inv) = key, value @@ -257,7 +283,7 @@ def load_mappings(app): if not isinstance(inv, tuple): invs = (inv, ) else: - invs = inv + invs = inv # type: ignore for inv in invs: if not inv: @@ -266,7 +292,7 @@ def load_mappings(app): # files; remote ones only if the cache time is expired if '://' not in inv or uri not in cache \ or cache[uri][1] < cache_time: - safe_inv_url = _get_safe_url(inv) + safe_inv_url = _get_safe_url(inv) # type: ignore app.info( 'loading intersphinx inventory from %s...' % safe_inv_url) invdata = fetch_inventory(app, uri, inv) @@ -276,8 +302,8 @@ def load_mappings(app): break if update: - env.intersphinx_inventory = {} - env.intersphinx_named_inventory = {} + env.intersphinx_inventory = {} # type: ignore + env.intersphinx_named_inventory = {} # type: ignore # Duplicate values in different inventories will shadow each # other; which one will override which can vary between builds # since they are specified using an unordered dict. 
To make @@ -290,15 +316,17 @@ def load_mappings(app): unnamed_vals = [v for v in cached_vals if not v[0]] for name, _x, invdata in named_vals + unnamed_vals: if name: - env.intersphinx_named_inventory[name] = invdata + env.intersphinx_named_inventory[name] = invdata # type: ignore for type, objects in iteritems(invdata): - env.intersphinx_inventory.setdefault( + env.intersphinx_inventory.setdefault( # type: ignore type, {}).update(objects) def missing_reference(app, env, node, contnode): + # type: (Sphinx, BuildEnvironment, nodes.Node, nodes.Node) -> None """Attempt to resolve a missing reference via intersphinx references.""" target = node['reftarget'] + objtypes = None # type: List[unicode] if node['reftype'] == 'any': # we search anything! objtypes = ['%s:%s' % (domain.name, objtype) @@ -317,14 +345,14 @@ def missing_reference(app, env, node, contnode): if not objtypes: return objtypes = ['%s:%s' % (domain, objtype) for objtype in objtypes] - to_try = [(env.intersphinx_inventory, target)] + to_try = [(env.intersphinx_inventory, target)] # type: ignore in_set = None if ':' in target: # first part may be the foreign doc set name setname, newtarget = target.split(':', 1) - if setname in env.intersphinx_named_inventory: + if setname in env.intersphinx_named_inventory: # type: ignore in_set = setname - to_try.append((env.intersphinx_named_inventory[setname], newtarget)) + to_try.append((env.intersphinx_named_inventory[setname], newtarget)) # type: ignore # NOQA for inventory, target in to_try: for objtype in objtypes: if objtype not in inventory or target not in inventory[objtype]: @@ -358,6 +386,7 @@ def missing_reference(app, env, node, contnode): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_config_value('intersphinx_mapping', {}, True) app.add_config_value('intersphinx_cache_limit', 5, False) app.add_config_value('intersphinx_timeout', None, False) @@ -377,7 +406,7 @@ if __name__ == '__main__': print(msg, file=sys.stderr) filename = sys.argv[1] - invdata = fetch_inventory(MockApp(), '', filename) + invdata = fetch_inventory(MockApp(), '', filename) # type: ignore for key in sorted(invdata or {}): print(key) for entry, einfo in sorted(invdata[key].items()): diff --git a/sphinx/ext/linkcode.py b/sphinx/ext/linkcode.py index 63bd38727..a9693299e 100644 --- a/sphinx/ext/linkcode.py +++ b/sphinx/ext/linkcode.py @@ -16,12 +16,18 @@ from sphinx import addnodes from sphinx.locale import _ from sphinx.errors import SphinxError +if False: + # For type annotation + from typing import Any # NOQA + from sphinx.application import Sphinx # NOQA + class LinkcodeError(SphinxError): category = "linkcode error" def doctree_read(app, doctree): + # type: (Sphinx, nodes.Node) -> None env = app.builder.env resolve_target = getattr(env.config, 'linkcode_resolve', None) @@ -38,7 +44,7 @@ def doctree_read(app, doctree): for objnode in doctree.traverse(addnodes.desc): domain = objnode.get('domain') - uris = set() + uris = set() # type: Set[unicode] for signode in objnode: if not isinstance(signode, addnodes.desc_signature): continue @@ -72,6 +78,7 @@ def doctree_read(app, doctree): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.connect('doctree-read', doctree_read) app.add_config_value('linkcode_resolve', None, '') return {'version': sphinx.__display_version__, 'parallel_read_safe': True} diff --git a/sphinx/ext/mathbase.py b/sphinx/ext/mathbase.py index ae4b439b7..4a5bcfb6e 100644 --- a/sphinx/ext/mathbase.py +++ b/sphinx/ext/mathbase.py @@ -18,6 +18,14 @@ from sphinx.domains 
import Domain from sphinx.util.nodes import make_refnode, set_source_info from sphinx.util.compat import Directive +if False: + # For type annotation + from typing import Any, Callable, Iterable, Tuple # NOQA + from docutils.parsers.rst.states import Inliner # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + class math(nodes.Inline, nodes.TextElement): pass @@ -33,6 +41,7 @@ class eqref(nodes.Inline, nodes.TextElement): class EqXRefRole(XRefRole): def result_nodes(self, document, env, node, is_ref): + # type: (nodes.Node, BuildEnvironment, nodes.Node, bool) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA node['refdomain'] = 'math' return [node], [] @@ -44,22 +53,25 @@ class MathDomain(Domain): initial_data = { 'objects': {}, # labelid -> (docname, eqno) - } + } # type: Dict[unicode, Dict[unicode, Tuple[unicode, int]]] dangling_warnings = { 'eq': 'equation not found: %(target)s', } def clear_doc(self, docname): + # type: (unicode) -> None for labelid, (doc, eqno) in list(self.data['objects'].items()): if doc == docname: del self.data['objects'][labelid] def merge_domaindata(self, docnames, otherdata): + # type: (Iterable[unicode], Dict) -> None for labelid, (doc, eqno) in otherdata['objects'].items(): if doc in docnames: self.data['objects'][labelid] = doc def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA assert typ == 'eq' docname, number = self.data['objects'].get(target, (None, None)) if docname: @@ -76,6 +88,7 @@ class MathDomain(Domain): return None def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[nodes.Node] # NOQA refnode = self.resolve_xref(env, fromdocname, builder, 'eq', target, node, contnode) if refnode is None: return [] @@ -83,9 +96,11 @@ class MathDomain(Domain): return [refnode] def get_objects(self): + # type: () -> List return [] def add_equation(self, env, docname, labelid): + # type: (BuildEnvironment, unicode, unicode) -> int equations = self.data['objects'] if labelid in equations: path = env.doc2path(equations[labelid][0]) @@ -97,12 +112,15 @@ class MathDomain(Domain): return eqno def get_next_equation_number(self, docname): + # type: (unicode) -> int targets = [eq for eq in self.data['objects'].values() if eq[0] == docname] return len(targets) + 1 def wrap_displaymath(math, label, numbering): + # type: (unicode, unicode, bool) -> unicode def is_equation(part): + # type: (unicode) -> unicode return part.strip() if label is None: @@ -137,11 +155,13 @@ def wrap_displaymath(math, label, numbering): def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]): + # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA latex = utils.unescape(text, restore_backslashes=True) return [math(latex=latex)], [] def is_in_section_title(node): + # type: (nodes.Node) -> bool """Determine whether the node is in a section title""" from sphinx.util.nodes import traverse_parent @@ -165,6 +185,7 @@ class MathDirective(Directive): } def run(self): + # type: () -> List[nodes.Node] latex = '\n'.join(self.content) if self.arguments and self.arguments[0]: latex = self.arguments[0] + '\n\n' + latex @@ -186,6 +207,7 @@ class MathDirective(Directive): 
return ret def add_target(self, ret): + # type: (List[nodes.Node]) -> None node = ret[0] env = self.state.document.settings.env @@ -213,6 +235,7 @@ class MathDirective(Directive): def latex_visit_math(self, node): + # type: (nodes.NodeVisitor, math) -> None if is_in_section_title(node): protect = r'\protect' else: @@ -223,6 +246,7 @@ def latex_visit_math(self, node): def latex_visit_displaymath(self, node): + # type: (nodes.NodeVisitor, displaymath) -> None if not node['label']: label = None else: @@ -239,17 +263,20 @@ def latex_visit_displaymath(self, node): def latex_visit_eqref(self, node): + # type: (nodes.NodeVisitor, eqref) -> None label = "equation:%s:%s" % (node['docname'], node['target']) self.body.append('\\eqref{%s}' % label) raise nodes.SkipNode def text_visit_math(self, node): + # type: (nodes.NodeVisitor, math) -> None self.add_text(node['latex']) raise nodes.SkipNode def text_visit_displaymath(self, node): + # type: (nodes.NodeVisitor, displaymath) -> None self.new_state() self.add_text(node['latex']) self.end_state() @@ -257,24 +284,29 @@ def text_visit_displaymath(self, node): def man_visit_math(self, node): + # type: (nodes.NodeVisitor, math) -> None self.body.append(node['latex']) raise nodes.SkipNode def man_visit_displaymath(self, node): + # type: (nodes.NodeVisitor, displaymath) -> None self.visit_centered(node) def man_depart_displaymath(self, node): + # type: (nodes.NodeVisitor, displaymath) -> None self.depart_centered(node) def texinfo_visit_math(self, node): + # type: (nodes.NodeVisitor, math) -> None self.body.append('@math{' + self.escape_arg(node['latex']) + '}') raise nodes.SkipNode def texinfo_visit_displaymath(self, node): + # type: (nodes.NodeVisitor, displaymath) -> None if node.get('label'): self.add_anchor(node['label'], node) self.body.append('\n\n@example\n%s\n@end example\n\n' % @@ -282,10 +314,12 @@ def texinfo_visit_displaymath(self, node): def texinfo_depart_displaymath(self, node): + # type: (nodes.NodeVisitor, displaymath) -> None pass def setup_math(app, htmlinlinevisitors, htmldisplayvisitors): + # type: (Sphinx, Tuple[Callable, Any], Tuple[Callable, Any]) -> None app.add_config_value('math_number_all', False, 'env') app.add_domain(MathDomain) app.add_node(math, override=True, diff --git a/sphinx/ext/napoleon/__init__.py b/sphinx/ext/napoleon/__init__.py index 651355c57..b74dfb75d 100644 --- a/sphinx/ext/napoleon/__init__.py +++ b/sphinx/ext/napoleon/__init__.py @@ -14,8 +14,13 @@ import sys from six import PY2, iteritems import sphinx +from sphinx.application import Sphinx from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring +if False: + # For type annotation + from typing import Any # NOQA + class Config(object): """Sphinx napoleon extension settings in `conf.py`. @@ -254,6 +259,7 @@ class Config(object): } def __init__(self, **settings): + # type: (Any) -> None for name, (default, rebuild) in iteritems(self._config_values): setattr(self, name, default) for name, value in iteritems(settings): @@ -261,6 +267,7 @@ class Config(object): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] """Sphinx extension setup function. 
When the extension is loaded, Sphinx imports this module and executes @@ -282,9 +289,9 @@ def setup(app): `The Extension API <http://sphinx-doc.org/extdev/appapi.html>`_ """ - from sphinx.application import Sphinx if not isinstance(app, Sphinx): - return # probably called by tests + return # type: ignore + # probably called by tests _patch_python_domain() @@ -297,13 +304,14 @@ def setup(app): def _patch_python_domain(): + # type: () -> None try: from sphinx.domains.python import PyTypedField except ImportError: pass else: import sphinx.domains.python - import sphinx.locale + import sphinx.locale # type: ignore l_ = sphinx.locale.lazy_gettext for doc_field in sphinx.domains.python.PyObject.doc_field_types: if doc_field.name == 'parameter': @@ -317,6 +325,7 @@ def _patch_python_domain(): def _process_docstring(app, what, name, obj, options, lines): + # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None """Process the docstring for a given python object. Called when autodoc has read and processed a docstring. `lines` is a list @@ -353,6 +362,7 @@ def _process_docstring(app, what, name, obj, options, lines): """ result_lines = lines + docstring = None # type: GoogleDocstring if app.config.napoleon_numpy_docstring: docstring = NumpyDocstring(result_lines, app.config, app, what, name, obj, options) @@ -365,6 +375,7 @@ def _process_docstring(app, what, name, obj, options, lines): def _skip_member(app, what, name, obj, skip, options): + # type: (Sphinx, unicode, unicode, Any, bool, Any) -> bool """Determine if private and special class members are included in docs. The following settings in conf.py determine if private and special class diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py index e526a11ae..7df6e83ab 100644 --- a/sphinx/ext/napoleon/docstring.py +++ b/sphinx/ext/napoleon/docstring.py @@ -21,6 +21,12 @@ from six.moves import range from sphinx.ext.napoleon.iterators import modify_iter from sphinx.util.pycompat import UnicodeMixin +if False: + # For type annotation + from typing import Any, Callable, Tuple, Union # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.config import Config as SphinxConfig # NOQA + _directive_regex = re.compile(r'\.\. 
\S+::') _google_section_regex = re.compile(r'^(\s|\w)+:\s*$') @@ -99,19 +105,20 @@ class GoogleDocstring(UnicodeMixin): """ def __init__(self, docstring, config=None, app=None, what='', name='', obj=None, options=None): + # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA self._config = config self._app = app if not self._config: from sphinx.ext.napoleon import Config - self._config = self._app and self._app.config or Config() + self._config = self._app and self._app.config or Config() # type: ignore if not what: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' - elif isinstance(obj, collections.Callable): + elif isinstance(obj, collections.Callable): # type: ignore what = 'function' else: what = 'object' @@ -121,14 +128,14 @@ class GoogleDocstring(UnicodeMixin): self._obj = obj self._opt = options if isinstance(docstring, string_types): - docstring = docstring.splitlines() + docstring = docstring.splitlines() # type: ignore self._lines = docstring self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip()) - self._parsed_lines = [] + self._parsed_lines = [] # type: List[unicode] self._is_in_section = False self._section_indent = 0 if not hasattr(self, '_directive_sections'): - self._directive_sections = [] + self._directive_sections = [] # type: List[unicode] if not hasattr(self, '_sections'): self._sections = { 'args': self._parse_parameters_section, @@ -154,10 +161,11 @@ class GoogleDocstring(UnicodeMixin): 'warns': self._parse_warns_section, 'yield': self._parse_yields_section, 'yields': self._parse_yields_section, - } + } # type: Dict[unicode, Callable] self._parse() def __unicode__(self): + # type: () -> unicode """Return the parsed docstring in reStructuredText format. Returns @@ -169,6 +177,7 @@ class GoogleDocstring(UnicodeMixin): return u('\n').join(self.lines()) def lines(self): + # type: () -> List[unicode] """Return the parsed lines of the docstring in reStructuredText format. 
Returns @@ -180,38 +189,42 @@ class GoogleDocstring(UnicodeMixin): return self._parsed_lines def _consume_indented_block(self, indent=1): + # type: (int) -> List[unicode] lines = [] line = self._line_iter.peek() while(not self._is_section_break() and (not line or self._is_indented(line, indent))): - lines.append(next(self._line_iter)) + lines.append(next(self._line_iter)) # type: ignore line = self._line_iter.peek() return lines def _consume_contiguous(self): + # type: () -> List[unicode] lines = [] while (self._line_iter.has_next() and self._line_iter.peek() and not self._is_section_header()): - lines.append(next(self._line_iter)) + lines.append(next(self._line_iter)) # type: ignore return lines def _consume_empty(self): + # type: () -> List[unicode] lines = [] line = self._line_iter.peek() while self._line_iter.has_next() and not line: - lines.append(next(self._line_iter)) + lines.append(next(self._line_iter)) # type: ignore line = self._line_iter.peek() return lines def _consume_field(self, parse_type=True, prefer_type=False): - line = next(self._line_iter) + # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]] + line = next(self._line_iter) # type: ignore before, colon, after = self._partition_field_on_colon(line) - _name, _type, _desc = before, '', after + _name, _type, _desc = before, '', after # type: unicode, unicode, unicode if parse_type: - match = _google_typed_arg_regex.match(before) + match = _google_typed_arg_regex.match(before) # type: ignore if match: _name = match.group(1) _type = match.group(2) @@ -221,11 +234,12 @@ class GoogleDocstring(UnicodeMixin): if prefer_type and not _type: _type, _name = _name, _type indent = self._get_indent(line) + 1 - _desc = [_desc] + self._dedent(self._consume_indented_block(indent)) + _desc = [_desc] + self._dedent(self._consume_indented_block(indent)) # type: ignore _desc = self.__class__(_desc, self._config).lines() - return _name, _type, _desc + return _name, _type, _desc # type: ignore def _consume_fields(self, parse_type=True, prefer_type=False): + # type: (bool, bool) -> List[Tuple[unicode, unicode, List[unicode]]] self._consume_empty() fields = [] while not self._is_section_break(): @@ -235,19 +249,21 @@ class GoogleDocstring(UnicodeMixin): return fields def _consume_inline_attribute(self): - line = next(self._line_iter) + # type: () -> Tuple[unicode, List[unicode]] + line = next(self._line_iter) # type: ignore _type, colon, _desc = self._partition_field_on_colon(line) if not colon: _type, _desc = _desc, _type - _desc = [_desc] + self._dedent(self._consume_to_end()) + _desc = [_desc] + self._dedent(self._consume_to_end()) # type: ignore _desc = self.__class__(_desc, self._config).lines() - return _type, _desc + return _type, _desc # type: ignore def _consume_returns_section(self): + # type: () -> List[Tuple[unicode, unicode, List[unicode]]] lines = self._dedent(self._consume_to_next_section()) if lines: before, colon, after = self._partition_field_on_colon(lines[0]) - _name, _type, _desc = '', '', lines + _name, _type, _desc = '', '', lines # type: unicode, unicode, List[unicode] if colon: if after: @@ -263,30 +279,35 @@ class GoogleDocstring(UnicodeMixin): return [] def _consume_usage_section(self): + # type: () -> List[unicode] lines = self._dedent(self._consume_to_next_section()) return lines def _consume_section_header(self): - section = next(self._line_iter) + # type: () -> unicode + section = next(self._line_iter) # type: ignore stripped_section = section.strip(':') if stripped_section.lower() in self._sections: 
section = stripped_section return section def _consume_to_end(self): + # type: () -> List[unicode] lines = [] while self._line_iter.has_next(): - lines.append(next(self._line_iter)) + lines.append(next(self._line_iter)) # type: ignore return lines def _consume_to_next_section(self): + # type: () -> List[unicode] self._consume_empty() lines = [] while not self._is_section_break(): - lines.append(next(self._line_iter)) + lines.append(next(self._line_iter)) # type: ignore return lines + self._consume_empty() def _dedent(self, lines, full=False): + # type: (List[unicode], bool) -> List[unicode] if full: return [line.lstrip() for line in lines] else: @@ -294,6 +315,7 @@ class GoogleDocstring(UnicodeMixin): return [line[min_indent:] for line in lines] def _escape_args_and_kwargs(self, name): + # type: (unicode) -> unicode if name[:2] == '**': return r'\*\*' + name[2:] elif name[:1] == '*': @@ -302,29 +324,32 @@ class GoogleDocstring(UnicodeMixin): return name def _fix_field_desc(self, desc): + # type: (List[unicode]) -> List[unicode] if self._is_list(desc): - desc = [''] + desc + desc = [''] + desc # type: ignore elif desc[0].endswith('::'): desc_block = desc[1:] indent = self._get_indent(desc[0]) block_indent = self._get_initial_indent(desc_block) if block_indent > indent: - desc = [''] + desc + desc = [''] + desc # type: ignore else: desc = ['', desc[0]] + self._indent(desc_block, 4) return desc def _format_admonition(self, admonition, lines): + # type: (unicode, List[unicode]) -> List[unicode] lines = self._strip_empty(lines) if len(lines) == 1: return ['.. %s:: %s' % (admonition, lines[0].strip()), ''] elif lines: lines = self._indent(self._dedent(lines), 3) - return ['.. %s::' % admonition, ''] + lines + [''] + return ['.. %s::' % admonition, ''] + lines + [''] # type: ignore else: return ['.. 
%s::' % admonition, ''] def _format_block(self, prefix, lines, padding=None): + # type: (unicode, List[unicode], unicode) -> List[unicode] if lines: if padding is None: padding = ' ' * len(prefix) @@ -342,6 +367,7 @@ class GoogleDocstring(UnicodeMixin): def _format_docutils_params(self, fields, field_role='param', type_role='type'): + # type: (List[Tuple[unicode, unicode, List[unicode]]], unicode, unicode) -> List[unicode] # NOQA lines = [] for _name, _type, _desc in fields: _desc = self._strip_empty(_desc) @@ -357,13 +383,14 @@ class GoogleDocstring(UnicodeMixin): return lines + [''] def _format_field(self, _name, _type, _desc): + # type: (unicode, unicode, List[unicode]) -> List[unicode] _desc = self._strip_empty(_desc) has_desc = any(_desc) separator = has_desc and ' -- ' or '' if _name: if _type: if '`' in _type: - field = '**%s** (%s)%s' % (_name, _type, separator) + field = '**%s** (%s)%s' % (_name, _type, separator) # type: unicode else: field = '**%s** (*%s*)%s' % (_name, _type, separator) else: @@ -386,10 +413,11 @@ class GoogleDocstring(UnicodeMixin): return [field] def _format_fields(self, field_type, fields): + # type: (unicode, List[Tuple[unicode, unicode, List[unicode]]]) -> List[unicode] field_type = ':%s:' % field_type.strip() padding = ' ' * len(field_type) multi = len(fields) > 1 - lines = [] + lines = [] # type: List[unicode] for _name, _type, _desc in fields: field = self._format_field(_name, _type, _desc) if multi: @@ -404,6 +432,7 @@ class GoogleDocstring(UnicodeMixin): return lines def _get_current_indent(self, peek_ahead=0): + # type: (int) -> int line = self._line_iter.peek(peek_ahead + 1)[peek_ahead] while line != self._line_iter.sentinel: if line: @@ -413,18 +442,21 @@ class GoogleDocstring(UnicodeMixin): return 0 def _get_indent(self, line): + # type: (unicode) -> int for i, s in enumerate(line): if not s.isspace(): return i return len(line) def _get_initial_indent(self, lines): + # type: (List[unicode]) -> int for line in lines: if line: return self._get_indent(line) return 0 def _get_min_indent(self, lines): + # type: (List[unicode]) -> int min_indent = None for line in lines: if line: @@ -436,9 +468,11 @@ class GoogleDocstring(UnicodeMixin): return min_indent or 0 def _indent(self, lines, n=4): + # type: (List[unicode], int) -> List[unicode] return [(' ' * n) + line for line in lines] def _is_indented(self, line, indent=1): + # type: (unicode, int) -> bool for i, s in enumerate(line): if i >= indent: return True @@ -447,11 +481,12 @@ class GoogleDocstring(UnicodeMixin): return False def _is_list(self, lines): + # type: (List[unicode]) -> bool if not lines: return False - if _bullet_list_regex.match(lines[0]): + if _bullet_list_regex.match(lines[0]): # type: ignore return True - if _enumerated_list_regex.match(lines[0]): + if _enumerated_list_regex.match(lines[0]): # type: ignore return True if len(lines) < 2 or lines[0].endswith('::'): return False @@ -464,6 +499,7 @@ class GoogleDocstring(UnicodeMixin): return next_indent > indent def _is_section_header(self): + # type: () -> bool section = self._line_iter.peek().lower() match = _google_section_regex.match(section) if match and section.strip(':') in self._sections: @@ -478,6 +514,7 @@ class GoogleDocstring(UnicodeMixin): return False def _is_section_break(self): + # type: () -> bool line = self._line_iter.peek() return (not self._line_iter.has_next() or self._is_section_header() or @@ -486,6 +523,7 @@ class GoogleDocstring(UnicodeMixin): not self._is_indented(line, self._section_indent))) def _parse(self): + 
# type: () -> None self._parsed_lines = self._consume_empty() if self._name and (self._what == 'attribute' or self._what == 'data'): @@ -498,7 +536,7 @@ class GoogleDocstring(UnicodeMixin): section = self._consume_section_header() self._is_in_section = True self._section_indent = self._get_current_indent() - if _directive_regex.match(section): + if _directive_regex.match(section): # type: ignore lines = [section] + self._consume_to_next_section() else: lines = self._sections[section.lower()](section) @@ -513,42 +551,47 @@ class GoogleDocstring(UnicodeMixin): self._parsed_lines.extend(lines) def _parse_attribute_docstring(self): + # type: () -> List[unicode] _type, _desc = self._consume_inline_attribute() return self._format_field('', _type, _desc) def _parse_attributes_section(self, section): + # type: (unicode) -> List[unicode] lines = [] for _name, _type, _desc in self._consume_fields(): if self._config.napoleon_use_ivar: - field = ':ivar %s: ' % _name + field = ':ivar %s: ' % _name # type: unicode lines.extend(self._format_block(field, _desc)) if _type: lines.append(':vartype %s: %s' % (_name, _type)) else: lines.extend(['.. attribute:: ' + _name, '']) - field = self._format_field('', _type, _desc) - lines.extend(self._indent(field, 3)) + field = self._format_field('', _type, _desc) # type: ignore + lines.extend(self._indent(field, 3)) # type: ignore lines.append('') if self._config.napoleon_use_ivar: lines.append('') return lines def _parse_examples_section(self, section): + # type: (unicode) -> List[unicode] use_admonition = self._config.napoleon_use_admonition_for_examples return self._parse_generic_section(section, use_admonition) def _parse_usage_section(self, section): - header = ['.. rubric:: Usage:', ''] - block = ['.. code-block:: python', ''] + # type: (unicode) -> List[unicode] + header = ['.. rubric:: Usage:', ''] # type: List[unicode] + block = ['.. code-block:: python', ''] # type: List[unicode] lines = self._consume_usage_section() lines = self._indent(lines, 3) return header + block + lines + [''] def _parse_generic_section(self, section, use_admonition): + # type: (unicode, bool) -> List[unicode] lines = self._strip_empty(self._consume_to_next_section()) lines = self._dedent(lines) if use_admonition: - header = '.. admonition:: %s' % section + header = '.. admonition:: %s' % section # type: unicode lines = self._indent(lines, 3) else: header = '.. rubric:: %s' % section @@ -558,6 +601,7 @@ class GoogleDocstring(UnicodeMixin): return [header, ''] def _parse_keyword_arguments_section(self, section): + # type: (unicode) -> List[unicode] fields = self._consume_fields() if self._config.napoleon_use_keyword: return self._format_docutils_params( @@ -568,26 +612,31 @@ class GoogleDocstring(UnicodeMixin): return self._format_fields('Keyword Arguments', fields) def _parse_methods_section(self, section): - lines = [] + # type: (unicode) -> List[unicode] + lines = [] # type: List[unicode] for _name, _, _desc in self._consume_fields(parse_type=False): lines.append('.. 
method:: %s' % _name) if _desc: - lines.extend([''] + self._indent(_desc, 3)) + lines.extend([''] + self._indent(_desc, 3)) # type: ignore lines.append('') return lines def _parse_note_section(self, section): + # type: (unicode) -> List[unicode] lines = self._consume_to_next_section() return self._format_admonition('note', lines) def _parse_notes_section(self, section): + # type: (unicode) -> List[unicode] use_admonition = self._config.napoleon_use_admonition_for_notes return self._parse_generic_section('Notes', use_admonition) def _parse_other_parameters_section(self, section): + # type: (unicode) -> List[unicode] return self._format_fields('Other Parameters', self._consume_fields()) def _parse_parameters_section(self, section): + # type: (unicode) -> List[unicode] fields = self._consume_fields() if self._config.napoleon_use_param: return self._format_docutils_params(fields) @@ -595,11 +644,12 @@ class GoogleDocstring(UnicodeMixin): return self._format_fields('Parameters', fields) def _parse_raises_section(self, section): + # type: (unicode) -> List[unicode] fields = self._consume_fields(parse_type=False, prefer_type=True) field_type = ':raises:' padding = ' ' * len(field_type) multi = len(fields) > 1 - lines = [] + lines = [] # type: List[unicode] for _, _type, _desc in fields: _desc = self._strip_empty(_desc) has_desc = any(_desc) @@ -633,10 +683,12 @@ class GoogleDocstring(UnicodeMixin): return lines def _parse_references_section(self, section): + # type: (unicode) -> List[unicode] use_admonition = self._config.napoleon_use_admonition_for_references return self._parse_generic_section('References', use_admonition) def _parse_returns_section(self, section): + # type: (unicode) -> List[unicode] fields = self._consume_returns_section() multi = len(fields) > 1 if multi: @@ -644,7 +696,7 @@ class GoogleDocstring(UnicodeMixin): else: use_rtype = self._config.napoleon_use_rtype - lines = [] + lines = [] # type: List[unicode] for _name, _type, _desc in fields: if use_rtype: field = self._format_field(_name, '', _desc) @@ -665,30 +717,36 @@ class GoogleDocstring(UnicodeMixin): return lines def _parse_see_also_section(self, section): + # type: (unicode) -> List[unicode] lines = self._consume_to_next_section() return self._format_admonition('seealso', lines) def _parse_todo_section(self, section): + # type: (unicode) -> List[unicode] lines = self._consume_to_next_section() return self._format_admonition('todo', lines) def _parse_warning_section(self, section): + # type: (unicode) -> List[unicode] lines = self._consume_to_next_section() return self._format_admonition('warning', lines) def _parse_warns_section(self, section): + # type: (unicode) -> List[unicode] return self._format_fields('Warns', self._consume_fields()) def _parse_yields_section(self, section): + # type: (unicode) -> List[unicode] fields = self._consume_returns_section() return self._format_fields('Yields', fields) def _partition_field_on_colon(self, line): + # type: (unicode) -> Tuple[unicode, unicode, unicode] before_colon = [] after_colon = [] colon = '' found_colon = False - for i, source in enumerate(_xref_regex.split(line)): + for i, source in enumerate(_xref_regex.split(line)): # type: ignore if found_colon: after_colon.append(source) else: @@ -706,6 +764,7 @@ class GoogleDocstring(UnicodeMixin): "".join(after_colon).strip()) def _strip_empty(self, lines): + # type: (List[unicode]) -> List[unicode] if lines: start = -1 for i, line in enumerate(lines): @@ -820,12 +879,14 @@ class NumpyDocstring(GoogleDocstring): """ def 
__init__(self, docstring, config=None, app=None, what='', name='', obj=None, options=None): + # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA self._directive_sections = ['.. index::'] super(NumpyDocstring, self).__init__(docstring, config, app, what, name, obj, options) def _consume_field(self, parse_type=True, prefer_type=False): - line = next(self._line_iter) + # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]] + line = next(self._line_iter) # type: ignore if parse_type: _name, _, _type = self._partition_field_on_colon(line) else: @@ -841,16 +902,19 @@ class NumpyDocstring(GoogleDocstring): return _name, _type, _desc def _consume_returns_section(self): + # type: () -> List[Tuple[unicode, unicode, List[unicode]]] return self._consume_fields(prefer_type=True) def _consume_section_header(self): - section = next(self._line_iter) + # type: () -> unicode + section = next(self._line_iter) # type: ignore if not _directive_regex.match(section): # Consume the header underline - next(self._line_iter) + next(self._line_iter) # type: ignore return section def _is_section_break(self): + # type: () -> bool line1, line2 = self._line_iter.peek(2) return (not self._line_iter.has_next() or self._is_section_header() or @@ -860,10 +924,11 @@ class NumpyDocstring(GoogleDocstring): not self._is_indented(line1, self._section_indent))) def _is_section_header(self): + # type: () -> bool section, underline = self._line_iter.peek(2) section = section.lower() if section in self._sections and isinstance(underline, string_types): - return bool(_numpy_section_regex.match(underline)) + return bool(_numpy_section_regex.match(underline)) # type: ignore elif self._directive_sections: if _directive_regex.match(section): for directive_section in self._directive_sections: @@ -875,6 +940,7 @@ class NumpyDocstring(GoogleDocstring): r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X) def _parse_see_also_section(self, section): + # type: (unicode) -> List[unicode] lines = self._consume_to_next_section() try: return self._parse_numpydoc_see_also_section(lines) @@ -882,6 +948,7 @@ class NumpyDocstring(GoogleDocstring): return self._format_admonition('seealso', lines) def _parse_numpydoc_see_also_section(self, content): + # type: (List[unicode]) -> List[unicode] """ Derived from the NumpyDoc implementation of _parse_see_also. 
@@ -914,13 +981,13 @@ class NumpyDocstring(GoogleDocstring): del rest[:] current_func = None - rest = [] + rest = [] # type: List[unicode] for line in content: if not line.strip(): continue - m = self._name_rgx.match(line) + m = self._name_rgx.match(line) # type: ignore if m and line[m.end():].strip().startswith(':'): push_item(current_func, rest) current_func, line = line[:m.end()], line[m.end():] @@ -960,12 +1027,12 @@ class NumpyDocstring(GoogleDocstring): 'const': 'const', 'attribute': 'attr', 'attr': 'attr' - } + } # type: Dict[unicode, unicode] if self._what is None: - func_role = 'obj' + func_role = 'obj' # type: unicode else: func_role = roles.get(self._what, '') - lines = [] + lines = [] # type: List[unicode] last_had_desc = True for func, desc, role in items: if role: diff --git a/sphinx/ext/napoleon/iterators.py b/sphinx/ext/napoleon/iterators.py index f66d67f2c..76544b534 100644 --- a/sphinx/ext/napoleon/iterators.py +++ b/sphinx/ext/napoleon/iterators.py @@ -13,6 +13,10 @@ import collections +if False: + # For type annotation + from typing import Any, Iterable # NOQA + class peek_iter(object): """An iterator object that supports peeking ahead. @@ -48,34 +52,39 @@ class peek_iter(object): """ def __init__(self, *args): + # type: (Any) -> None """__init__(o, sentinel=None)""" - self._iterable = iter(*args) - self._cache = collections.deque() + self._iterable = iter(*args) # type: Iterable + self._cache = collections.deque() # type: collections.deque if len(args) == 2: self.sentinel = args[1] else: self.sentinel = object() def __iter__(self): + # type: () -> peek_iter return self def __next__(self, n=None): + # type: (int) -> Any # note: prevent 2to3 to transform self.next() in next(self) which # causes an infinite loop ! return getattr(self, 'next')(n) def _fillcache(self, n): + # type: (int) -> None """Cache `n` items. If `n` is 0 or None, then 1 item is cached.""" if not n: n = 1 try: while len(self._cache) < n: - self._cache.append(next(self._iterable)) + self._cache.append(next(self._iterable)) # type: ignore except StopIteration: while len(self._cache) < n: self._cache.append(self.sentinel) def has_next(self): + # type: () -> bool """Determine if iterator is exhausted. Returns @@ -91,6 +100,7 @@ class peek_iter(object): return self.peek() != self.sentinel def next(self, n=None): + # type: (int) -> Any """Get the next item or `n` items of the iterator. Parameters @@ -126,6 +136,7 @@ class peek_iter(object): return result def peek(self, n=None): + # type: (int) -> Any """Preview the next item or `n` items of the iterator. The iterator is not advanced when peek is called. @@ -209,6 +220,7 @@ class modify_iter(peek_iter): """ def __init__(self, *args, **kwargs): + # type: (Any, Any) -> None """__init__(o, sentinel=None, modifier=lambda x: x)""" if 'modifier' in kwargs: self.modifier = kwargs['modifier'] @@ -223,6 +235,7 @@ class modify_iter(peek_iter): super(modify_iter, self).__init__(*args) def _fillcache(self, n): + # type: (int) -> None """Cache `n` modified items. If `n` is 0 or None, 1 item is cached. 
Each item returned by the iterator is passed through the @@ -233,7 +246,7 @@ class modify_iter(peek_iter): n = 1 try: while len(self._cache) < n: - self._cache.append(self.modifier(next(self._iterable))) + self._cache.append(self.modifier(next(self._iterable))) # type: ignore except StopIteration: while len(self._cache) < n: self._cache.append(self.sentinel) diff --git a/sphinx/ext/pngmath.py b/sphinx/ext/pngmath.py index d7660550e..a02b61b9b 100644 --- a/sphinx/ext/pngmath.py +++ b/sphinx/ext/pngmath.py @@ -20,6 +20,7 @@ from subprocess import Popen, PIPE from hashlib import sha1 from six import text_type + from docutils import nodes import sphinx @@ -29,11 +30,18 @@ from sphinx.util.osutil import ensuredir, ENOENT, cd from sphinx.util.pycompat import sys_encoding from sphinx.ext.mathbase import setup_math as mathbase_setup, wrap_displaymath +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.ext.mathbase import math as math_node, displaymath # NOQA + class MathExtError(SphinxError): category = 'Math extension error' def __init__(self, msg, stderr=None, stdout=None): + # type: (unicode, unicode, unicode) -> None if stderr: msg += '\n[stderr]\n' + stderr.decode(sys_encoding, 'replace') if stdout: @@ -71,6 +79,7 @@ depth_re = re.compile(br'\[\d+ depth=(-?\d+)\]') def render_math(self, math): + # type: (nodes.NodeVisitor, unicode) -> Tuple[unicode, int] """Render the LaTeX math expression *math* using latex and dvipng. Return the filename relative to the built document and the "depth", @@ -107,9 +116,8 @@ def render_math(self, math): else: tempdir = self.builder._mathpng_tempdir - tf = codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8') - tf.write(latex) - tf.close() + with codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8') as tf: # type: ignore + tf.write(latex) # build latex command; old versions of latex don't have the # --output-directory option, so we have to manually chdir to the @@ -171,23 +179,26 @@ def render_math(self, math): def cleanup_tempdir(app, exc): + # type: (Sphinx, Exception) -> None if exc: return if not hasattr(app.builder, '_mathpng_tempdir'): return try: - shutil.rmtree(app.builder._mathpng_tempdir) + shutil.rmtree(app.builder._mathpng_tempdir) # type: ignore except Exception: pass def get_tooltip(self, node): + # type: (nodes.NodeVisitor, math_node) -> unicode if self.builder.config.pngmath_add_tooltips: return ' alt="%s"' % self.encode(node['latex']).strip() return '' def html_visit_math(self, node): + # type: (nodes.NodeVisitor, math_node) -> None try: fname, depth = render_math(self, '$'+node['latex']+'$') except MathExtError as exc: @@ -210,6 +221,7 @@ def html_visit_math(self, node): def html_visit_displaymath(self, node): + # type: (nodes.NodeVisitor, displaymath) -> None if node['nowrap']: latex = node['latex'] else: @@ -238,6 +250,7 @@ def html_visit_displaymath(self, node): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.warn('sphinx.ext.pngmath has been deprecated. 
Please use sphinx.ext.imgmath instead.') try: mathbase_setup(app, (html_visit_math, None), (html_visit_displaymath, None)) diff --git a/sphinx/ext/todo.py b/sphinx/ext/todo.py index f3b526ce6..5db878ad5 100644 --- a/sphinx/ext/todo.py +++ b/sphinx/ext/todo.py @@ -22,6 +22,12 @@ from sphinx.util.nodes import set_source_info from docutils.parsers.rst import Directive from docutils.parsers.rst.directives.admonitions import BaseAdmonition +if False: + # For type annotation + from typing import Any, Iterable # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + class todo_node(nodes.Admonition, nodes.Element): pass @@ -46,6 +52,7 @@ class Todo(BaseAdmonition): } def run(self): + # type: () -> List[nodes.Node] if not self.options.get('class'): self.options['class'] = ['admonition-todo'] @@ -63,12 +70,13 @@ class Todo(BaseAdmonition): def process_todos(app, doctree): + # type: (Sphinx, nodes.Node) -> None # collect all todos in the environment # this is not done in the directive itself because it some transformations # must have already been run, e.g. substitutions env = app.builder.env if not hasattr(env, 'todo_all_todos'): - env.todo_all_todos = [] + env.todo_all_todos = [] # type: ignore for node in doctree.traverse(todo_node): app.emit('todo-defined', node) @@ -80,7 +88,7 @@ def process_todos(app, doctree): targetnode = None newnode = node.deepcopy() del newnode['ids'] - env.todo_all_todos.append({ + env.todo_all_todos.append({ # type: ignore 'docname': env.docname, 'source': node.source or env.doc2path(env.docname), 'lineno': node.line, @@ -101,15 +109,17 @@ class TodoList(Directive): required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[todolist] # Simply insert an empty todolist node which will be replaced later # when process_todo_nodes is called return [todolist('')] def process_todo_nodes(app, doctree, fromdocname): + # type: (Sphinx, nodes.Node, unicode) -> None if not app.config['todo_include_todos']: for node in doctree.traverse(todo_node): node.parent.remove(node) @@ -119,7 +129,7 @@ def process_todo_nodes(app, doctree, fromdocname): env = app.builder.env if not hasattr(env, 'todo_all_todos'): - env.todo_all_todos = [] + env.todo_all_todos = [] # type: ignore for node in doctree.traverse(todolist): if not app.config['todo_include_todos']: @@ -128,7 +138,7 @@ def process_todo_nodes(app, doctree, fromdocname): content = [] - for todo_info in env.todo_all_todos: + for todo_info in env.todo_all_todos: # type: ignore para = nodes.paragraph(classes=['todo-source']) if app.config['todo_link_only']: description = _('<<original entry>>') @@ -168,30 +178,35 @@ def process_todo_nodes(app, doctree, fromdocname): def purge_todos(app, env, docname): + # type: (Sphinx, BuildEnvironment, unicode) -> None if not hasattr(env, 'todo_all_todos'): return - env.todo_all_todos = [todo for todo in env.todo_all_todos + env.todo_all_todos = [todo for todo in env.todo_all_todos # type: ignore if todo['docname'] != docname] def merge_info(app, env, docnames, other): + # type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None if not hasattr(other, 'todo_all_todos'): return if not hasattr(env, 'todo_all_todos'): - env.todo_all_todos = [] - env.todo_all_todos.extend(other.todo_all_todos) + env.todo_all_todos = [] # type: ignore + env.todo_all_todos.extend(other.todo_all_todos) # type: ignore def visit_todo_node(self, 
node): + # type: (nodes.NodeVisitor, todo_node) -> None self.visit_admonition(node) # self.visit_admonition(node, 'todo') def depart_todo_node(self, node): + # type: (nodes.NodeVisitor, todo_node) -> None self.depart_admonition(node) def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_event('todo-defined') app.add_config_value('todo_include_todos', False, 'html') app.add_config_value('todo_link_only', False, 'html') diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py index 276a137d5..813a465db 100644 --- a/sphinx/ext/viewcode.py +++ b/sphinx/ext/viewcode.py @@ -12,6 +12,7 @@ import traceback from six import iteritems, text_type + from docutils import nodes import sphinx @@ -20,10 +21,17 @@ from sphinx.locale import _ from sphinx.pycode import ModuleAnalyzer from sphinx.util import get_full_modname from sphinx.util.nodes import make_refnode -from sphinx.util.console import blue +from sphinx.util.console import blue # type: ignore + +if False: + # For type annotation + from typing import Any, Iterable, Iterator, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA def _get_full_modname(app, modname, attribute): + # type: (Sphinx, str, unicode) -> unicode try: return get_full_modname(modname, attribute) except AttributeError: @@ -43,20 +51,21 @@ def _get_full_modname(app, modname, attribute): def doctree_read(app, doctree): + # type: (Sphinx, nodes.Node) -> None env = app.builder.env if not hasattr(env, '_viewcode_modules'): - env._viewcode_modules = {} + env._viewcode_modules = {} # type: ignore if app.builder.name == "singlehtml": return if app.builder.name.startswith("epub") and not env.config.viewcode_enable_epub: return def has_tag(modname, fullname, docname, refname): - entry = env._viewcode_modules.get(modname, None) + entry = env._viewcode_modules.get(modname, None) # type: ignore try: analyzer = ModuleAnalyzer.for_module(modname) except Exception: - env._viewcode_modules[modname] = False + env._viewcode_modules[modname] = False # type: ignore return if not isinstance(analyzer.code, text_type): code = analyzer.code.decode(analyzer.encoding) @@ -65,7 +74,7 @@ def doctree_read(app, doctree): if entry is None or entry[0] != code: analyzer.find_tags() entry = code, analyzer.tags, {}, refname - env._viewcode_modules[modname] = entry + env._viewcode_modules[modname] = entry # type: ignore elif entry is False: return _, tags, used, _ = entry @@ -76,7 +85,7 @@ def doctree_read(app, doctree): for objnode in doctree.traverse(addnodes.desc): if objnode.get('domain') != 'py': continue - names = set() + names = set() # type: Set[unicode] for signode in objnode: if not isinstance(signode, addnodes.desc_signature): continue @@ -106,16 +115,18 @@ def doctree_read(app, doctree): def env_merge_info(app, env, docnames, other): + # type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None if not hasattr(other, '_viewcode_modules'): return # create a _viewcode_modules dict on the main environment if not hasattr(env, '_viewcode_modules'): - env._viewcode_modules = {} + env._viewcode_modules = {} # type: ignore # now merge in the information from the subprocess - env._viewcode_modules.update(other._viewcode_modules) + env._viewcode_modules.update(other._viewcode_modules) # type: ignore def missing_reference(app, env, node, contnode): + # type: (Sphinx, BuildEnvironment, nodes.Node, nodes.Node) -> nodes.Node # resolve our "viewcode" reference nodes -- they need special treatment if node['reftype'] == 
'viewcode': return make_refnode(app.builder, node['refdoc'], node['reftarget'], @@ -123,20 +134,21 @@ def missing_reference(app, env, node, contnode): def collect_pages(app): + # type: (Sphinx) -> Iterator[Tuple[unicode, Dict[unicode, Any], unicode]] env = app.builder.env if not hasattr(env, '_viewcode_modules'): return - highlighter = app.builder.highlighter + highlighter = app.builder.highlighter # type: ignore urito = app.builder.get_relative_uri - modnames = set(env._viewcode_modules) + modnames = set(env._viewcode_modules) # type: ignore # app.builder.info(' (%d module code pages)' % # len(env._viewcode_modules), nonl=1) for modname, entry in app.status_iterator( - iteritems(env._viewcode_modules), 'highlighting module code... ', - blue, len(env._viewcode_modules), lambda x: x[0]): + iteritems(env._viewcode_modules), 'highlighting module code... ', # type:ignore + blue, len(env._viewcode_modules), lambda x: x[0]): # type:ignore if not entry: continue code, tags, used, refname = entry @@ -185,7 +197,7 @@ def collect_pages(app): 'title': modname, 'body': (_('<h1>Source code for %s</h1>') % modname + '\n'.join(lines)), - } + } # type: Dict[unicode, Any] yield (pagename, context, 'page.html') if not modnames: @@ -218,6 +230,7 @@ def collect_pages(app): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_config_value('viewcode_import', True, False) app.add_config_value('viewcode_enable_epub', False, False) app.connect('doctree-read', doctree_read) From 3407ef0ca8a8ce41e67092d2605f8fc77bebb982 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 10 Nov 2016 14:05:58 +0900 Subject: [PATCH 010/190] Add type-check annotations to sphinx.writers --- sphinx/writers/latex.py | 312 ++++++++++++++++++++++++++++++++++---- sphinx/writers/texinfo.py | 306 +++++++++++++++++++++++++++++++++---- sphinx/writers/text.py | 248 +++++++++++++++++++++++++++--- sphinx/writers/xml.py | 12 +- 4 files changed, 795 insertions(+), 83 deletions(-) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 60483ded5..e084c0b49 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -32,6 +32,11 @@ from sphinx.util.template import LaTeXRenderer from sphinx.util.texescape import tex_escape_map, tex_replace_map from sphinx.util.smartypants import educate_quotes_latex +if False: + # For type annotation + from typing import Any, Callable, Iterator, Pattern, Tuple, Union # NOQA + from sphinx.builder import Builder # NOQA + BEGIN_DOC = r''' \begin{document} @@ -96,7 +101,7 @@ DEFAULT_SETTINGS = { 'tocdepth': '', 'secnumdepth': '', 'pageautorefname': '', -} +} # type: Dict[unicode, unicode] ADDITIONAL_SETTINGS = { 'pdflatex': { @@ -121,7 +126,7 @@ ADDITIONAL_SETTINGS = { 'platex': { 'latex_engine': 'platex', }, -} +} # type: Dict[unicode, Dict[unicode, unicode]] class collected_footnote(nodes.footnote): @@ -141,17 +146,19 @@ class LaTeXWriter(writers.Writer): ('Document class', ['--docclass'], {'default': 'manual'}), ('Author', ['--author'], {'default': ''}), )) - settings_defaults = {} + settings_defaults = {} # type: Dict output = None def __init__(self, builder): + # type: (Builder) -> None writers.Writer.__init__(self) self.builder = builder self.translator_class = ( self.builder.translator_class or LaTeXTranslator) def translate(self): + # type: () -> None transform = ShowUrlsTransform(self.document) transform.apply() visitor = self.translator_class(self.document, self.builder) @@ -163,10 +170,12 @@ class LaTeXWriter(writers.Writer): class ExtBabel(Babel): def 
__init__(self, language_code): + # type: (unicode) -> None super(ExtBabel, self).__init__(language_code or '') self.language_code = language_code def get_shorthandoff(self): + # type: () -> unicode shortlang = self.language.split('_')[0] if shortlang in ('de', 'ngerman', 'sl', 'slovene', 'pt', 'portuges', 'es', 'spanish', 'nl', 'dutch', 'pl', 'polish', 'it', @@ -177,15 +186,18 @@ class ExtBabel(Babel): return '' def uses_cyrillic(self): + # type: () -> bool shortlang = self.language.split('_')[0] return shortlang in ('bg', 'bulgarian', 'kk', 'kazakh', 'mn', 'mongolian', 'ru', 'russian', 'uk', 'ukrainian') def is_supported_language(self): + # type: () -> bool return bool(super(ExtBabel, self).get_language()) def get_language(self): + # type: () -> unicode language = super(ExtBabel, self).get_language() if not language: return 'english' # fallback to english @@ -197,9 +209,11 @@ class ShowUrlsTransform(object): expanded = False def __init__(self, document): + # type: (nodes.Node) -> None self.document = document def apply(self): + # type: () -> None # replace id_prefix temporarily id_prefix = self.document.settings.id_prefix self.document.settings.id_prefix = 'show_urls' @@ -212,6 +226,7 @@ class ShowUrlsTransform(object): self.document.settings.id_prefix = id_prefix def expand_show_urls(self): + # type: () -> None show_urls = self.document.settings.env.config.latex_show_urls if show_urls is False or show_urls == 'no': return @@ -234,6 +249,7 @@ class ShowUrlsTransform(object): node.parent.insert(index + 1, textnode) def create_footnote(self, uri): + # type: (unicode) -> List[Union[nodes.footnote, nodes.footnote_ref]] label = nodes.label('', '#') para = nodes.paragraph() para.append(nodes.reference('', nodes.Text(uri), refuri=uri, nolinkurl=True)) @@ -250,7 +266,9 @@ class ShowUrlsTransform(object): return [footnote, footnote_ref] def renumber_footnotes(self): + # type: () -> None def is_used_number(number): + # type: (unicode) -> bool for node in self.document.traverse(nodes.footnote): if not node.get('auto') and number in node['names']: return True @@ -258,13 +276,16 @@ class ShowUrlsTransform(object): return False def is_auto_footnote(node): + # type: (nodes.Node) -> bool return isinstance(node, nodes.footnote) and node.get('auto') def footnote_ref_by(node): + # type: (nodes.Node) -> Callable[[nodes.Node], bool] ids = node['ids'] parent = list(traverse_parent(node, (nodes.document, addnodes.start_of_file)))[0] def is_footnote_ref(node): + # type: (nodes.Node) -> bool return (isinstance(node, nodes.footnote_reference) and ids[0] == node['refid'] and parent in list(traverse_parent(node))) @@ -293,23 +314,26 @@ class ShowUrlsTransform(object): class Table(object): def __init__(self): + # type: () -> None self.col = 0 self.colcount = 0 - self.colspec = None + self.colspec = None # type: unicode self.rowcount = 0 self.had_head = False self.has_problematic = False self.has_verbatim = False - self.caption = None + self.caption = None # type: List[unicode] self.longtable = False def escape_abbr(text): + # type: (unicode) -> unicode """Adjust spacing after abbreviations.""" return re.sub('\.(?=\s|$)', '.\\@', text) def rstdim_to_latexdim(width_str): + # type: (unicode) -> unicode """Convert `width_str` with rst length to LaTeX length.""" match = re.match('^(\d*\.?\d*)\s*(\S*)$', width_str) if not match: @@ -336,9 +360,10 @@ class LaTeXTranslator(nodes.NodeVisitor): docclasses = ('howto', 'manual') def __init__(self, document, builder): + # type: (nodes.Node, Builder) -> None 
nodes.NodeVisitor.__init__(self, document) self.builder = builder - self.body = [] + self.body = [] # type: List[unicode] # flags self.in_title = 0 @@ -355,8 +380,8 @@ class LaTeXTranslator(nodes.NodeVisitor): self.no_contractions = 0 self.compact_list = 0 self.first_param = 0 - self.remember_multirow = {} - self.remember_multirowcol = {} + self.remember_multirow = {} # type: Dict[int, int] + self.remember_multirowcol = {} # type: Dict[int, int] # determine top section level if builder.config.latex_toplevel_sectioning: @@ -438,6 +463,7 @@ class LaTeXTranslator(nodes.NodeVisitor): if getattr(builder, 'usepackages', None): def declare_package(packagename, options=None): + # type:(unicode, unicode) -> unicode if options: return '\\usepackage[%s]{%s}' % (options, packagename) else: @@ -486,54 +512,61 @@ class LaTeXTranslator(nodes.NodeVisitor): self.highlighter = highlighting.PygmentsBridge( 'latex', builder.config.pygments_style, builder.config.trim_doctest_flags) - self.context = [] - self.descstack = [] - self.bibitems = [] - self.table = None - self.next_table_colspec = None + self.context = [] # type: List[Any] + self.descstack = [] # type: List[unicode] + self.bibitems = [] # type: List[List[unicode]] + self.table = None # type: Table + self.next_table_colspec = None # type: unicode # stack of [language, linenothreshold] settings per file # the first item here is the default and must not be changed # the second item is the default for the master file and can be changed # by .. highlight:: directive in the master file self.hlsettingstack = 2 * [[builder.config.highlight_language, sys.maxsize]] - self.bodystack = [] - self.footnotestack = [] + self.bodystack = [] # type: List[List[unicode]] + self.footnotestack = [] # type: List[Dict[unicode, List[Union[collected_footnote, bool]]]] # NOQA self.footnote_restricted = False - self.pending_footnotes = [] - self.curfilestack = [] - self.handled_abbrs = set() - self.next_hyperlink_ids = {} - self.next_section_ids = set() + self.pending_footnotes = [] # type: List[nodes.footnote_reference] + self.curfilestack = [] # type: List[unicode] + self.handled_abbrs = set() # type: Set[unicode] + self.next_hyperlink_ids = {} # type: Dict[unicode, Set[unicode]] + self.next_section_ids = set() # type: Set[unicode] def pushbody(self, newbody): + # type: (List[unicode]) -> None self.bodystack.append(self.body) self.body = newbody def popbody(self): + # type: () -> List[unicode] body = self.body self.body = self.bodystack.pop() return body def push_hyperlink_ids(self, figtype, ids): + # type: (unicode, Set[unicode]) -> None hyperlink_ids = self.next_hyperlink_ids.setdefault(figtype, set()) hyperlink_ids.update(ids) def pop_hyperlink_ids(self, figtype): + # type: (unicode) -> Set[unicode] return self.next_hyperlink_ids.pop(figtype, set()) def check_latex_elements(self): + # type: () -> None for key in self.builder.config.latex_elements: if key not in self.elements: msg = _("Unknown configure key: latex_elements[%r] is ignored.") self.builder.warn(msg % key) def restrict_footnote(self, node): + # type: (nodes.Node) -> None if self.footnote_restricted is False: self.footnote_restricted = node self.pending_footnotes = [] def unrestrict_footnote(self, node): + # type: (nodes.Node) -> None if self.footnote_restricted == node: self.footnote_restricted = False for footnode in self.pending_footnotes: @@ -542,6 +575,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.pending_footnotes = [] def format_docclass(self, docclass): + # type: (unicode) -> unicode """ prepends 
prefix to sphinx document classes """ if docclass in self.docclasses: @@ -549,6 +583,7 @@ class LaTeXTranslator(nodes.NodeVisitor): return docclass def astext(self): + # type: () -> unicode self.elements.update({ 'body': u''.join(self.body), 'indices': self.generate_indices() @@ -561,26 +596,32 @@ class LaTeXTranslator(nodes.NodeVisitor): return LaTeXRenderer().render(DEFAULT_TEMPLATE, self.elements) def hypertarget(self, id, withdoc=True, anchor=True): + # type: (unicode, bool, bool) -> unicode if withdoc: id = self.curfilestack[-1] + ':' + id return (anchor and '\\phantomsection' or '') + \ '\\label{%s}' % self.idescape(id) def hyperlink(self, id): + # type: (unicode) -> unicode return '{\\hyperref[%s]{' % self.hyperrefescape(id) def hyperpageref(self, id): + # type: (unicode) -> unicode return '\\autopageref*{%s}' % self.idescape(id) def idescape(self, id): + # type: (unicode) -> unicode return text_type(id).translate(tex_replace_map).\ encode('ascii', 'backslashreplace').decode('ascii').\ replace('\\', '_') def hyperrefescape(self, ref): + # type: (unicode) -> unicode return self.idescape(ref).replace('-', '\\string-') def babel_renewcommand(self, command, definition): + # type: (unicode, unicode) -> unicode if self.elements['babel']: prefix = '\\addto\\captions%s{' % self.babel.get_language() suffix = '}' @@ -591,6 +632,7 @@ class LaTeXTranslator(nodes.NodeVisitor): return ('%s\\renewcommand{%s}{%s}%s\n' % (prefix, command, definition, suffix)) def babel_defmacro(self, name, definition): + # type: (unicode, unicode) -> unicode if self.elements['babel']: prefix = '\\addto\\extras%s{' % self.babel.get_language() suffix = '}' @@ -601,7 +643,8 @@ class LaTeXTranslator(nodes.NodeVisitor): return ('%s\\def%s{%s}%s\n' % (prefix, name, definition, suffix)) def generate_numfig_format(self, builder): - ret = [] + # type: (Builder) -> unicode + ret = [] # type: List[unicode] figure = self.builder.config.numfig_format['figure'].split('%s', 1) if len(figure) == 1: ret.append('\\def\\fnum@figure{%s}\n' % @@ -640,7 +683,9 @@ class LaTeXTranslator(nodes.NodeVisitor): return ''.join(ret) def generate_indices(self): + # type: (Builder) -> unicode def generate(content, collapsed): + # type: (List[Tuple[unicode, List[Tuple[unicode, unicode, unicode, unicode, unicode]]]], bool) -> unicode # NOQA ret.append('\\begin{sphinxtheindex}\n') ret.append('\\def\\bigletter#1{{\\Large\\sffamily#1}' '\\nopagebreak\\vspace{1mm}}\n') @@ -685,6 +730,7 @@ class LaTeXTranslator(nodes.NodeVisitor): return ''.join(ret) def visit_document(self, node): + # type: (nodes.Node) -> None self.footnotestack.append(self.collect_footnotes(node)) self.curfilestack.append(node.get('docname', '')) if self.first_document == 1: @@ -701,8 +747,9 @@ class LaTeXTranslator(nodes.NodeVisitor): self.sectionlevel = self.top_sectionlevel - 1 def depart_document(self, node): + # type: (nodes.Node) -> None if self.bibitems: - widest_label = "" + widest_label = "" # type: unicode for bi in self.bibitems: if len(widest_label) < len(bi[0]): widest_label = bi[0] @@ -717,6 +764,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.bibitems = [] def visit_start_of_file(self, node): + # type: (nodes.Node) -> None # collect new footnotes self.footnotestack.append(self.collect_footnotes(node)) # also add a document target @@ -726,7 +774,9 @@ class LaTeXTranslator(nodes.NodeVisitor): self.hlsettingstack.append(self.hlsettingstack[0]) def collect_footnotes(self, node): + # type: (nodes.Node) -> Dict[unicode, List[Union[collected_footnote, bool]]] def 
footnotes_under(n): + # type: (nodes.Node) -> Iterator[nodes.Node] if isinstance(n, nodes.footnote): yield n else: @@ -735,7 +785,8 @@ class LaTeXTranslator(nodes.NodeVisitor): continue for k in footnotes_under(c): yield k - fnotes = {} + + fnotes = {} # type: Dict[unicode, List[Union[collected_footnote, bool]]] for fn in footnotes_under(node): num = fn.children[0].astext().strip() newnode = collected_footnote(*fn.children, number=num) @@ -743,15 +794,18 @@ class LaTeXTranslator(nodes.NodeVisitor): return fnotes def depart_start_of_file(self, node): + # type: (nodes.Node) -> None self.footnotestack.pop() self.curfilestack.pop() self.hlsettingstack.pop() def visit_highlightlang(self, node): + # type: (nodes.Node) -> None self.hlsettingstack[-1] = [node['lang'], node['linenothreshold']] raise nodes.SkipNode def visit_section(self, node): + # type: (nodes.Node) -> None if not self.this_is_the_title: self.sectionlevel += 1 self.body.append('\n\n') @@ -759,40 +813,50 @@ class LaTeXTranslator(nodes.NodeVisitor): self.next_section_ids.update(node['ids']) def depart_section(self, node): + # type: (nodes.Node) -> None self.sectionlevel = max(self.sectionlevel - 1, self.top_sectionlevel - 1) def visit_problematic(self, node): + # type: (nodes.Node) -> None self.body.append(r'{\color{red}\bfseries{}') def depart_problematic(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_topic(self, node): + # type: (nodes.Node) -> None self.in_minipage = 1 self.body.append('\n\\begin{sphinxShadowBox}\n') def depart_topic(self, node): + # type: (nodes.Node) -> None self.in_minipage = 0 self.body.append('\\end{sphinxShadowBox}\n') visit_sidebar = visit_topic depart_sidebar = depart_topic def visit_glossary(self, node): + # type: (nodes.Node) -> None pass def depart_glossary(self, node): + # type: (nodes.Node) -> None pass def visit_productionlist(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n\\begin{productionlist}\n') self.in_production_list = 1 def depart_productionlist(self, node): + # type: (nodes.Node) -> None self.body.append('\\end{productionlist}\n\n') self.in_production_list = 0 def visit_production(self, node): + # type: (nodes.Node) -> None if node['tokenname']: tn = node['tokenname'] self.body.append(self.hypertarget('grammar-token-' + tn)) @@ -801,15 +865,19 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append('\\productioncont{') def depart_production(self, node): + # type: (nodes.Node) -> None self.body.append('}\n') def visit_transition(self, node): + # type: (nodes.Node) -> None self.body.append(self.elements['transition']) def depart_transition(self, node): + # type: (nodes.Node) -> None pass def visit_title(self, node): + # type: (nodes.Node) -> None parent = node.parent if isinstance(parent, addnodes.seealso): # the environment already handles this @@ -866,6 +934,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.in_title = 1 def depart_title(self, node): + # type: (nodes.Node) -> None self.in_title = 0 if isinstance(node.parent, nodes.table): self.table.caption = self.popbody() @@ -874,6 +943,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.unrestrict_footnote(node) def visit_subtitle(self, node): + # type: (nodes.Node) -> None if isinstance(node.parent, nodes.sidebar): self.body.append('\\sphinxstylesidebarsubtitle{') self.context.append('}\n') @@ -881,17 +951,21 @@ class LaTeXTranslator(nodes.NodeVisitor): self.context.append('') def depart_subtitle(self, node): + # type: (nodes.Node) -> None self.body.append(self.context.pop()) def 
visit_desc(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n\\begin{fulllineitems}\n') if self.table: self.table.has_problematic = True def depart_desc(self, node): + # type: (nodes.Node) -> None self.body.append('\n\\end{fulllineitems}\n\n') def _visit_signature_line(self, node): + # type: (nodes.Node) -> None for child in node: if isinstance(child, addnodes.desc_parameterlist): self.body.append(r'\pysiglinewithargsret{') @@ -900,9 +974,11 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append(r'\pysigline{') def _depart_signature_line(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_desc_signature(self, node): + # type: (nodes.Node) -> None if node.parent['objtype'] != 'describe' and node['ids']: hyper = self.hypertarget(node['ids'][0]) else: @@ -912,55 +988,69 @@ class LaTeXTranslator(nodes.NodeVisitor): self._visit_signature_line(node) def depart_desc_signature(self, node): + # type: (nodes.Node) -> None if not node.get('is_multiline'): self._depart_signature_line(node) def visit_desc_signature_line(self, node): + # type: (nodes.Node) -> None self._visit_signature_line(node) def depart_desc_signature_line(self, node): + # type: (nodes.Node) -> None self._depart_signature_line(node) def visit_desc_addname(self, node): + # type: (nodes.Node) -> None self.body.append(r'\sphinxcode{') self.literal_whitespace += 1 def depart_desc_addname(self, node): + # type: (nodes.Node) -> None self.body.append('}') self.literal_whitespace -= 1 def visit_desc_type(self, node): + # type: (nodes.Node) -> None pass def depart_desc_type(self, node): + # type: (nodes.Node) -> None pass def visit_desc_returns(self, node): + # type: (nodes.Node) -> None self.body.append(r'{ $\rightarrow$ ') def depart_desc_returns(self, node): + # type: (nodes.Node) -> None self.body.append(r'}') def visit_desc_name(self, node): + # type: (nodes.Node) -> None self.body.append(r'\sphinxbfcode{') self.no_contractions += 1 self.literal_whitespace += 1 def depart_desc_name(self, node): + # type: (nodes.Node) -> None self.body.append('}') self.literal_whitespace -= 1 self.no_contractions -= 1 def visit_desc_parameterlist(self, node): + # type: (nodes.Node) -> None # close name, open parameterlist self.body.append('}{') self.first_param = 1 def depart_desc_parameterlist(self, node): + # type: (nodes.Node) -> None # close parameterlist, open return annotation self.body.append('}{') def visit_desc_parameter(self, node): + # type: (nodes.Node) -> None if not self.first_param: self.body.append(', ') else: @@ -969,36 +1059,46 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append(r'\emph{') def depart_desc_parameter(self, node): + # type: (nodes.Node) -> None if not node.hasattr('noemph'): self.body.append('}') def visit_desc_optional(self, node): + # type: (nodes.Node) -> None self.body.append(r'\sphinxoptional{') def depart_desc_optional(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_desc_annotation(self, node): + # type: (nodes.Node) -> None self.body.append(r'\sphinxstrong{') def depart_desc_annotation(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_desc_content(self, node): + # type: (nodes.Node) -> None if node.children and not isinstance(node.children[0], nodes.paragraph): # avoid empty desc environment which causes a formatting bug self.body.append('~') def depart_desc_content(self, node): + # type: (nodes.Node) -> None pass def visit_seealso(self, node): + # type: (nodes.Node) -> None 
self.body.append(u'\n\n\\sphinxstrong{%s:}\n\n' % admonitionlabels['seealso']) def depart_seealso(self, node): + # type: (nodes.Node) -> None self.body.append("\n\n") def visit_rubric(self, node): + # type: (nodes.Node) -> None if len(node.children) == 1 and node.children[0].astext() in \ ('Footnotes', _('Footnotes')): raise nodes.SkipNode @@ -1007,13 +1107,16 @@ class LaTeXTranslator(nodes.NodeVisitor): self.in_title = 1 def depart_rubric(self, node): + # type: (nodes.Node) -> None self.in_title = 0 self.body.append(self.context.pop()) def visit_footnote(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_collected_footnote(self, node): + # type: (nodes.Node) -> None self.in_footnote += 1 if 'footnotetext' in node: self.body.append('%%\n\\begin{footnotetext}[%s]' @@ -1023,6 +1126,7 @@ class LaTeXTranslator(nodes.NodeVisitor): '\\sphinxAtStartFootnote\n' % node['number']) def depart_collected_footnote(self, node): + # type: (nodes.Node) -> None if 'footnotetext' in node: self.body.append('%\n\\end{footnotetext}') else: @@ -1030,6 +1134,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.in_footnote -= 1 def visit_label(self, node): + # type: (nodes.Node) -> None if isinstance(node.parent, nodes.citation): self.bibitems[-1][0] = node.astext() self.bibitems[-1][2] = self.curfilestack[-1] @@ -1037,23 +1142,26 @@ class LaTeXTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_tabular_col_spec(self, node): + # type: (nodes.Node) -> None self.next_table_colspec = node['spec'] raise nodes.SkipNode def visit_table(self, node): + # type: (nodes.Node) -> None if self.table: raise UnsupportedError( '%s:%s: nested tables are not yet implemented.' % (self.curfilestack[-1], node.line or '')) self.table = Table() self.table.longtable = 'longtable' in node['classes'] - self.tablebody = [] - self.tableheaders = [] + self.tablebody = [] # type: List[unicode] + self.tableheaders = [] # type: List[unicode] # Redirect body output until table is finished. 
self.pushbody(self.tablebody) self.restrict_footnote(node) def depart_table(self, node): + # type: (nodes.Node) -> None if self.table.rowcount > 30: self.table.longtable = True self.popbody() @@ -1130,18 +1238,23 @@ class LaTeXTranslator(nodes.NodeVisitor): self.tablebody = None def visit_colspec(self, node): + # type: (nodes.Node) -> None self.table.colcount += 1 def depart_colspec(self, node): + # type: (nodes.Node) -> None pass def visit_tgroup(self, node): + # type: (nodes.Node) -> None pass def depart_tgroup(self, node): + # type: (nodes.Node) -> None pass def visit_thead(self, node): + # type: (nodes.Node) -> None self.table.had_head = True if self.next_table_colspec: self.table.colspec = '{%s}\n' % self.next_table_colspec @@ -1150,24 +1263,29 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body = self.tableheaders def depart_thead(self, node): + # type: (nodes.Node) -> None pass def visit_tbody(self, node): + # type: (nodes.Node) -> None if not self.table.had_head: self.visit_thead(node) self.body = self.tablebody def depart_tbody(self, node): + # type: (nodes.Node) -> None self.remember_multirow = {} self.remember_multirowcol = {} def visit_row(self, node): + # type: (nodes.Node) -> None self.table.col = 0 for key, value in self.remember_multirow.items(): if not value and key in self.remember_multirowcol: del self.remember_multirowcol[key] def depart_row(self, node): + # type: (nodes.Node) -> None self.body.append('\\\\\n') if any(self.remember_multirow.values()): linestart = 1 @@ -1188,6 +1306,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.table.rowcount += 1 def visit_entry(self, node): + # type: (nodes.Node) -> None if self.table.col == 0: while self.remember_multirow.get(self.table.col + 1, 0): self.table.col += 1 @@ -1249,6 +1368,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.context.append(context) def depart_entry(self, node): + # type: (nodes.Node) -> None if self.in_merged_cell: self.in_merged_cell = 0 self.literal_whitespace -= 1 @@ -1262,6 +1382,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append(self.context.pop()) # header def visit_acks(self, node): + # type: (nodes.Node) -> None # this is a list in the source, but should be rendered as a # comma-separated list here self.body.append('\n\n') @@ -1271,16 +1392,19 @@ class LaTeXTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_bullet_list(self, node): + # type: (nodes.Node) -> None if not self.compact_list: self.body.append('\\begin{itemize}\n') if self.table: self.table.has_problematic = True def depart_bullet_list(self, node): + # type: (nodes.Node) -> None if not self.compact_list: self.body.append('\\end{itemize}\n') def visit_enumerated_list(self, node): + # type: (nodes.Node) -> None self.body.append('\\begin{enumerate}\n') if 'start' in node: self.body.append('\\setcounter{enumi}{%d}\n' % (node['start'] - 1)) @@ -1288,33 +1412,41 @@ class LaTeXTranslator(nodes.NodeVisitor): self.table.has_problematic = True def depart_enumerated_list(self, node): + # type: (nodes.Node) -> None self.body.append('\\end{enumerate}\n') def visit_list_item(self, node): + # type: (nodes.Node) -> None # Append "{}" in case the next character is "[", which would break # LaTeX's list environment (no numbering and the "[" is not printed). 
self.body.append(r'\item {} ') def depart_list_item(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def visit_definition_list(self, node): + # type: (nodes.Node) -> None self.body.append('\\begin{description}\n') if self.table: self.table.has_problematic = True def depart_definition_list(self, node): + # type: (nodes.Node) -> None self.body.append('\\end{description}\n') def visit_definition_list_item(self, node): + # type: (nodes.Node) -> None pass def depart_definition_list_item(self, node): + # type: (nodes.Node) -> None pass def visit_term(self, node): + # type: (nodes.Node) -> None self.in_term += 1 - ctx = '}] \\leavevmode' + ctx = '}] \\leavevmode' # type: unicode if node.get('ids'): ctx += self.hypertarget(node['ids'][0]) self.body.append('\\item[{') @@ -1322,40 +1454,50 @@ class LaTeXTranslator(nodes.NodeVisitor): self.context.append(ctx) def depart_term(self, node): + # type: (nodes.Node) -> None self.body.append(self.context.pop()) self.unrestrict_footnote(node) self.in_term -= 1 def visit_termsep(self, node): + # type: (nodes.Node) -> None warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.5', DeprecationWarning) self.body.append(', ') raise nodes.SkipNode def visit_classifier(self, node): + # type: (nodes.Node) -> None self.body.append('{[}') def depart_classifier(self, node): + # type: (nodes.Node) -> None self.body.append('{]}') def visit_definition(self, node): + # type: (nodes.Node) -> None pass def depart_definition(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def visit_field_list(self, node): + # type: (nodes.Node) -> None self.body.append('\\begin{quote}\\begin{description}\n') if self.table: self.table.has_problematic = True def depart_field_list(self, node): + # type: (nodes.Node) -> None self.body.append('\\end{description}\\end{quote}\n') def visit_field(self, node): + # type: (nodes.Node) -> None pass def depart_field(self, node): + # type: (nodes.Node) -> None pass visit_field_name = visit_term @@ -1365,6 +1507,7 @@ class LaTeXTranslator(nodes.NodeVisitor): depart_field_body = depart_definition def visit_paragraph(self, node): + # type: (nodes.Node) -> None index = node.parent.index(node) if (index > 0 and isinstance(node.parent, nodes.compound) and not isinstance(node.parent[index - 1], nodes.paragraph) and @@ -1378,17 +1521,21 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append('\n') def depart_paragraph(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def visit_centered(self, node): + # type: (nodes.Node) -> None self.body.append('\n\\begin{center}') if self.table: self.table.has_problematic = True def depart_centered(self, node): + # type: (nodes.Node) -> None self.body.append('\n\\end{center}') def visit_hlist(self, node): + # type: (nodes.Node) -> None # for now, we don't support a more compact list format # don't add individual itemize environments, but one for all columns self.compact_list += 1 @@ -1398,26 +1545,32 @@ class LaTeXTranslator(nodes.NodeVisitor): self.table.has_problematic = True def depart_hlist(self, node): + # type: (nodes.Node) -> None self.compact_list -= 1 self.body.append('\\end{itemize}\n') def visit_hlistcol(self, node): + # type: (nodes.Node) -> None pass def depart_hlistcol(self, node): + # type: (nodes.Node) -> None pass def latex_image_length(self, width_str): + # type: (nodes.Node) -> unicode try: return rstdim_to_latexdim(width_str) except ValueError: self.builder.warn('dimension unit %s is invalid. Ignored.' 
% width_str) def is_inline(self, node): + # type: (nodes.Node) -> bool """Check whether a node represents an inline element.""" return isinstance(node.parent, nodes.TextElement) def visit_image(self, node): + # type: (nodes.Node) -> None attrs = node.attributes pre = [] # in reverse order post = [] @@ -1490,10 +1643,12 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.extend(post) def depart_image(self, node): + # type: (nodes.Node) -> None pass def visit_figure(self, node): - ids = '' + # type: (nodes.Node) -> None + ids = '' # type: unicode for id in self.pop_hyperlink_ids('figure'): ids += self.hypertarget(id, anchor=False) if node['ids']: @@ -1549,10 +1704,12 @@ class LaTeXTranslator(nodes.NodeVisitor): self.context.append(ids + align_end + '\\end{figure}\n') def depart_figure(self, node): + # type: (nodes.Node) -> None self.body.append(self.context.pop()) self.unrestrict_footnote(node) def visit_caption(self, node): + # type: (nodes.Node) -> None self.in_caption += 1 self.restrict_footnote(node) if self.in_container_literal_block: @@ -1565,29 +1722,36 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append('\\caption{') def depart_caption(self, node): + # type: (nodes.Node) -> None self.body.append('}') self.in_caption -= 1 self.unrestrict_footnote(node) def visit_legend(self, node): + # type: (nodes.Node) -> None self.body.append('{\\small ') def depart_legend(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_admonition(self, node): + # type: (nodes.Node) -> None self.body.append('\n\\begin{sphinxadmonition}{note}') def depart_admonition(self, node): + # type: (nodes.Node) -> None self.body.append('\\end{sphinxadmonition}\n') def _make_visit_admonition(name): def visit_admonition(self, node): + # type: (nodes.Node) -> None self.body.append(u'\n\\begin{sphinxadmonition}{%s}{%s:}' % (name, admonitionlabels[name])) return visit_admonition def _depart_named_admonition(self, node): + # type: (nodes.Node) -> None self.body.append('\\end{sphinxadmonition}\n') visit_attention = _make_visit_admonition('attention') @@ -1610,13 +1774,17 @@ class LaTeXTranslator(nodes.NodeVisitor): depart_warning = _depart_named_admonition def visit_versionmodified(self, node): + # type: (nodes.Node) -> None pass def depart_versionmodified(self, node): + # type: (nodes.Node) -> None pass def visit_target(self, node): + # type: (nodes.Node) -> None def add_target(id): + # type: (unicode) -> None # indexing uses standard LaTeX index markup, so the targets # will be generated differently if id.startswith('index-'): @@ -1664,16 +1832,20 @@ class LaTeXTranslator(nodes.NodeVisitor): add_target(id) def depart_target(self, node): + # type: (nodes.Node) -> None pass def visit_attribution(self, node): + # type: (nodes.Node) -> None self.body.append('\n\\begin{flushright}\n') self.body.append('---') def depart_attribution(self, node): + # type: (nodes.Node) -> None self.body.append('\n\\end{flushright}\n') def visit_index(self, node, scre=re.compile(r';\s*')): + # type: (nodes.Node, Pattern) -> None if not node.get('inline', True): self.body.append('\n') entries = node['entries'] @@ -1710,11 +1882,13 @@ class LaTeXTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_raw(self, node): + # type: (nodes.Node) -> None if 'latex' in node.get('format', '').split(): self.body.append(node.astext()) raise nodes.SkipNode def visit_reference(self, node): + # type: (nodes.Node) -> None if not self.in_title: for id in node.get('ids'): anchor = not self.in_caption @@ -1773,9 +1947,11 @@ class 
LaTeXTranslator(nodes.NodeVisitor): self.context.append('') def depart_reference(self, node): + # type: (nodes.Node) -> None self.body.append(self.context.pop()) def visit_number_reference(self, node): + # type: (nodes.Node) -> None if node.get('refid'): id = self.curfilestack[-1] + ':' + node['refid'] else: @@ -1797,46 +1973,59 @@ class LaTeXTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_download_reference(self, node): + # type: (nodes.Node) -> None pass def depart_download_reference(self, node): + # type: (nodes.Node) -> None pass def visit_pending_xref(self, node): + # type: (nodes.Node) -> None pass def depart_pending_xref(self, node): + # type: (nodes.Node) -> None pass def visit_emphasis(self, node): + # type: (nodes.Node) -> None self.body.append(r'\sphinxstyleemphasis{') def depart_emphasis(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_literal_emphasis(self, node): + # type: (nodes.Node) -> None self.body.append(r'\sphinxstyleliteralemphasis{') self.no_contractions += 1 def depart_literal_emphasis(self, node): + # type: (nodes.Node) -> None self.body.append('}') self.no_contractions -= 1 def visit_strong(self, node): + # type: (nodes.Node) -> None self.body.append(r'\sphinxstylestrong{') def depart_strong(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_literal_strong(self, node): + # type: (nodes.Node) -> None self.body.append(r'\sphinxstyleliteralstrong{') self.no_contractions += 1 def depart_literal_strong(self, node): + # type: (nodes.Node) -> None self.body.append('}') self.no_contractions -= 1 def visit_abbreviation(self, node): + # type: (nodes.Node) -> None abbr = node.astext() self.body.append(r'\sphinxstyleabbreviation{') # spell out the explanation once @@ -1847,39 +2036,48 @@ class LaTeXTranslator(nodes.NodeVisitor): self.context.append('}') def depart_abbreviation(self, node): + # type: (nodes.Node) -> None self.body.append(self.context.pop()) def visit_manpage(self, node): + # type: (nodes.Node) -> Any return self.visit_literal_emphasis(node) def depart_manpage(self, node): + # type: (nodes.Node) -> Any return self.depart_literal_emphasis(node) def visit_title_reference(self, node): + # type: (nodes.Node) -> None self.body.append(r'\sphinxtitleref{') def depart_title_reference(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_citation(self, node): + # type: (nodes.Node) -> None # TODO maybe use cite bibitems # bibitem: [citelabel, citetext, docname, citeid] self.bibitems.append(['', '', '', '']) self.context.append(len(self.body)) def depart_citation(self, node): + # type: (nodes.Node) -> None size = self.context.pop() text = ''.join(self.body[size:]) del self.body[size:] self.bibitems[-1][1] = text def visit_citation_reference(self, node): + # type: (nodes.Node) -> None # This is currently never encountered, since citation_reference nodes # are already replaced by pending_xref nodes in the environment. 
self.body.append('\\cite{%s}' % self.idescape(node.astext())) raise nodes.SkipNode def visit_literal(self, node): + # type: (nodes.Node) -> None self.no_contractions += 1 if self.in_title: self.body.append(r'\sphinxstyleliteralintitle{') @@ -1887,10 +2085,12 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append(r'\sphinxcode{') def depart_literal(self, node): + # type: (nodes.Node) -> None self.no_contractions -= 1 self.body.append('}') def visit_footnote_reference(self, node): + # type: (nodes.Node) -> None num = node.astext().strip() try: footnode, used = self.footnotestack[-1][num] @@ -1906,18 +2106,20 @@ class LaTeXTranslator(nodes.NodeVisitor): self.pending_footnotes.append(footnode) else: self.footnotestack[-1][num][1] = True - footnode.walkabout(self) + footnode.walkabout(self) # type: ignore raise nodes.SkipChildren def depart_footnote_reference(self, node): + # type: (nodes.Node) -> None pass def visit_literal_block(self, node): + # type: (nodes.Node) -> None if node.rawsource != node.astext(): # most probably a parsed-literal block -- don't highlight self.body.append('\\begin{alltt}\n') else: - ids = '' + ids = '' # type: unicode for id in self.pop_hyperlink_ids('code-block'): ids += self.hypertarget(id, anchor=False) if node['ids']: @@ -1943,6 +2145,7 @@ class LaTeXTranslator(nodes.NodeVisitor): opts = {} def warner(msg): + # type: (unicode) -> None self.builder.warn(msg, (self.curfilestack[-1], node.line)) hlcode = self.highlighter.highlight_block(code, lang, opts=opts, warn=warner, linenos=linenos, @@ -1974,17 +2177,21 @@ class LaTeXTranslator(nodes.NodeVisitor): raise nodes.SkipNode def depart_literal_block(self, node): + # type: (nodes.Node) -> None self.body.append('\n\\end{alltt}\n') visit_doctest_block = visit_literal_block depart_doctest_block = depart_literal_block def visit_line(self, node): + # type: (nodes.Node) -> None self.body.append('\item[] ') def depart_line(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def visit_line_block(self, node): + # type: (nodes.Node) -> None if isinstance(node.parent, nodes.line_block): self.body.append('\\item[]\n' '\\begin{DUlineblock}{\\DUlineblockindent}\n') @@ -1994,9 +2201,11 @@ class LaTeXTranslator(nodes.NodeVisitor): self.table.has_problematic = True def depart_line_block(self, node): + # type: (nodes.Node) -> None self.body.append('\\end{DUlineblock}\n') def visit_block_quote(self, node): + # type: (nodes.Node) -> None # If the block quote contains a single object and that object # is a list, then generate a list not a block quote. # This lets us indent lists. @@ -2012,6 +2221,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.table.has_problematic = True def depart_block_quote(self, node): + # type: (nodes.Node) -> None done = 0 if len(node.children) == 1: child = node.children[0] @@ -2024,45 +2234,56 @@ class LaTeXTranslator(nodes.NodeVisitor): # option node handling copied from docutils' latex writer def visit_option(self, node): + # type: (nodes.Node) -> None if self.context[-1]: # this is not the first option self.body.append(', ') def depart_option(self, node): + # type: (nodes.Node) -> None # flag that the first option is done. 
self.context[-1] += 1 def visit_option_argument(self, node): + # type: (nodes.Node) -> None """The delimiter betweeen an option and its argument.""" self.body.append(node.get('delimiter', ' ')) def depart_option_argument(self, node): + # type: (nodes.Node) -> None pass def visit_option_group(self, node): + # type: (nodes.Node) -> None self.body.append('\\item [') # flag for first option self.context.append(0) def depart_option_group(self, node): + # type: (nodes.Node) -> None self.context.pop() # the flag self.body.append('] ') def visit_option_list(self, node): + # type: (nodes.Node) -> None self.body.append('\\begin{optionlist}{3cm}\n') if self.table: self.table.has_problematic = True def depart_option_list(self, node): + # type: (nodes.Node) -> None self.body.append('\\end{optionlist}\n') def visit_option_list_item(self, node): + # type: (nodes.Node) -> None pass def depart_option_list_item(self, node): + # type: (nodes.Node) -> None pass def visit_option_string(self, node): + # type: (nodes.Node) -> None ostring = node.astext() self.no_contractions += 1 self.body.append(self.encode(ostring)) @@ -2070,30 +2291,39 @@ class LaTeXTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_description(self, node): + # type: (nodes.Node) -> None self.body.append(' ') def depart_description(self, node): + # type: (nodes.Node) -> None pass def visit_superscript(self, node): + # type: (nodes.Node) -> None self.body.append('$^{\\text{') def depart_superscript(self, node): + # type: (nodes.Node) -> None self.body.append('}}$') def visit_subscript(self, node): + # type: (nodes.Node) -> None self.body.append('$_{\\text{') def depart_subscript(self, node): + # type: (nodes.Node) -> None self.body.append('}}$') def visit_substitution_definition(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_substitution_reference(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_inline(self, node): + # type: (nodes.Node) -> None classes = node.get('classes', []) if classes in [['menuselection'], ['guilabel']]: self.body.append(r'\sphinxmenuselection{') @@ -2108,24 +2338,30 @@ class LaTeXTranslator(nodes.NodeVisitor): self.context.append('') def depart_inline(self, node): + # type: (nodes.Node) -> None self.body.append(self.context.pop()) def visit_generated(self, node): + # type: (nodes.Node) -> None pass def depart_generated(self, node): + # type: (nodes.Node) -> None pass def visit_compound(self, node): + # type: (nodes.Node) -> None pass def depart_compound(self, node): + # type: (nodes.Node) -> None pass def visit_container(self, node): + # type: (nodes.Node) -> None if node.get('literal_block'): self.in_container_literal_block += 1 - ids = '' + ids = '' # type: unicode for id in self.pop_hyperlink_ids('code-block'): ids += self.hypertarget(id, anchor=False) if node['ids']: @@ -2136,31 +2372,38 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append('\n\\def\\sphinxLiteralBlockLabel{' + ids + '}\n') def depart_container(self, node): + # type: (nodes.Node) -> None if node.get('literal_block'): self.in_container_literal_block -= 1 self.body.append('\\let\\sphinxVerbatimTitle\\empty\n') self.body.append('\\let\\sphinxLiteralBlockLabel\\empty\n') def visit_decoration(self, node): + # type: (nodes.Node) -> None pass def depart_decoration(self, node): + # type: (nodes.Node) -> None pass # docutils-generated elements that we don't support def visit_header(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_footer(self, node): + # type: 
(nodes.Node) -> None raise nodes.SkipNode def visit_docinfo(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode # text handling def encode(self, text): + # type: (unicode) -> unicode text = text_type(text).translate(tex_escape_map) if self.literal_whitespace: # Insert a blank before the newline, to avoid @@ -2172,32 +2415,40 @@ class LaTeXTranslator(nodes.NodeVisitor): return text def encode_uri(self, text): + # type: (unicode) -> unicode # in \href, the tilde is allowed and must be represented literally return self.encode(text).replace('\\textasciitilde{}', '~') def visit_Text(self, node): + # type: (nodes.Node) -> None text = self.encode(node.astext()) if not self.no_contractions: text = educate_quotes_latex(text) self.body.append(text) def depart_Text(self, node): + # type: (nodes.Node) -> None pass def visit_comment(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_meta(self, node): + # type: (nodes.Node) -> None # only valid for HTML raise nodes.SkipNode def visit_system_message(self, node): + # type: (nodes.Node) -> None pass def depart_system_message(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def visit_math(self, node): + # type: (nodes.Node) -> None self.builder.warn('using "math" markup without a Sphinx math extension ' 'active, please use one of the math extensions ' 'described at http://sphinx-doc.org/ext/math.html', @@ -2207,4 +2458,5 @@ class LaTeXTranslator(nodes.NodeVisitor): visit_math_block = visit_math def unknown_visit(self, node): + # type: (nodes.Node) -> None raise NotImplementedError('Unknown node: ' + node.__class__.__name__) diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py index 6ec077fd7..0a9a42aca 100644 --- a/sphinx/writers/texinfo.py +++ b/sphinx/writers/texinfo.py @@ -16,6 +16,7 @@ import warnings from six import itervalues from six.moves import range + from docutils import nodes, writers from sphinx import addnodes, __display_version__ @@ -23,6 +24,11 @@ from sphinx.locale import admonitionlabels, _ from sphinx.util.i18n import format_date from sphinx.writers.latex import collected_footnote +if False: + # For type annotation + from typing import Any, Callable, Iterator, Pattern, Tuple, Union # NOQA + from sphinx.builders.texinfo import TexinfoBuilder # NOQA + COPYING = """\ @quotation @@ -80,6 +86,7 @@ TEMPLATE = """\ def find_subsections(section): + # type: (nodes.Node) -> List[nodes.Node] """Return a list of subsections for the given ``section``.""" result = [] for child in section.children: @@ -91,6 +98,7 @@ def find_subsections(section): def smart_capwords(s, sep=None): + # type: (unicode, unicode) -> unicode """Like string.capwords() but does not capitalize words that already contain a capital letter.""" words = s.split(sep) @@ -110,21 +118,23 @@ class TexinfoWriter(writers.Writer): ('Dir entry', ['--texinfo-dir-entry'], {'default': ''}), ('Description', ['--texinfo-dir-description'], {'default': ''}), ('Category', ['--texinfo-dir-category'], {'default': - 'Miscellaneous'}))) + 'Miscellaneous'}))) # type: Tuple[unicode, Any, Tuple[Tuple[unicode, List[unicode], Dict[unicode, unicode]], ...]] # NOQA - settings_defaults = {} + settings_defaults = {} # type: Dict - output = None + output = None # type: unicode visitor_attributes = ('output', 'fragment') def __init__(self, builder): + # type: (TexinfoBuilder) -> None writers.Writer.__init__(self) self.builder = builder self.translator_class = ( self.builder.translator_class or TexinfoTranslator) def translate(self): + # type: () 
-> None self.visitor = visitor = self.translator_class( self.document, self.builder) self.document.walkabout(visitor) @@ -153,44 +163,53 @@ class TexinfoTranslator(nodes.NodeVisitor): } def __init__(self, document, builder): + # type: (nodes.Node, TexinfoBuilder) -> None nodes.NodeVisitor.__init__(self, document) self.builder = builder self.init_settings() - self.written_ids = set() # node names and anchors in output + self.written_ids = set() # type: Set[unicode] + # node names and anchors in output # node names and anchors that should be in output - self.referenced_ids = set() - self.indices = [] # (node name, content) - self.short_ids = {} # anchors --> short ids - self.node_names = {} # node name --> node's name to display - self.node_menus = {} # node name --> node's menu entries - self.rellinks = {} # node name --> (next, previous, up) + self.referenced_ids = set() # type: Set[unicode] + self.indices = [] # type: List[Tuple[unicode, unicode]] + # (node name, content) + self.short_ids = {} # type: Dict[unicode, unicode] + # anchors --> short ids + self.node_names = {} # type: Dict[unicode, unicode] + # node name --> node's name to display + self.node_menus = {} # type: Dict[unicode, List[unicode]] + # node name --> node's menu entries + self.rellinks = {} # type: Dict[unicode, List[unicode]] + # node name --> (next, previous, up) self.collect_indices() self.collect_node_names() self.collect_node_menus() self.collect_rellinks() - self.body = [] - self.context = [] - self.previous_section = None + self.body = [] # type: List[unicode] + self.context = [] # type: List[unicode] + self.previous_section = None # type: nodes.section self.section_level = 0 self.seen_title = False - self.next_section_ids = set() + self.next_section_ids = set() # type: Set[unicode] self.escape_newlines = 0 self.escape_hyphens = 0 - self.curfilestack = [] - self.footnotestack = [] + self.curfilestack = [] # type: List[unicode] + self.footnotestack = [] # type: List[Dict[unicode, List[Union[collected_footnote, bool]]]] # NOQA self.in_footnote = 0 - self.handled_abbrs = set() + self.handled_abbrs = set() # type: Set[unicode] + self.colwidths = None # type: List[int] def finish(self): + # type: () -> None if self.previous_section is None: self.add_menu('Top') for index in self.indices: name, content = index pointers = tuple([name] + self.rellinks[name]) - self.body.append('\n@node %s,%s,%s,%s\n' % pointers) + self.body.append('\n@node %s,%s,%s,%s\n' % pointers) # type: ignore self.body.append('@unnumbered %s\n\n%s\n' % (name, content)) while self.referenced_ids: @@ -206,6 +225,7 @@ class TexinfoTranslator(nodes.NodeVisitor): # -- Helper routines def init_settings(self): + # type: () -> None settings = self.settings = self.document.settings elements = self.elements = self.default_elements.copy() elements.update({ @@ -222,17 +242,18 @@ class TexinfoTranslator(nodes.NodeVisitor): language=self.builder.config.language)) }) # title - title = elements['title'] + title = None # type: unicode + title = elements['title'] # type: ignore if not title: - title = self.document.next_node(nodes.title) - title = (title and title.astext()) or '<untitled>' + title = self.document.next_node(nodes.title) # type: ignore + title = (title and title.astext()) or '<untitled>' # type: ignore elements['title'] = self.escape_id(title) or '<untitled>' # filename if not elements['filename']: elements['filename'] = self.document.get('source') or 'untitled' - if elements['filename'][-4:] in ('.txt', '.rst'): - elements['filename'] = 
elements['filename'][:-4] - elements['filename'] += '.info' + if elements['filename'][-4:] in ('.txt', '.rst'): # type: ignore + elements['filename'] = elements['filename'][:-4] # type: ignore + elements['filename'] += '.info' # type: ignore # direntry if settings.texinfo_dir_entry: entry = self.format_menu_entry( @@ -249,11 +270,13 @@ class TexinfoTranslator(nodes.NodeVisitor): elements.update(settings.texinfo_elements) def collect_node_names(self): + # type: () -> None """Generates a unique id for each section. Assigns the attribute ``node_name`` to each section.""" def add_node_name(name): + # type: (unicode) -> unicode node_id = self.escape_id(name) nth, suffix = 1, '' while node_id + suffix in self.written_ids or \ @@ -279,6 +302,7 @@ class TexinfoTranslator(nodes.NodeVisitor): section['node_name'] = add_node_name(name) def collect_node_menus(self): + # type: () -> None """Collect the menu entries for each "node" section.""" node_menus = self.node_menus for node in ([self.document] + @@ -303,6 +327,7 @@ class TexinfoTranslator(nodes.NodeVisitor): node_menus['Top'].append(name) def collect_rellinks(self): + # type: () -> None """Collect the relative links (next, previous, up) for each "node".""" rellinks = self.rellinks node_menus = self.node_menus @@ -336,6 +361,7 @@ class TexinfoTranslator(nodes.NodeVisitor): # characters. def escape(self, s): + # type: (unicode) -> unicode """Return a string with Texinfo command characters escaped.""" s = s.replace('@', '@@') s = s.replace('{', '@{') @@ -346,6 +372,7 @@ class TexinfoTranslator(nodes.NodeVisitor): return s def escape_arg(self, s): + # type: (unicode) -> unicode """Return an escaped string suitable for use as an argument to a Texinfo command.""" s = self.escape(s) @@ -356,6 +383,7 @@ class TexinfoTranslator(nodes.NodeVisitor): return s def escape_id(self, s): + # type: (unicode) -> unicode """Return an escaped string suitable for node names and anchors.""" bad_chars = ',:.()' for bc in bad_chars: @@ -364,6 +392,7 @@ class TexinfoTranslator(nodes.NodeVisitor): return self.escape(s) def escape_menu(self, s): + # type: (unicode) -> unicode """Return an escaped string suitable for menu entries.""" s = self.escape_arg(s) s = s.replace(':', ';') @@ -371,11 +400,13 @@ class TexinfoTranslator(nodes.NodeVisitor): return s def ensure_eol(self): + # type: () -> None """Ensure the last line in body is terminated by new line.""" if self.body and self.body[-1][-1:] != '\n': self.body.append('\n') def format_menu_entry(self, name, node_name, desc): + # type: (unicode, unicode, unicode) -> unicode if name == node_name: s = '* %s:: ' % (name,) else: @@ -386,6 +417,7 @@ class TexinfoTranslator(nodes.NodeVisitor): return s + wdesc.strip() + '\n' def add_menu_entries(self, entries, reg=re.compile(r'\s+---?\s+')): + # type: (List[unicode], Pattern) -> None for entry in entries: name = self.node_names[entry] # special formatting for entries that are divided by an em-dash @@ -403,6 +435,7 @@ class TexinfoTranslator(nodes.NodeVisitor): self.body.append(self.format_menu_entry(name, entry, desc)) def add_menu(self, node_name): + # type: (unicode) -> None entries = self.node_menus[node_name] if not entries: return @@ -415,6 +448,7 @@ class TexinfoTranslator(nodes.NodeVisitor): return def _add_detailed_menu(name): + # type: (unicode) -> None entries = self.node_menus[name] if not entries: return @@ -431,6 +465,7 @@ class TexinfoTranslator(nodes.NodeVisitor): '@end menu\n') def tex_image_length(self, width_str): + # type: (unicode) -> unicode match = 
re.match('(\d*\.?\d*)\s*(\S*)', width_str) if not match: # fallback @@ -446,15 +481,17 @@ class TexinfoTranslator(nodes.NodeVisitor): return res def collect_indices(self): + # type: () -> None def generate(content, collapsed): - ret = ['\n@menu\n'] + # type: (List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool) -> unicode + ret = ['\n@menu\n'] # type: List[unicode] for letter, entries in content: for entry in entries: if not entry[3]: continue - name = self.escape_menu(entry[0]) + name = self.escape_menu(entry[0]) # type: ignore sid = self.get_short_id('%s:%s' % (entry[2], entry[3])) - desc = self.escape_arg(entry[6]) + desc = self.escape_arg(entry[6]) # type: ignore me = self.format_menu_entry(name, sid, desc) ret.append(me) ret.append('@end menu\n') @@ -484,7 +521,9 @@ class TexinfoTranslator(nodes.NodeVisitor): # TODO: move this to sphinx.util def collect_footnotes(self, node): + # type: (nodes.Node) -> Dict[unicode, List[Union[collected_footnote, bool]]] def footnotes_under(n): + # type: (nodes.Node) -> Iterator[nodes.footnote] if isinstance(n, nodes.footnote): yield n else: @@ -493,7 +532,7 @@ class TexinfoTranslator(nodes.NodeVisitor): continue for k in footnotes_under(c): yield k - fnotes = {} + fnotes = {} # type: Dict[unicode, List[Union[collected_footnote, bool]]] for fn in footnotes_under(node): num = fn.children[0].astext().strip() fnotes[num] = [collected_footnote(*fn.children), False] @@ -502,6 +541,7 @@ class TexinfoTranslator(nodes.NodeVisitor): # -- xref handling def get_short_id(self, id): + # type: (unicode) -> unicode """Return a shorter 'id' associated with ``id``.""" # Shorter ids improve paragraph filling in places # that the id is hidden by Emacs. @@ -513,6 +553,7 @@ class TexinfoTranslator(nodes.NodeVisitor): return sid def add_anchor(self, id, node): + # type: (unicode, nodes.Node) -> None if id.startswith('index-'): return id = self.curfilestack[-1] + ':' + id @@ -524,6 +565,7 @@ class TexinfoTranslator(nodes.NodeVisitor): self.written_ids.add(id) def add_xref(self, id, name, node): + # type: (unicode, unicode, nodes.Node) -> None name = self.escape_menu(name) sid = self.get_short_id(id) self.body.append('@ref{%s,,%s}' % (sid, name)) @@ -533,16 +575,19 @@ class TexinfoTranslator(nodes.NodeVisitor): # -- Visiting def visit_document(self, node): + # type: (nodes.Node) -> None self.footnotestack.append(self.collect_footnotes(node)) self.curfilestack.append(node.get('docname', '')) if 'docname' in node: self.add_anchor(':doc', node) def depart_document(self, node): + # type: (nodes.Node) -> None self.footnotestack.pop() self.curfilestack.pop() def visit_Text(self, node): + # type: (nodes.Node) -> None s = self.escape(node.astext()) if self.escape_newlines: s = s.replace('\n', ' ') @@ -552,9 +597,11 @@ class TexinfoTranslator(nodes.NodeVisitor): self.body.append(s) def depart_Text(self, node): + # type: (nodes.Node) -> None pass def visit_section(self, node): + # type: (nodes.section) -> None self.next_section_ids.update(node.get('ids', [])) if not self.seen_title: return @@ -565,7 +612,7 @@ class TexinfoTranslator(nodes.NodeVisitor): node_name = node['node_name'] pointers = tuple([node_name] + self.rellinks[node_name]) - self.body.append('\n@node %s,%s,%s,%s\n' % pointers) + self.body.append('\n@node %s,%s,%s,%s\n' % pointers) # type: ignore for id in self.next_section_ids: self.add_anchor(id, node) @@ -574,6 +621,7 @@ class TexinfoTranslator(nodes.NodeVisitor): self.section_level += 1 def depart_section(self, node): + # type: (nodes.Node) -> None 
self.section_level -= 1 headings = ( @@ -582,15 +630,16 @@ class TexinfoTranslator(nodes.NodeVisitor): '@section', '@subsection', '@subsubsection', - ) + ) # type: Tuple[unicode, ...] rubrics = ( '@heading', '@subheading', '@subsubheading', - ) + ) # type: Tuple[unicode, ...] def visit_title(self, node): + # type: (nodes.Node) -> None if not self.seen_title: self.seen_title = 1 raise nodes.SkipNode @@ -612,9 +661,11 @@ class TexinfoTranslator(nodes.NodeVisitor): self.body.append('\n%s ' % heading) def depart_title(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n') def visit_rubric(self, node): + # type: (nodes.Node) -> None if len(node.children) == 1 and node.children[0].astext() in \ ('Footnotes', _('Footnotes')): raise nodes.SkipNode @@ -625,17 +676,21 @@ class TexinfoTranslator(nodes.NodeVisitor): self.body.append('\n%s ' % rubric) def depart_rubric(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n') def visit_subtitle(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n@noindent\n') def depart_subtitle(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n') # -- References def visit_target(self, node): + # type: (nodes.Node) -> None # postpone the labels until after the sectioning command parindex = node.parent.index(node) try: @@ -660,9 +715,11 @@ class TexinfoTranslator(nodes.NodeVisitor): self.add_anchor(id, node) def depart_target(self, node): + # type: (nodes.Node) -> None pass def visit_reference(self, node): + # type: (nodes.Node) -> None # an xref's target is displayed in Info so we ignore a few # cases for the sake of appearance if isinstance(node.parent, (nodes.title, addnodes.desc_type)): @@ -726,14 +783,17 @@ class TexinfoTranslator(nodes.NodeVisitor): raise nodes.SkipNode def depart_reference(self, node): + # type: (nodes.Node) -> None pass def visit_number_reference(self, node): + # type: (nodes.Node) -> None text = nodes.Text(node.get('title', '#')) self.visit_Text(text) raise nodes.SkipNode def visit_title_reference(self, node): + # type: (nodes.Node) -> None text = node.astext() self.body.append('@cite{%s}' % self.escape_arg(text)) raise nodes.SkipNode @@ -741,22 +801,28 @@ class TexinfoTranslator(nodes.NodeVisitor): # -- Blocks def visit_paragraph(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def depart_paragraph(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def visit_block_quote(self, node): + # type: (nodes.Node) -> None self.body.append('\n@quotation\n') def depart_block_quote(self, node): + # type: (nodes.Node) -> None self.ensure_eol() self.body.append('@end quotation\n') def visit_literal_block(self, node): + # type: (nodes.Node) -> None self.body.append('\n@example\n') def depart_literal_block(self, node): + # type: (nodes.Node) -> None self.ensure_eol() self.body.append('@end example\n') @@ -764,101 +830,126 @@ class TexinfoTranslator(nodes.NodeVisitor): depart_doctest_block = depart_literal_block def visit_line_block(self, node): + # type: (nodes.Node) -> None if not isinstance(node.parent, nodes.line_block): self.body.append('\n\n') self.body.append('@display\n') def depart_line_block(self, node): + # type: (nodes.Node) -> None self.body.append('@end display\n') if not isinstance(node.parent, nodes.line_block): self.body.append('\n\n') def visit_line(self, node): + # type: (nodes.Node) -> None self.escape_newlines += 1 def depart_line(self, node): + # type: (nodes.Node) -> None self.body.append('@w{ }\n') self.escape_newlines -= 1 # -- Inline def 
visit_strong(self, node): + # type: (nodes.Node) -> None self.body.append('@strong{') def depart_strong(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_emphasis(self, node): + # type: (nodes.Node) -> None self.body.append('@emph{') def depart_emphasis(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_literal(self, node): + # type: (nodes.Node) -> None self.body.append('@code{') def depart_literal(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_superscript(self, node): + # type: (nodes.Node) -> None self.body.append('@w{^') def depart_superscript(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_subscript(self, node): + # type: (nodes.Node) -> None self.body.append('@w{[') def depart_subscript(self, node): + # type: (nodes.Node) -> None self.body.append(']}') # -- Footnotes def visit_footnote(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_collected_footnote(self, node): + # type: (nodes.Node) -> None self.in_footnote += 1 self.body.append('@footnote{') def depart_collected_footnote(self, node): + # type: (nodes.Node) -> None self.body.append('}') self.in_footnote -= 1 def visit_footnote_reference(self, node): + # type: (nodes.Node) -> None num = node.astext().strip() try: footnode, used = self.footnotestack[-1][num] except (KeyError, IndexError): raise nodes.SkipNode # footnotes are repeated for each reference - footnode.walkabout(self) + footnode.walkabout(self) # type: ignore raise nodes.SkipChildren def visit_citation(self, node): + # type: (nodes.Node) -> None for id in node.get('ids'): self.add_anchor(id, node) def depart_citation(self, node): + # type: (nodes.Node) -> None pass def visit_citation_reference(self, node): + # type: (nodes.Node) -> None self.body.append('@w{[') def depart_citation_reference(self, node): + # type: (nodes.Node) -> None self.body.append(']}') # -- Lists def visit_bullet_list(self, node): + # type: (nodes.Node) -> None bullet = node.get('bullet', '*') self.body.append('\n\n@itemize %s\n' % bullet) def depart_bullet_list(self, node): + # type: (nodes.Node) -> None self.ensure_eol() self.body.append('@end itemize\n') def visit_enumerated_list(self, node): + # type: (nodes.Node) -> None # doesn't support Roman numerals enum = node.get('enumtype', 'arabic') starters = {'arabic': '', @@ -868,75 +959,96 @@ class TexinfoTranslator(nodes.NodeVisitor): self.body.append('\n\n@enumerate %s\n' % start) def depart_enumerated_list(self, node): + # type: (nodes.Node) -> None self.ensure_eol() self.body.append('@end enumerate\n') def visit_list_item(self, node): + # type: (nodes.Node) -> None self.body.append('\n@item ') def depart_list_item(self, node): + # type: (nodes.Node) -> None pass # -- Option List def visit_option_list(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n@table @option\n') def depart_option_list(self, node): + # type: (nodes.Node) -> None self.ensure_eol() self.body.append('@end table\n') def visit_option_list_item(self, node): + # type: (nodes.Node) -> None pass def depart_option_list_item(self, node): + # type: (nodes.Node) -> None pass def visit_option_group(self, node): + # type: (nodes.Node) -> None self.at_item_x = '@item' def depart_option_group(self, node): + # type: (nodes.Node) -> None pass def visit_option(self, node): + # type: (nodes.Node) -> None self.escape_hyphens += 1 self.body.append('\n%s ' % self.at_item_x) self.at_item_x = '@itemx' def depart_option(self, node): + # type: 
(nodes.Node) -> None self.escape_hyphens -= 1 def visit_option_string(self, node): + # type: (nodes.Node) -> None pass def depart_option_string(self, node): + # type: (nodes.Node) -> None pass def visit_option_argument(self, node): + # type: (nodes.Node) -> None self.body.append(node.get('delimiter', ' ')) def depart_option_argument(self, node): + # type: (nodes.Node) -> None pass def visit_description(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def depart_description(self, node): + # type: (nodes.Node) -> None pass # -- Definitions def visit_definition_list(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n@table @asis\n') def depart_definition_list(self, node): + # type: (nodes.Node) -> None self.ensure_eol() self.body.append('@end table\n') def visit_definition_list_item(self, node): + # type: (nodes.Node) -> None self.at_item_x = '@item' def depart_definition_list_item(self, node): + # type: (nodes.Node) -> None pass def visit_term(self, node): @@ -951,43 +1063,55 @@ class TexinfoTranslator(nodes.NodeVisitor): self.at_item_x = '@itemx' def depart_term(self, node): + # type: (nodes.Node) -> None pass def visit_termsep(self, node): + # type: (nodes.Node) -> None warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.5', DeprecationWarning) self.body.append('\n%s ' % self.at_item_x) def depart_termsep(self, node): + # type: (nodes.Node) -> None pass def visit_classifier(self, node): + # type: (nodes.Node) -> None self.body.append(' : ') def depart_classifier(self, node): + # type: (nodes.Node) -> None pass def visit_definition(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def depart_definition(self, node): + # type: (nodes.Node) -> None pass # -- Tables def visit_table(self, node): + # type: (nodes.Node) -> None self.entry_sep = '@item' def depart_table(self, node): + # type: (nodes.Node) -> None self.body.append('\n@end multitable\n\n') def visit_tabular_col_spec(self, node): + # type: (nodes.Node) -> None pass def depart_tabular_col_spec(self, node): + # type: (nodes.Node) -> None pass def visit_colspec(self, node): + # type: (nodes.Node) -> None self.colwidths.append(node['colwidth']) if len(self.colwidths) != self.n_cols: return @@ -996,82 +1120,104 @@ class TexinfoTranslator(nodes.NodeVisitor): self.body.append('{%s} ' % ('x' * (n+2))) def depart_colspec(self, node): + # type: (nodes.Node) -> None pass def visit_tgroup(self, node): + # type: (nodes.Node) -> None self.colwidths = [] self.n_cols = node['cols'] def depart_tgroup(self, node): + # type: (nodes.Node) -> None pass def visit_thead(self, node): + # type: (nodes.Node) -> None self.entry_sep = '@headitem' def depart_thead(self, node): + # type: (nodes.Node) -> None pass def visit_tbody(self, node): + # type: (nodes.Node) -> None pass def depart_tbody(self, node): + # type: (nodes.Node) -> None pass def visit_row(self, node): + # type: (nodes.Node) -> None pass def depart_row(self, node): + # type: (nodes.Node) -> None self.entry_sep = '@item' def visit_entry(self, node): + # type: (nodes.Node) -> None self.body.append('\n%s\n' % self.entry_sep) self.entry_sep = '@tab' def depart_entry(self, node): + # type: (nodes.Node) -> None for i in range(node.get('morecols', 0)): self.body.append('\n@tab\n') # -- Field Lists def visit_field_list(self, node): + # type: (nodes.Node) -> None pass def depart_field_list(self, node): + # type: (nodes.Node) -> None pass def visit_field(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def depart_field(self, 
node): + # type: (nodes.Node) -> None self.body.append('\n') def visit_field_name(self, node): + # type: (nodes.Node) -> None self.ensure_eol() self.body.append('@*') def depart_field_name(self, node): + # type: (nodes.Node) -> None self.body.append(': ') def visit_field_body(self, node): + # type: (nodes.Node) -> None pass def depart_field_body(self, node): + # type: (nodes.Node) -> None pass # -- Admonitions def visit_admonition(self, node, name=''): + # type: (nodes.Node, unicode) -> None if not name: name = self.escape(node[0].astext()) self.body.append(u'\n@cartouche\n@quotation %s ' % name) def depart_admonition(self, node): + # type: (nodes.Node) -> None self.ensure_eol() self.body.append('@end quotation\n' '@end cartouche\n') def _make_visit_admonition(name): def visit(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, admonitionlabels[name]) return visit @@ -1097,32 +1243,41 @@ class TexinfoTranslator(nodes.NodeVisitor): # -- Misc def visit_docinfo(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_generated(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_header(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_footer(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_container(self, node): + # type: (nodes.Node) -> None if node.get('literal_block'): self.body.append('\n\n@float LiteralBlock\n') def depart_container(self, node): + # type: (nodes.Node) -> None if node.get('literal_block'): self.body.append('\n@end float\n\n') def visit_decoration(self, node): + # type: (nodes.Node) -> None pass def depart_decoration(self, node): + # type: (nodes.Node) -> None pass def visit_topic(self, node): + # type: (nodes.Node) -> None # ignore TOC's since we have to have a "menu" anyway if 'contents' in node.get('classes', []): raise nodes.SkipNode @@ -1131,33 +1286,42 @@ class TexinfoTranslator(nodes.NodeVisitor): self.body.append('%s\n' % self.escape(title.astext())) def depart_topic(self, node): + # type: (nodes.Node) -> None pass def visit_transition(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n%s\n\n' % ('_' * 66)) def depart_transition(self, node): + # type: (nodes.Node) -> None pass def visit_attribution(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n@center --- ') def depart_attribution(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n') def visit_raw(self, node): + # type: (nodes.Node) -> None format = node.get('format', '').split() if 'texinfo' in format or 'texi' in format: self.body.append(node.astext()) raise nodes.SkipNode def visit_figure(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n@float Figure\n') def depart_figure(self, node): + # type: (nodes.Node) -> None self.body.append('\n@end float\n\n') def visit_caption(self, node): + # type: (nodes.Node) -> None if (isinstance(node.parent, nodes.figure) or (isinstance(node.parent, nodes.container) and node.parent.get('literal_block'))): @@ -1167,12 +1331,14 @@ class TexinfoTranslator(nodes.NodeVisitor): (self.curfilestack[-1], node.line)) def depart_caption(self, node): + # type: (nodes.Node) -> None if (isinstance(node.parent, nodes.figure) or (isinstance(node.parent, nodes.container) and node.parent.get('literal_block'))): self.body.append('}\n') def visit_image(self, node): + # type: (nodes.Node) -> None if node['uri'] in self.builder.images: uri = self.builder.images[node['uri']] else: @@ -1193,73 +1359,93 @@ class 
TexinfoTranslator(nodes.NodeVisitor): (name, width, height, alt, ext[1:])) def depart_image(self, node): + # type: (nodes.Node) -> None pass def visit_compound(self, node): + # type: (nodes.Node) -> None pass def depart_compound(self, node): + # type: (nodes.Node) -> None pass def visit_sidebar(self, node): + # type: (nodes.Node) -> None self.visit_topic(node) def depart_sidebar(self, node): + # type: (nodes.Node) -> None self.depart_topic(node) def visit_label(self, node): + # type: (nodes.Node) -> None self.body.append('@w{(') def depart_label(self, node): + # type: (nodes.Node) -> None self.body.append(')} ') def visit_legend(self, node): + # type: (nodes.Node) -> None pass def depart_legend(self, node): + # type: (nodes.Node) -> None pass def visit_substitution_reference(self, node): + # type: (nodes.Node) -> None pass def depart_substitution_reference(self, node): + # type: (nodes.Node) -> None pass def visit_substitution_definition(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_system_message(self, node): + # type: (nodes.Node) -> None self.body.append('\n@verbatim\n' '<SYSTEM MESSAGE: %s>\n' '@end verbatim\n' % node.astext()) raise nodes.SkipNode def visit_comment(self, node): + # type: (nodes.Node) -> None self.body.append('\n') for line in node.astext().splitlines(): self.body.append('@c %s\n' % line) raise nodes.SkipNode def visit_problematic(self, node): + # type: (nodes.Node) -> None self.body.append('>>') def depart_problematic(self, node): + # type: (nodes.Node) -> None self.body.append('<<') def unimplemented_visit(self, node): + # type: (nodes.Node) -> None self.builder.warn("unimplemented node type: %r" % node, (self.curfilestack[-1], node.line)) def unknown_visit(self, node): + # type: (nodes.Node) -> None self.builder.warn("unknown node type: %r" % node, (self.curfilestack[-1], node.line)) def unknown_departure(self, node): + # type: (nodes.Node) -> None pass # -- Sphinx specific def visit_productionlist(self, node): + # type: (nodes.Node) -> None self.visit_literal_block(None) names = [] for production in node: @@ -1278,24 +1464,31 @@ class TexinfoTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_production(self, node): + # type: (nodes.Node) -> None pass def depart_production(self, node): + # type: (nodes.Node) -> None pass def visit_literal_emphasis(self, node): + # type: (nodes.Node) -> None self.body.append('@code{') def depart_literal_emphasis(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_literal_strong(self, node): + # type: (nodes.Node) -> None self.body.append('@code{') def depart_literal_strong(self, node): + # type: (nodes.Node) -> None self.body.append('}') def visit_index(self, node): + # type: (nodes.Node) -> None # terminate the line but don't prevent paragraph breaks if isinstance(node.parent, nodes.paragraph): self.ensure_eol() @@ -1307,43 +1500,54 @@ class TexinfoTranslator(nodes.NodeVisitor): self.body.append('@geindex %s\n' % text) def visit_versionmodified(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def depart_versionmodified(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def visit_start_of_file(self, node): + # type: (nodes.Node) -> None # add a document target self.next_section_ids.add(':doc') self.curfilestack.append(node['docname']) self.footnotestack.append(self.collect_footnotes(node)) def depart_start_of_file(self, node): + # type: (nodes.Node) -> None self.curfilestack.pop() self.footnotestack.pop() def visit_centered(self, node): 
+ # type: (nodes.Node) -> None txt = self.escape_arg(node.astext()) self.body.append('\n\n@center %s\n\n' % txt) raise nodes.SkipNode def visit_seealso(self, node): + # type: (nodes.Node) -> None self.body.append(u'\n\n@subsubheading %s\n\n' % admonitionlabels['seealso']) def depart_seealso(self, node): + # type: (nodes.Node) -> None self.body.append('\n') def visit_meta(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_glossary(self, node): + # type: (nodes.Node) -> None pass def depart_glossary(self, node): + # type: (nodes.Node) -> None pass def visit_acks(self, node): + # type: (nodes.Node) -> None self.body.append('\n\n') self.body.append(', '.join(n.astext() for n in node.children[0].children) + '.') @@ -1351,23 +1555,28 @@ class TexinfoTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_highlightlang(self, node): + # type: (nodes.Node) -> None pass def depart_highlightlang(self, node): + # type: (nodes.Node) -> None pass # -- Desc def visit_desc(self, node): + # type: (nodes.Node) -> None self.desc = node self.at_deffnx = '@deffn' def depart_desc(self, node): + # type: (nodes.Node) -> None self.desc = None self.ensure_eol() self.body.append('@end deffn\n') def visit_desc_signature(self, node): + # type: (nodes.Node) -> None self.escape_hyphens += 1 objtype = node.parent['objtype'] if objtype != 'describe': @@ -1388,42 +1597,54 @@ class TexinfoTranslator(nodes.NodeVisitor): self.desc_type_name = name def depart_desc_signature(self, node): + # type: (nodes.Node) -> None self.body.append("\n") self.escape_hyphens -= 1 self.desc_type_name = None def visit_desc_name(self, node): + # type: (nodes.Node) -> None pass def depart_desc_name(self, node): + # type: (nodes.Node) -> None pass def visit_desc_addname(self, node): + # type: (nodes.Node) -> None pass def depart_desc_addname(self, node): + # type: (nodes.Node) -> None pass def visit_desc_type(self, node): + # type: (nodes.Node) -> None pass def depart_desc_type(self, node): + # type: (nodes.Node) -> None pass def visit_desc_returns(self, node): + # type: (nodes.Node) -> None self.body.append(' -> ') def depart_desc_returns(self, node): + # type: (nodes.Node) -> None pass def visit_desc_parameterlist(self, node): + # type: (nodes.Node) -> None self.body.append(' (') self.first_param = 1 def depart_desc_parameterlist(self, node): + # type: (nodes.Node) -> None self.body.append(')') def visit_desc_parameter(self, node): + # type: (nodes.Node) -> None if not self.first_param: self.body.append(', ') else: @@ -1435,12 +1656,15 @@ class TexinfoTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_desc_optional(self, node): + # type: (nodes.Node) -> None self.body.append('[') def depart_desc_optional(self, node): + # type: (nodes.Node) -> None self.body.append(']') def visit_desc_annotation(self, node): + # type: (nodes.Node) -> None # Try to avoid duplicating info already displayed by the deffn category. # e.g. 
# @deffn {Class} Foo @@ -1453,21 +1677,27 @@ class TexinfoTranslator(nodes.NodeVisitor): raise nodes.SkipNode def depart_desc_annotation(self, node): + # type: (nodes.Node) -> None pass def visit_desc_content(self, node): + # type: (nodes.Node) -> None pass def depart_desc_content(self, node): + # type: (nodes.Node) -> None pass def visit_inline(self, node): + # type: (nodes.Node) -> None pass def depart_inline(self, node): + # type: (nodes.Node) -> None pass def visit_abbreviation(self, node): + # type: (nodes.Node) -> None abbr = node.astext() self.body.append('@abbr{') if node.hasattr('explanation') and abbr not in self.handled_abbrs: @@ -1477,39 +1707,51 @@ class TexinfoTranslator(nodes.NodeVisitor): self.context.append('}') def depart_abbreviation(self, node): + # type: (nodes.Node) -> None self.body.append(self.context.pop()) def visit_manpage(self, node): + # type: (nodes.Node) -> Any return self.visit_literal_emphasis(node) def depart_manpage(self, node): + # type: (nodes.Node) -> Any return self.depart_literal_emphasis(node) def visit_download_reference(self, node): + # type: (nodes.Node) -> None pass def depart_download_reference(self, node): + # type: (nodes.Node) -> None pass def visit_hlist(self, node): + # type: (nodes.Node) -> None self.visit_bullet_list(node) def depart_hlist(self, node): + # type: (nodes.Node) -> None self.depart_bullet_list(node) def visit_hlistcol(self, node): + # type: (nodes.Node) -> None pass def depart_hlistcol(self, node): + # type: (nodes.Node) -> None pass def visit_pending_xref(self, node): + # type: (nodes.Node) -> None pass def depart_pending_xref(self, node): + # type: (nodes.Node) -> None pass def visit_math(self, node): + # type: (nodes.Node) -> None self.builder.warn('using "math" markup without a Sphinx math extension ' 'active, please use one of the math extensions ' 'described at http://sphinx-doc.org/ext/math.html') diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py index 7032208ea..da58906e7 100644 --- a/sphinx/writers/text.py +++ b/sphinx/writers/text.py @@ -22,6 +22,11 @@ from docutils.utils import column_width from sphinx import addnodes from sphinx.locale import admonitionlabels, _ +if False: + # For type annotation + from typing import Any, Callable, Tuple, Union # NOQA + from sphinx.builders.text import TextBuilder # NOQA + class TextWrapper(textwrap.TextWrapper): """Custom subclass that uses a different word separator regex.""" @@ -33,13 +38,14 @@ class TextWrapper(textwrap.TextWrapper): r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash def _wrap_chunks(self, chunks): + # type: (List[unicode]) -> List[unicode] """_wrap_chunks(chunks : [string]) -> [string] The original _wrap_chunks uses len() to calculate width. This method respects wide/fullwidth characters for width adjustment. """ drop_whitespace = getattr(self, 'drop_whitespace', True) # py25 compat - lines = [] + lines = [] # type: List[unicode] if self.width <= 0: raise ValueError("invalid width %r (must be > 0)" % self.width) @@ -81,6 +87,7 @@ class TextWrapper(textwrap.TextWrapper): return lines def _break_word(self, word, space_left): + # type: (unicode, int) -> Tuple[unicode, unicode] """_break_word(word : string, space_left : int) -> (string, string) Break line by unicode width instead of len(word). @@ -93,14 +100,16 @@ class TextWrapper(textwrap.TextWrapper): return word, '' def _split(self, text): + # type: (unicode) -> List[unicode] """_split(text : string) -> [string] Override original method that only split by 'wordsep_re'. 
This '_split' split wide-characters into chunk by one character. """ def split(t): - return textwrap.TextWrapper._split(self, t) - chunks = [] + # type: (unicode) -> List[unicode] + return textwrap.TextWrapper._split(self, t) # type: ignore + chunks = [] # type: List[unicode] for chunk in split(text): for w, g in groupby(chunk, column_width): if w == 1: @@ -110,6 +119,7 @@ class TextWrapper(textwrap.TextWrapper): return chunks def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + # type: (List[unicode], List[unicode], int, int) -> None """_handle_long_word(chunks : [string], cur_line : [string], cur_len : int, width : int) @@ -131,6 +141,7 @@ STDINDENT = 3 def my_wrap(text, width=MAXWIDTH, **kwargs): + # type: (unicode, int, Any) -> List[unicode] w = TextWrapper(width=width, **kwargs) return w.wrap(text) @@ -138,16 +149,18 @@ def my_wrap(text, width=MAXWIDTH, **kwargs): class TextWriter(writers.Writer): supported = ('text',) settings_spec = ('No options here.', '', ()) - settings_defaults = {} + settings_defaults = {} # type: Dict output = None def __init__(self, builder): + # type: (TextBuilder) -> None writers.Writer.__init__(self) self.builder = builder self.translator_class = self.builder.translator_class or TextTranslator def translate(self): + # type: () -> None visitor = self.translator_class(self.document, self.builder) self.document.walkabout(visitor) self.output = visitor.body @@ -157,6 +170,7 @@ class TextTranslator(nodes.NodeVisitor): sectionchars = '*=-~"+`' def __init__(self, document, builder): + # type: (nodes.Node, TextBuilder) -> None nodes.NodeVisitor.__init__(self, document) self.builder = builder @@ -168,28 +182,32 @@ class TextTranslator(nodes.NodeVisitor): else: self.nl = '\n' self.sectionchars = builder.config.text_sectionchars - self.states = [[]] + self.states = [[]] # type: List[List[Tuple[int, Union[unicode, List[unicode]]]]] self.stateindent = [0] - self.list_counter = [] + self.list_counter = [] # type: List[int] self.sectionlevel = 0 self.lineblocklevel = 0 - self.table = None + self.table = None # type: List[Union[unicode, List[int]]] def add_text(self, text): + # type: (unicode) -> None self.states[-1].append((-1, text)) def new_state(self, indent=STDINDENT): + # type: (int) -> None self.states.append([]) self.stateindent.append(indent) def end_state(self, wrap=True, end=[''], first=None): + # type: (bool, List[unicode], unicode) -> None content = self.states.pop() maxindent = sum(self.stateindent) indent = self.stateindent.pop() - result = [] - toformat = [] + result = [] # type: List[Tuple[int, List[unicode]]] + toformat = [] # type: List[unicode] def do_format(): + # type: () -> None if not toformat: return if wrap: @@ -201,10 +219,10 @@ class TextTranslator(nodes.NodeVisitor): result.append((indent, res)) for itemindent, item in content: if itemindent == -1: - toformat.append(item) + toformat.append(item) # type: ignore else: do_format() - result.append((indent + itemindent, item)) + result.append((indent + itemindent, item)) # type: ignore toformat = [] do_format() if first is not None and result: @@ -220,9 +238,11 @@ class TextTranslator(nodes.NodeVisitor): self.states[-1].extend(result) def visit_document(self, node): + # type: (nodes.Node) -> None self.new_state(0) def depart_document(self, node): + # type: (nodes.Node) -> None self.end_state() self.body = self.nl.join(line and (' '*indent + line) for indent, lines in self.states[0] @@ -230,126 +250,161 @@ class TextTranslator(nodes.NodeVisitor): # XXX header/footer? 
def visit_highlightlang(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_section(self, node): + # type: (nodes.Node) -> None self._title_char = self.sectionchars[self.sectionlevel] self.sectionlevel += 1 def depart_section(self, node): + # type: (nodes.Node) -> None self.sectionlevel -= 1 def visit_topic(self, node): + # type: (nodes.Node) -> None self.new_state(0) def depart_topic(self, node): + # type: (nodes.Node) -> None self.end_state() visit_sidebar = visit_topic depart_sidebar = depart_topic def visit_rubric(self, node): + # type: (nodes.Node) -> None self.new_state(0) self.add_text('-[ ') def depart_rubric(self, node): + # type: (nodes.Node) -> None self.add_text(' ]-') self.end_state() def visit_compound(self, node): + # type: (nodes.Node) -> None pass def depart_compound(self, node): + # type: (nodes.Node) -> None pass def visit_glossary(self, node): + # type: (nodes.Node) -> None pass def depart_glossary(self, node): + # type: (nodes.Node) -> None pass def visit_title(self, node): + # type: (nodes.Node) -> None if isinstance(node.parent, nodes.Admonition): self.add_text(node.astext()+': ') raise nodes.SkipNode self.new_state(0) def depart_title(self, node): + # type: (nodes.Node) -> None if isinstance(node.parent, nodes.section): char = self._title_char else: char = '^' - text = ''.join(x[1] for x in self.states.pop() if x[0] == -1) + text = None # type: unicode + text = ''.join(x[1] for x in self.states.pop() if x[0] == -1) # type: ignore self.stateindent.pop() - title = ['', text, '%s' % (char * column_width(text)), ''] + title = ['', text, '%s' % (char * column_width(text)), ''] # type: List[unicode] if len(self.states) == 2 and len(self.states[-1]) == 0: # remove an empty line before title if it is first section title in the document title.pop(0) self.states[-1].append((0, title)) def visit_subtitle(self, node): + # type: (nodes.Node) -> None pass def depart_subtitle(self, node): + # type: (nodes.Node) -> None pass def visit_attribution(self, node): + # type: (nodes.Node) -> None self.add_text('-- ') def depart_attribution(self, node): + # type: (nodes.Node) -> None pass def visit_desc(self, node): + # type: (nodes.Node) -> None pass def depart_desc(self, node): + # type: (nodes.Node) -> None pass def visit_desc_signature(self, node): + # type: (nodes.Node) -> None self.new_state(0) def depart_desc_signature(self, node): + # type: (nodes.Node) -> None # XXX: wrap signatures in a way that makes sense self.end_state(wrap=False, end=None) def visit_desc_signature_line(self, node): + # type: (nodes.Node) -> None pass def depart_desc_signature_line(self, node): + # type: (nodes.Node) -> None self.add_text('\n') def visit_desc_name(self, node): + # type: (nodes.Node) -> None pass def depart_desc_name(self, node): + # type: (nodes.Node) -> None pass def visit_desc_addname(self, node): + # type: (nodes.Node) -> None pass def depart_desc_addname(self, node): + # type: (nodes.Node) -> None pass def visit_desc_type(self, node): + # type: (nodes.Node) -> None pass def depart_desc_type(self, node): + # type: (nodes.Node) -> None pass def visit_desc_returns(self, node): + # type: (nodes.Node) -> None self.add_text(' -> ') def depart_desc_returns(self, node): + # type: (nodes.Node) -> None pass def visit_desc_parameterlist(self, node): + # type: (nodes.Node) -> None self.add_text('(') self.first_param = 1 def depart_desc_parameterlist(self, node): + # type: (nodes.Node) -> None self.add_text(')') def visit_desc_parameter(self, node): + # type: (nodes.Node) -> None 
if not self.first_param: self.add_text(', ') else: @@ -358,37 +413,48 @@ class TextTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_desc_optional(self, node): + # type: (nodes.Node) -> None self.add_text('[') def depart_desc_optional(self, node): + # type: (nodes.Node) -> None self.add_text(']') def visit_desc_annotation(self, node): + # type: (nodes.Node) -> None pass def depart_desc_annotation(self, node): + # type: (nodes.Node) -> None pass def visit_desc_content(self, node): + # type: (nodes.Node) -> None self.new_state() self.add_text(self.nl) def depart_desc_content(self, node): + # type: (nodes.Node) -> None self.end_state() def visit_figure(self, node): + # type: (nodes.Node) -> None self.new_state() def depart_figure(self, node): + # type: (nodes.Node) -> None self.end_state() def visit_caption(self, node): + # type: (nodes.Node) -> None pass def depart_caption(self, node): + # type: (nodes.Node) -> None pass def visit_productionlist(self, node): + # type: (nodes.Node) -> None self.new_state() names = [] for production in node: @@ -406,13 +472,16 @@ class TextTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_footnote(self, node): + # type: (nodes.Node) -> None self._footnote = node.children[0].astext().strip() self.new_state(len(self._footnote) + 3) def depart_footnote(self, node): + # type: (nodes.Node) -> None self.end_state(first='[%s] ' % self._footnote) def visit_citation(self, node): + # type: (nodes.Node) -> None if len(node) and isinstance(node[0], nodes.label): self._citlabel = node[0].astext() else: @@ -420,116 +489,150 @@ class TextTranslator(nodes.NodeVisitor): self.new_state(len(self._citlabel) + 3) def depart_citation(self, node): + # type: (nodes.Node) -> None self.end_state(first='[%s] ' % self._citlabel) def visit_label(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_legend(self, node): + # type: (nodes.Node) -> None pass def depart_legend(self, node): + # type: (nodes.Node) -> None pass # XXX: option list could use some better styling def visit_option_list(self, node): + # type: (nodes.Node) -> None pass def depart_option_list(self, node): + # type: (nodes.Node) -> None pass def visit_option_list_item(self, node): + # type: (nodes.Node) -> None self.new_state(0) def depart_option_list_item(self, node): + # type: (nodes.Node) -> None self.end_state() def visit_option_group(self, node): + # type: (nodes.Node) -> None self._firstoption = True def depart_option_group(self, node): + # type: (nodes.Node) -> None self.add_text(' ') def visit_option(self, node): + # type: (nodes.Node) -> None if self._firstoption: self._firstoption = False else: self.add_text(', ') def depart_option(self, node): + # type: (nodes.Node) -> None pass def visit_option_string(self, node): + # type: (nodes.Node) -> None pass def depart_option_string(self, node): + # type: (nodes.Node) -> None pass def visit_option_argument(self, node): + # type: (nodes.Node) -> None self.add_text(node['delimiter']) def depart_option_argument(self, node): + # type: (nodes.Node) -> None pass def visit_description(self, node): + # type: (nodes.Node) -> None pass def depart_description(self, node): + # type: (nodes.Node) -> None pass def visit_tabular_col_spec(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_colspec(self, node): - self.table[0].append(node['colwidth']) + # type: (nodes.Node) -> None + self.table[0].append(node['colwidth']) # type: ignore raise nodes.SkipNode def visit_tgroup(self, node): + # type: (nodes.Node) -> None 
pass def depart_tgroup(self, node): + # type: (nodes.Node) -> None pass def visit_thead(self, node): + # type: (nodes.Node) -> None pass def depart_thead(self, node): + # type: (nodes.Node) -> None pass def visit_tbody(self, node): + # type: (nodes.Node) -> None self.table.append('sep') def depart_tbody(self, node): + # type: (nodes.Node) -> None pass def visit_row(self, node): + # type: (nodes.Node) -> None self.table.append([]) def depart_row(self, node): + # type: (nodes.Node) -> None pass def visit_entry(self, node): + # type: (nodes.Node) -> None if 'morerows' in node or 'morecols' in node: raise NotImplementedError('Column or row spanning cells are ' 'not implemented.') self.new_state(0) def depart_entry(self, node): + # type: (nodes.Node) -> None text = self.nl.join(self.nl.join(x[1]) for x in self.states.pop()) self.stateindent.pop() - self.table[-1].append(text) + self.table[-1].append(text) # type: ignore def visit_table(self, node): + # type: (nodes.Node) -> None if self.table: raise NotImplementedError('Nested tables are not supported.') self.new_state(0) self.table = [[]] def depart_table(self, node): - lines = self.table[1:] - fmted_rows = [] - colwidths = self.table[0] + # type: (nodes.Node) -> None + lines = None # type: List[unicode] + lines = self.table[1:] # type: ignore + fmted_rows = [] # type: List[List[List[unicode]]] + colwidths = None # type: List[int] + colwidths = self.table[0] # type: ignore realwidths = colwidths[:] separator = 0 # don't allow paragraphs in table cells for now @@ -537,7 +640,7 @@ class TextTranslator(nodes.NodeVisitor): if line == 'sep': separator = len(fmted_rows) else: - cells = [] + cells = [] # type: List[List[unicode]] for i, cell in enumerate(line): par = my_wrap(cell, width=colwidths[i]) if par: @@ -549,13 +652,15 @@ class TextTranslator(nodes.NodeVisitor): fmted_rows.append(cells) def writesep(char='-'): - out = ['+'] + # type: (unicode) -> None + out = ['+'] # type: List[unicode] for width in realwidths: out.append(char * (width+2)) out.append('+') self.add_text(''.join(out) + self.nl) def writerow(row): + # type: (list[List[unicode]]) -> None lines = zip_longest(*row) for line in lines: out = ['|'] @@ -580,6 +685,7 @@ class TextTranslator(nodes.NodeVisitor): self.end_state(wrap=False) def visit_acks(self, node): + # type: (nodes.Node) -> None self.new_state(0) self.add_text(', '.join(n.astext() for n in node.children[0].children) + '.') @@ -587,12 +693,14 @@ class TextTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_image(self, node): + # type: (nodes.Node) -> None if 'alt' in node.attributes: self.add_text(_('[image: %s]') % node['alt']) self.add_text(_('[image]')) raise nodes.SkipNode def visit_transition(self, node): + # type: (nodes.Node) -> None indent = sum(self.stateindent) self.new_state(0) self.add_text('=' * (MAXWIDTH - indent)) @@ -600,24 +708,31 @@ class TextTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_bullet_list(self, node): + # type: (nodes.Node) -> None self.list_counter.append(-1) def depart_bullet_list(self, node): + # type: (nodes.Node) -> None self.list_counter.pop() def visit_enumerated_list(self, node): + # type: (nodes.Node) -> None self.list_counter.append(node.get('start', 1) - 1) def depart_enumerated_list(self, node): + # type: (nodes.Node) -> None self.list_counter.pop() def visit_definition_list(self, node): + # type: (nodes.Node) -> None self.list_counter.append(-2) def depart_definition_list(self, node): + # type: (nodes.Node) -> None self.list_counter.pop() def 
visit_list_item(self, node): + # type: (nodes.Node) -> None if self.list_counter[-1] == -1: # bullet list self.new_state(2) @@ -630,6 +745,7 @@ class TextTranslator(nodes.NodeVisitor): self.new_state(len(str(self.list_counter[-1])) + 2) def depart_list_item(self, node): + # type: (nodes.Node) -> None if self.list_counter[-1] == -1: self.end_state(first='* ') elif self.list_counter[-1] == -2: @@ -638,88 +754,114 @@ class TextTranslator(nodes.NodeVisitor): self.end_state(first='%s. ' % self.list_counter[-1]) def visit_definition_list_item(self, node): + # type: (nodes.Node) -> None self._classifier_count_in_li = len(node.traverse(nodes.classifier)) def depart_definition_list_item(self, node): + # type: (nodes.Node) -> None pass def visit_term(self, node): + # type: (nodes.Node) -> None self.new_state(0) def depart_term(self, node): + # type: (nodes.Node) -> None if not self._classifier_count_in_li: self.end_state(end=None) def visit_termsep(self, node): + # type: (nodes.Node) -> None warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.5', DeprecationWarning) self.add_text(', ') raise nodes.SkipNode def visit_classifier(self, node): + # type: (nodes.Node) -> None self.add_text(' : ') def depart_classifier(self, node): + # type: (nodes.Node) -> None self._classifier_count_in_li -= 1 if not self._classifier_count_in_li: self.end_state(end=None) def visit_definition(self, node): + # type: (nodes.Node) -> None self.new_state() def depart_definition(self, node): + # type: (nodes.Node) -> None self.end_state() def visit_field_list(self, node): + # type: (nodes.Node) -> None pass def depart_field_list(self, node): + # type: (nodes.Node) -> None pass def visit_field(self, node): + # type: (nodes.Node) -> None pass def depart_field(self, node): + # type: (nodes.Node) -> None pass def visit_field_name(self, node): + # type: (nodes.Node) -> None self.new_state(0) def depart_field_name(self, node): + # type: (nodes.Node) -> None self.add_text(':') self.end_state(end=None) def visit_field_body(self, node): + # type: (nodes.Node) -> None self.new_state() def depart_field_body(self, node): + # type: (nodes.Node) -> None self.end_state() def visit_centered(self, node): + # type: (nodes.Node) -> None pass def depart_centered(self, node): + # type: (nodes.Node) -> None pass def visit_hlist(self, node): + # type: (nodes.Node) -> None pass def depart_hlist(self, node): + # type: (nodes.Node) -> None pass def visit_hlistcol(self, node): + # type: (nodes.Node) -> None pass def depart_hlistcol(self, node): + # type: (nodes.Node) -> None pass def visit_admonition(self, node): + # type: (nodes.Node) -> None self.new_state(0) def depart_admonition(self, node): + # type: (nodes.Node) -> None self.end_state() def _visit_admonition(self, node): + # type: (nodes.Node) -> None self.new_state(2) if isinstance(node.children[0], nodes.Sequential): @@ -727,6 +869,7 @@ class TextTranslator(nodes.NodeVisitor): def _make_depart_admonition(name): def depart_admonition(self, node): + # type: (nodes.NodeVisitor, nodes.Node) -> None self.end_state(first=admonitionlabels[name] + ': ') return depart_admonition @@ -752,211 +895,274 @@ class TextTranslator(nodes.NodeVisitor): depart_seealso = _make_depart_admonition('seealso') def visit_versionmodified(self, node): + # type: (nodes.Node) -> None self.new_state(0) def depart_versionmodified(self, node): + # type: (nodes.Node) -> None self.end_state() def visit_literal_block(self, node): + # type: (nodes.Node) -> None self.new_state() def depart_literal_block(self, node): + 
# type: (nodes.Node) -> None self.end_state(wrap=False) def visit_doctest_block(self, node): + # type: (nodes.Node) -> None self.new_state(0) def depart_doctest_block(self, node): + # type: (nodes.Node) -> None self.end_state(wrap=False) def visit_line_block(self, node): + # type: (nodes.Node) -> None self.new_state() self.lineblocklevel += 1 def depart_line_block(self, node): + # type: (nodes.Node) -> None self.lineblocklevel -= 1 self.end_state(wrap=False, end=None) if not self.lineblocklevel: self.add_text('\n') def visit_line(self, node): + # type: (nodes.Node) -> None pass def depart_line(self, node): + # type: (nodes.Node) -> None self.add_text('\n') def visit_block_quote(self, node): + # type: (nodes.Node) -> None self.new_state() def depart_block_quote(self, node): + # type: (nodes.Node) -> None self.end_state() def visit_compact_paragraph(self, node): + # type: (nodes.Node) -> None pass def depart_compact_paragraph(self, node): + # type: (nodes.Node) -> None pass def visit_paragraph(self, node): + # type: (nodes.Node) -> None if not isinstance(node.parent, nodes.Admonition) or \ isinstance(node.parent, addnodes.seealso): self.new_state(0) def depart_paragraph(self, node): + # type: (nodes.Node) -> None if not isinstance(node.parent, nodes.Admonition) or \ isinstance(node.parent, addnodes.seealso): self.end_state() def visit_target(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_index(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_toctree(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_substitution_definition(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_pending_xref(self, node): + # type: (nodes.Node) -> None pass def depart_pending_xref(self, node): + # type: (nodes.Node) -> None pass def visit_reference(self, node): + # type: (nodes.Node) -> None pass def depart_reference(self, node): + # type: (nodes.Node) -> None pass def visit_number_reference(self, node): + # type: (nodes.Node) -> None text = nodes.Text(node.get('title', '#')) self.visit_Text(text) raise nodes.SkipNode def visit_download_reference(self, node): + # type: (nodes.Node) -> None pass def depart_download_reference(self, node): + # type: (nodes.Node) -> None pass def visit_emphasis(self, node): + # type: (nodes.Node) -> None self.add_text('*') def depart_emphasis(self, node): + # type: (nodes.Node) -> None self.add_text('*') def visit_literal_emphasis(self, node): + # type: (nodes.Node) -> None self.add_text('*') def depart_literal_emphasis(self, node): + # type: (nodes.Node) -> None self.add_text('*') def visit_strong(self, node): + # type: (nodes.Node) -> None self.add_text('**') def depart_strong(self, node): + # type: (nodes.Node) -> None self.add_text('**') def visit_literal_strong(self, node): + # type: (nodes.Node) -> None self.add_text('**') def depart_literal_strong(self, node): + # type: (nodes.Node) -> None self.add_text('**') def visit_abbreviation(self, node): + # type: (nodes.Node) -> None self.add_text('') def depart_abbreviation(self, node): + # type: (nodes.Node) -> None if node.hasattr('explanation'): self.add_text(' (%s)' % node['explanation']) def visit_manpage(self, node): + # type: (nodes.Node) -> Any return self.visit_literal_emphasis(node) def depart_manpage(self, node): + # type: (nodes.Node) -> Any return self.depart_literal_emphasis(node) def visit_title_reference(self, node): + # type: (nodes.Node) -> None self.add_text('*') def depart_title_reference(self, node): + # type: 
(nodes.Node) -> None self.add_text('*') def visit_literal(self, node): + # type: (nodes.Node) -> None self.add_text('"') def depart_literal(self, node): + # type: (nodes.Node) -> None self.add_text('"') def visit_subscript(self, node): + # type: (nodes.Node) -> None self.add_text('_') def depart_subscript(self, node): + # type: (nodes.Node) -> None pass def visit_superscript(self, node): + # type: (nodes.Node) -> None self.add_text('^') def depart_superscript(self, node): + # type: (nodes.Node) -> None pass def visit_footnote_reference(self, node): + # type: (nodes.Node) -> None self.add_text('[%s]' % node.astext()) raise nodes.SkipNode def visit_citation_reference(self, node): + # type: (nodes.Node) -> None self.add_text('[%s]' % node.astext()) raise nodes.SkipNode def visit_Text(self, node): + # type: (nodes.Node) -> None self.add_text(node.astext()) def depart_Text(self, node): + # type: (nodes.Node) -> None pass def visit_generated(self, node): + # type: (nodes.Node) -> None pass def depart_generated(self, node): + # type: (nodes.Node) -> None pass def visit_inline(self, node): + # type: (nodes.Node) -> None if 'xref' in node['classes'] or 'term' in node['classes']: self.add_text('*') def depart_inline(self, node): + # type: (nodes.Node) -> None if 'xref' in node['classes'] or 'term' in node['classes']: self.add_text('*') def visit_container(self, node): + # type: (nodes.Node) -> None pass def depart_container(self, node): + # type: (nodes.Node) -> None pass def visit_problematic(self, node): + # type: (nodes.Node) -> None self.add_text('>>') def depart_problematic(self, node): + # type: (nodes.Node) -> None self.add_text('<<') def visit_system_message(self, node): + # type: (nodes.Node) -> None self.new_state(0) self.add_text('<SYSTEM MESSAGE: %s>' % node.astext()) self.end_state() raise nodes.SkipNode def visit_comment(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_meta(self, node): + # type: (nodes.Node) -> None # only valid for HTML raise nodes.SkipNode def visit_raw(self, node): + # type: (nodes.Node) -> None if 'text' in node.get('format', '').split(): self.new_state(0) self.add_text(node.astext()) @@ -964,6 +1170,7 @@ class TextTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_math(self, node): + # type: (nodes.Node) -> None self.builder.warn('using "math" markup without a Sphinx math extension ' 'active, please use one of the math extensions ' 'described at http://sphinx-doc.org/ext/math.html', @@ -973,4 +1180,5 @@ class TextTranslator(nodes.NodeVisitor): visit_math_block = visit_math def unknown_visit(self, node): + # type: (nodes.Node) -> None raise NotImplementedError('Unknown node: ' + node.__class__.__name__) diff --git a/sphinx/writers/xml.py b/sphinx/writers/xml.py index 5aa0ad96a..879f65dd3 100644 --- a/sphinx/writers/xml.py +++ b/sphinx/writers/xml.py @@ -12,16 +12,23 @@ from docutils import writers from docutils.writers.docutils_xml import Writer as BaseXMLWriter +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.builders import Builder # NOQA + class XMLWriter(BaseXMLWriter): def __init__(self, builder): + # type: (Builder) -> None BaseXMLWriter.__init__(self) self.builder = builder if self.builder.translator_class: self.translator_class = self.builder.translator_class def translate(self, *args, **kwargs): + # type: (Any, Any) -> None self.document.settings.newlines = \ self.document.settings.indents = \ self.builder.env.config.xml_pretty @@ -36,18 +43,21 @@ class 
PseudoXMLWriter(writers.Writer): """Formats this writer supports.""" config_section = 'pseudoxml writer' - config_section_dependencies = ('writers',) + config_section_dependencies = ('writers',) # type: Tuple[unicode] output = None """Final translated form of `document`.""" def __init__(self, builder): + # type: (Builder) -> None writers.Writer.__init__(self) self.builder = builder def translate(self): + # type: () -> None self.output = self.document.pformat() def supports(self, format): + # type: (unicode) -> bool """This writer supports all format-specific elements.""" return True From ceec82451bfbefc0fd720bbf48e4b9a029cacd99 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 8 Nov 2016 14:05:58 +0900 Subject: [PATCH 011/190] Add type-check annotations to sphinx.* --- sphinx/__init__.py | 2 +- sphinx/apidoc.py | 24 ++++- sphinx/cmdline.py | 25 +++-- sphinx/config.py | 44 ++++++--- sphinx/directives/__init__.py | 26 ++++- sphinx/directives/code.py | 22 ++++- sphinx/directives/other.py | 38 ++++++-- sphinx/highlighting.py | 3 +- sphinx/io.py | 30 +++++- sphinx/jinja2glue.py | 23 ++++- sphinx/locale/__init__.py | 21 ++-- sphinx/make_mode.py | 36 ++++++- sphinx/pycode/__init__.py | 38 ++++---- sphinx/pycode/nodes.py | 6 +- sphinx/pycode/pgen2/grammar.py | 18 ++-- sphinx/pycode/pgen2/parse.py | 15 ++- sphinx/pycode/pgen2/pgen.py | 27 ++++-- sphinx/pycode/pgen2/tokenize.py | 16 ++-- sphinx/quickstart.py | 5 +- sphinx/roles.py | 4 +- sphinx/search/__init__.py | 95 +++++++++++++------ sphinx/search/en.py | 5 + sphinx/search/ja.py | 21 +++- sphinx/search/ro.py | 4 +- sphinx/search/tr.py | 4 +- sphinx/search/zh.py | 11 ++- sphinx/setup_command.py | 35 ++++--- sphinx/theming.py | 26 +++-- sphinx/transforms/__init__.py | 13 +++ sphinx/transforms/compact_bullet_list.py | 6 ++ sphinx/transforms/i18n.py | 19 +++- sphinx/versioning.py | 13 ++- sphinx/websupport/__init__.py | 6 +- sphinx/websupport/storage/sqlalchemy_db.py | 10 +- .../websupport/storage/sqlalchemystorage.py | 4 +- 35 files changed, 510 insertions(+), 185 deletions(-) diff --git a/sphinx/__init__.py b/sphinx/__init__.py index 368f9d8fe..2f126b6f6 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -39,7 +39,7 @@ if __version__.endswith('+'): stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if out: - __display_version__ += '/' + out.decode().strip() + __display_version__ += '/' + out.decode().strip() # type: ignore except Exception: pass diff --git a/sphinx/apidoc.py b/sphinx/apidoc.py index d4793ff4d..e48a527a5 100644 --- a/sphinx/apidoc.py +++ b/sphinx/apidoc.py @@ -26,6 +26,10 @@ from fnmatch import fnmatch from sphinx.util.osutil import FileAvoidWrite, walk from sphinx import __display_version__ +if False: + # For type annotation + from typing import Any, Tuple # NOQA + # automodule options if 'SPHINX_APIDOC_OPTIONS' in os.environ: OPTIONS = os.environ['SPHINX_APIDOC_OPTIONS'].split(',') @@ -42,6 +46,7 @@ PY_SUFFIXES = set(['.py', '.pyx']) def makename(package, module): + # type: (unicode, unicode) -> unicode """Join package and module with a dot.""" # Both package and module can be None/empty. 
if package: @@ -54,6 +59,7 @@ def makename(package, module): def write_file(name, text, opts): + # type: (unicode, unicode, Any) -> None """Write the output file for module/package <name>.""" fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix)) if opts.dryrun: @@ -68,12 +74,14 @@ def write_file(name, text, opts): def format_heading(level, text): + # type: (int, unicode) -> unicode """Create a heading of <level> [1, 2 or 3 supported].""" underlining = ['=', '-', '~', ][level - 1] * len(text) return '%s\n%s\n\n' % (text, underlining) def format_directive(module, package=None): + # type: (unicode, unicode) -> unicode """Create the automodule directive and add the options.""" directive = '.. automodule:: %s\n' % makename(package, module) for option in OPTIONS: @@ -82,6 +90,7 @@ def format_directive(module, package=None): def create_module_file(package, module, opts): + # type: (unicode, unicode, Any) -> None """Build the text of the file and write the file.""" if not opts.noheadings: text = format_heading(1, '%s module' % module) @@ -93,6 +102,7 @@ def create_module_file(package, module, opts): def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace): + # type: (unicode, unicode, unicode, List[unicode], Any, List[unicode], bool) -> None """Build the text of the file and write the file.""" text = format_heading(1, ('%s package' if not is_namespace else "%s namespace") % makename(master_package, subroot)) @@ -148,13 +158,14 @@ def create_package_file(root, master_package, subroot, py_files, opts, subs, is_ def create_modules_toc_file(modules, opts, name='modules'): + # type: (List[unicode], Any, unicode) -> None """Create the module's index.""" text = format_heading(1, '%s' % opts.header) text += '.. toctree::\n' text += ' :maxdepth: %s\n\n' % opts.maxdepth modules.sort() - prev_module = '' + prev_module = '' # type: unicode for module in modules: # look if the module is a subpackage and, if yes, ignore it if module.startswith(prev_module + '.'): @@ -166,6 +177,7 @@ def create_modules_toc_file(modules, opts, name='modules'): def shall_skip(module, opts): + # type: (unicode, Any) -> bool """Check if we want to skip this module.""" # skip if the file doesn't exist and not using implicit namespaces if not opts.implicit_namespaces and not path.exists(module): @@ -184,6 +196,7 @@ def shall_skip(module, opts): def recurse_tree(rootpath, excludes, opts): + # type: (unicode, List[unicode], Any) -> List[unicode] """ Look for every file in the directory tree and create the corresponding ReST files. @@ -217,7 +230,7 @@ def recurse_tree(rootpath, excludes, opts): # remove hidden ('.') and private ('_') directories, as well as # excluded dirs if includeprivate: - exclude_prefixes = ('.',) + exclude_prefixes = ('.',) # type: Tuple[unicode, ...] else: exclude_prefixes = ('.', '_') subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and @@ -247,23 +260,26 @@ def recurse_tree(rootpath, excludes, opts): def normalize_excludes(rootpath, excludes): + # type: (unicode, List[unicode]) -> List[unicode] """Normalize the excluded directory list.""" return [path.abspath(exclude) for exclude in excludes] def is_excluded(root, excludes): + # type: (unicode, List[unicode]) -> bool """Check if the directory is in the exclude list. Note: by having trailing slashes, we avoid common prefix issues, like e.g. an exlude "foo" also accidentally excluding "foobar". 
""" for exclude in excludes: - if fnmatch(root, exclude): + if fnmatch(root, exclude): # type: ignore return True return False def main(argv=sys.argv): + # type: (List[str]) -> int """Parse and check the command line arguments.""" parser = optparse.OptionParser( usage="""\ @@ -359,7 +375,7 @@ Note: By default this script will not overwrite already created files.""") if opts.full: from sphinx import quickstart as qs modules.sort() - prev_module = '' + prev_module = '' # type: unicode text = '' for module in modules: if module.startswith(prev_module + '.'): diff --git a/sphinx/cmdline.py b/sphinx/cmdline.py index 0d85767e4..7a97e10e2 100644 --- a/sphinx/cmdline.py +++ b/sphinx/cmdline.py @@ -16,17 +16,22 @@ import traceback from os import path from six import text_type, binary_type + from docutils.utils import SystemMessage from sphinx import __display_version__ from sphinx.errors import SphinxError from sphinx.application import Sphinx from sphinx.util import Tee, format_exception_cut_frames, save_traceback -from sphinx.util.console import red, nocolor, color_terminal +from sphinx.util.console import red, nocolor, color_terminal # type: ignore from sphinx.util.docutils import docutils_namespace from sphinx.util.osutil import abspath, fs_encoding from sphinx.util.pycompat import terminal_safe +if False: + # For type annotation + from typing import Any, IO, Union # NOQA + USAGE = """\ Sphinx v%s @@ -45,18 +50,21 @@ For more information, visit <http://sphinx-doc.org/>. class MyFormatter(optparse.IndentedHelpFormatter): def format_usage(self, usage): + # type: (Any) -> Any return usage def format_help(self, formatter): - result = [] - if self.description: + # type: (Any) -> unicode + result = [] # type: List[unicode] + if self.description: # type: ignore result.append(self.format_description(formatter)) - if self.option_list: - result.append(self.format_option_help(formatter)) + if self.option_list: # type: ignore + result.append(self.format_option_help(formatter)) # type: ignore return "\n".join(result) def handle_exception(app, opts, exception, stderr=sys.stderr): + # type: (Sphinx, Any, Union[Exception, KeyboardInterrupt], IO) -> None if opts.pdb: import pdb print(red('Exception occurred while building, starting debugger:'), @@ -107,6 +115,7 @@ def handle_exception(app, opts, exception, stderr=sys.stderr): def main(argv): + # type: (List[unicode]) -> int if not color_terminal(): nocolor() @@ -210,11 +219,11 @@ def main(argv): # handle remaining filename arguments filenames = args[2:] - err = 0 + err = 0 # type: ignore for filename in filenames: if not path.isfile(filename): print('Error: Cannot find file %r.' 
% filename, file=sys.stderr) - err = 1 + err = 1 # type: ignore if err: return 1 @@ -249,7 +258,7 @@ def main(argv): print('Error: Cannot open warning file %r: %s' % (opts.warnfile, exc), file=sys.stderr) sys.exit(1) - warning = Tee(warning, warnfp) + warning = Tee(warning, warnfp) # type: ignore error = warning confoverrides = {} diff --git a/sphinx/config.py b/sphinx/config.py index 5741d66bf..9bfdd2976 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -16,9 +16,14 @@ from six import PY2, PY3, iteritems, string_types, binary_type, text_type, integ from sphinx.errors import ConfigError from sphinx.locale import l_ +from sphinx.util.i18n import format_date from sphinx.util.osutil import cd from sphinx.util.pycompat import execfile_, NoneType -from sphinx.util.i18n import format_date + +if False: + # For type annotation + from typing import Any, Callable, Tuple # NOQA + from sphinx.util.tags import Tags # NOQA nonascii_re = re.compile(br'[\x80-\xff]') copyright_year_re = re.compile(r'^((\d{4}-)?)(\d{4})(?=[ ,])') @@ -43,13 +48,15 @@ class ENUM(object): app.add_config_value('latex_show_urls', 'no', ENUM('no', 'footnote', 'inline')) """ def __init__(self, *candidates): + # type: (unicode) -> None self.candidates = candidates def match(self, value): + # type: (unicode) -> bool return value in self.candidates -string_classes = [text_type] +string_classes = [text_type] # type: List if PY2: string_classes.append(binary_type) # => [str, unicode] @@ -114,12 +121,13 @@ class Config(object): # pre-initialized confval for HTML builder html_translator_class = (None, 'html', string_classes), - ) + ) # type: Dict[unicode, Tuple] def __init__(self, dirname, filename, overrides, tags): + # type: (unicode, unicode, Dict, Tags) -> None self.overrides = overrides self.values = Config.config_values.copy() - config = {} + config = {} # type: Dict[unicode, Any] if dirname is not None: config_file = path.join(dirname, filename) config['__file__'] = config_file @@ -137,14 +145,14 @@ class Config(object): self._raw_config = config # these two must be preinitialized because extensions can add their # own config values - self.setup = config.get('setup', None) + self.setup = config.get('setup', None) # type: Callable if 'extensions' in overrides: if isinstance(overrides['extensions'], string_types): config['extensions'] = overrides.pop('extensions').split(',') else: config['extensions'] = overrides.pop('extensions') - self.extensions = config.get('extensions', []) + self.extensions = config.get('extensions', []) # type: List[unicode] # correct values of copyright year that are not coherent with # the SOURCE_DATE_EPOCH environment variable (if set) @@ -152,10 +160,11 @@ class Config(object): if getenv('SOURCE_DATE_EPOCH') is not None: for k in ('copyright', 'epub_copyright'): if k in config: - config[k] = copyright_year_re.sub('\g<1>%s' % format_date('%Y'), + config[k] = copyright_year_re.sub('\g<1>%s' % format_date('%Y'), # type: ignore # NOQA config[k]) def check_types(self, warn): + # type: (Callable) -> None # check all values for deviation from the default value's type, since # that can result in TypeErrors all over the place # NB. 
since config values might use l_() we have to wait with calling @@ -197,15 +206,17 @@ class Config(object): name=name, current=type(current), default=type(default))) def check_unicode(self, warn): + # type: (Callable) -> None # check all string values for non-ASCII characters in bytestrings, # since that can result in UnicodeErrors all over the place for name, value in iteritems(self._raw_config): - if isinstance(value, binary_type) and nonascii_re.search(value): + if isinstance(value, binary_type) and nonascii_re.search(value): # type: ignore warn('the config value %r is set to a string with non-ASCII ' 'characters; this can lead to Unicode errors occurring. ' 'Please use Unicode strings, e.g. %r.' % (name, u'Content')) def convert_overrides(self, name, value): + # type: (unicode, Any) -> Any if not isinstance(value, string_types): return value else: @@ -215,10 +226,10 @@ class Config(object): 'ignoring (use %r to set individual elements)' % (name, name + '.key=value')) elif isinstance(defvalue, list): - return value.split(',') + return value.split(',') # type: ignore elif isinstance(defvalue, integer_types): try: - return int(value) + return int(value) # type: ignore except ValueError: raise ValueError('invalid number %r for config value %r, ignoring' % (value, name)) @@ -231,6 +242,7 @@ class Config(object): return value def pre_init_values(self, warn): + # type: (Callable) -> None """Initialize some limited config variables before loading extensions""" variables = ['needs_sphinx', 'suppress_warnings', 'html_translator_class'] for name in variables: @@ -243,12 +255,13 @@ class Config(object): warn(exc) def init_values(self, warn): + # type: (Callable) -> None config = self._raw_config for valname, value in iteritems(self.overrides): try: if '.' in valname: realvalname, key = valname.split('.', 1) - config.setdefault(realvalname, {})[key] = value + config.setdefault(realvalname, {})[key] = value # type: ignore continue elif valname not in self.values: warn('unknown config value %r in override, ignoring' % valname) @@ -262,10 +275,11 @@ class Config(object): for name in config: if name in self.values: self.__dict__[name] = config[name] - if isinstance(self.source_suffix, string_types): - self.source_suffix = [self.source_suffix] + if isinstance(self.source_suffix, string_types): # type: ignore + self.source_suffix = [self.source_suffix] # type: ignore def __getattr__(self, name): + # type: (unicode) -> Any if name.startswith('_'): raise AttributeError(name) if name not in self.values: @@ -276,13 +290,17 @@ class Config(object): return default def __getitem__(self, name): + # type: (unicode) -> unicode return getattr(self, name) def __setitem__(self, name, value): + # type: (unicode, Any) -> None setattr(self, name, value) def __delitem__(self, name): + # type: (unicode) -> None delattr(self, name) def __contains__(self, name): + # type: (unicode) -> bool return name in self.values diff --git a/sphinx/directives/__init__.py b/sphinx/directives/__init__.py index 76b54f9d6..ea09daff5 100644 --- a/sphinx/directives/__init__.py +++ b/sphinx/directives/__init__.py @@ -29,6 +29,12 @@ from sphinx.directives.patches import ( # noqa Figure, Meta ) +if False: + # For type annotation + from typing import Any # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + # RE to strip backslash escapes nl_escape_re = re.compile(r'\\\n') @@ -51,9 +57,13 @@ class ObjectDescription(Directive): } # types of doc fields that this directive handles, see 
sphinx.util.docfields - doc_field_types = [] + doc_field_types = [] # type: List[Any] + domain = None # type: unicode + objtype = None # type: unicode + indexnode = None # type: addnodes.index def get_signatures(self): + # type: () -> List[unicode] """ Retrieve the signatures to document from the directive arguments. By default, signatures are given as arguments, one per line. @@ -65,6 +75,7 @@ class ObjectDescription(Directive): return [strip_backslash_re.sub(r'\1', line.strip()) for line in lines] def handle_signature(self, sig, signode): + # type: (unicode, addnodes.desc_signature) -> Any """ Parse the signature *sig* into individual nodes and append them to *signode*. If ValueError is raised, parsing is aborted and the whole @@ -77,6 +88,7 @@ class ObjectDescription(Directive): raise ValueError def add_target_and_index(self, name, sig, signode): + # type: (Any, unicode, addnodes.desc_signature) -> None """ Add cross-reference IDs and entries to self.indexnode, if applicable. @@ -85,6 +97,7 @@ class ObjectDescription(Directive): return # do nothing by default def before_content(self): + # type: () -> None """ Called before parsing content. Used to set information about the current directive context on the build environment. @@ -92,6 +105,7 @@ class ObjectDescription(Directive): pass def after_content(self): + # type: () -> None """ Called after parsing content. Used to reset information about the current directive context on the build environment. @@ -99,6 +113,7 @@ class ObjectDescription(Directive): pass def run(self): + # type: () -> List[nodes.Node] """ Main directive entry function, called by docutils upon encountering the directive. @@ -120,7 +135,7 @@ class ObjectDescription(Directive): self.domain, self.objtype = self.name.split(':', 1) else: self.domain, self.objtype = '', self.name - self.env = self.state.document.settings.env + self.env = self.state.document.settings.env # type: BuildEnvironment self.indexnode = addnodes.index(entries=[]) node = addnodes.desc() @@ -130,7 +145,7 @@ class ObjectDescription(Directive): node['objtype'] = node['desctype'] = self.objtype node['noindex'] = noindex = ('noindex' in self.options) - self.names = [] + self.names = [] # type: List[unicode] signatures = self.get_signatures() for i, sig in enumerate(signatures): # add a signature node for each signature in the current unit @@ -181,6 +196,7 @@ class DefaultRole(Directive): final_argument_whitespace = False def run(self): + # type: () -> List[nodes.Node] if not self.arguments: if '' in roles._roles: # restore the "default" default role @@ -209,9 +225,10 @@ class DefaultDomain(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = False - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env domain_name = self.arguments[0].lower() # if domain_name not in env.domains: @@ -225,6 +242,7 @@ class DefaultDomain(Directive): def setup(app): + # type: (Sphinx) -> None directives.register_directive('default-role', DefaultRole) directives.register_directive('default-domain', DefaultDomain) directives.register_directive('describe', ObjectDescription) diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py index 5bef8c386..e401a50de 100644 --- a/sphinx/directives/code.py +++ b/sphinx/directives/code.py @@ -11,17 +11,22 @@ import sys import codecs from difflib import unified_diff +from six import string_types + from docutils import nodes from docutils.parsers.rst import Directive, 
directives from docutils.statemachine import ViewList -from six import string_types - from sphinx import addnodes from sphinx.locale import _ from sphinx.util import parselinenos from sphinx.util.nodes import set_source_info +if False: + # For type annotation + from typing import Any # NOQA + from sphinx.application import Sphinx # NOQA + class Highlight(Directive): """ @@ -38,6 +43,7 @@ class Highlight(Directive): } def run(self): + # type: () -> List[nodes.Node] if 'linenothreshold' in self.options: try: linenothreshold = int(self.options['linenothreshold']) @@ -50,6 +56,7 @@ class Highlight(Directive): def dedent_lines(lines, dedent): + # type: (List[unicode], int) -> List[unicode] if not dedent: return lines @@ -64,6 +71,7 @@ def dedent_lines(lines, dedent): def container_wrapper(directive, literal_node, caption): + # type: (Directive, nodes.Node, unicode) -> nodes.container container_node = nodes.container('', literal_block=True, classes=['literal-block-wrapper']) parsed = nodes.Element() @@ -101,6 +109,7 @@ class CodeBlock(Directive): } def run(self): + # type: () -> List[nodes.Node] code = u'\n'.join(self.content) linespec = self.options.get('emphasize-lines') @@ -137,7 +146,7 @@ class CodeBlock(Directive): literal = container_wrapper(self, literal, caption) except ValueError as exc: document = self.state.document - errmsg = _('Invalid caption: %s' % exc[0][0].astext()) + errmsg = _('Invalid caption: %s' % exc[0][0].astext()) # type: ignore return [document.reporter.warning(errmsg, line=self.lineno)] # literal will be note_implicit_target that is linked from caption and numref. @@ -182,11 +191,12 @@ class LiteralInclude(Directive): } def read_with_encoding(self, filename, document, codec_info, encoding): + # type: (unicode, nodes.Node, Any, unicode) -> List try: with codecs.StreamReaderWriter(open(filename, 'rb'), codec_info[2], codec_info[3], 'strict') as f: lines = f.readlines() - lines = dedent_lines(lines, self.options.get('dedent')) + lines = dedent_lines(lines, self.options.get('dedent')) # type: ignore return lines except (IOError, OSError): return [document.reporter.warning( @@ -199,6 +209,7 @@ class LiteralInclude(Directive): (encoding, filename))] def run(self): + # type: () -> List[nodes.Node] document = self.state.document if not document.settings.file_insertion_enabled: return [document.reporter.warning('File insertion disabled', @@ -367,7 +378,7 @@ class LiteralInclude(Directive): retnode = container_wrapper(self, retnode, caption) except ValueError as exc: document = self.state.document - errmsg = _('Invalid caption: %s' % exc[0][0].astext()) + errmsg = _('Invalid caption: %s' % exc[0][0].astext()) # type: ignore return [document.reporter.warning(errmsg, line=self.lineno)] # retnode will be note_implicit_target that is linked from caption and numref. 
@@ -378,6 +389,7 @@ class LiteralInclude(Directive): def setup(app): + # type: (Sphinx) -> None directives.register_directive('highlight', Highlight) directives.register_directive('highlightlang', Highlight) # old directives.register_directive('code-block', CodeBlock) diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py index e071b327e..15944668e 100644 --- a/sphinx/directives/other.py +++ b/sphinx/directives/other.py @@ -8,6 +8,7 @@ """ from six.moves import range + from docutils import nodes from docutils.parsers.rst import Directive, directives from docutils.parsers.rst.directives.admonitions import BaseAdmonition @@ -21,8 +22,14 @@ from sphinx.util.nodes import explicit_title_re, set_source_info, \ process_index_entry from sphinx.util.matching import patfilter +if False: + # For type annotation + from typing import Tuple # NOQA + from sphinx.application import Sphinx # NOQA + def int_or_nothing(argument): + # type: (unicode) -> int if not argument: return 999 return int(argument) @@ -50,6 +57,7 @@ class TocTree(Directive): } def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env suffixes = env.config.source_suffix glob = 'glob' in self.options @@ -57,7 +65,7 @@ class TocTree(Directive): ret = [] # (title, ref) pairs, where ref may be a document, or an external link, # and title may be None if the document's title is to be used - entries = [] + entries = [] # type: List[Tuple[unicode, unicode]] includefiles = [] all_docnames = env.found_docs.copy() # don't add the currently visited file in catch-all patterns @@ -136,9 +144,10 @@ class Author(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env if not env.config.show_authors: return [] @@ -168,20 +177,21 @@ class Index(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] arguments = self.arguments[0].split('\n') env = self.state.document.settings.env targetid = 'index-%s' % env.new_serialno('index') targetnode = nodes.target('', '', ids=[targetid]) self.state.document.note_explicit_target(targetnode) indexnode = addnodes.index() - indexnode['entries'] = ne = [] + indexnode['entries'] = [] indexnode['inline'] = False set_source_info(self, indexnode) for entry in arguments: - ne.extend(process_index_entry(entry, targetid)) + indexnode['entries'].extend(process_index_entry(entry, targetid)) return [indexnode, targetnode] @@ -193,9 +203,10 @@ class VersionChange(Directive): required_arguments = 1 optional_arguments = 1 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] node = addnodes.versionmodified() node.document = self.state.document set_source_info(self, node) @@ -248,9 +259,10 @@ class TabularColumns(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] node = addnodes.tabular_col_spec() node['spec'] = self.arguments[0] set_source_info(self, node) @@ -265,9 +277,10 @@ class Centered(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] 
if not self.arguments: return [] subnode = addnodes.centered() @@ -285,9 +298,10 @@ class Acks(Directive): required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] node = addnodes.acks() node.document = self.state.document self.state.nested_parse(self.content, self.content_offset, node) @@ -311,6 +325,7 @@ class HList(Directive): } def run(self): + # type: () -> List[nodes.Node] ncolumns = self.options.get('columns', 2) node = nodes.paragraph() node.document = self.state.document @@ -342,9 +357,10 @@ class Only(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = {} + option_spec = {} # type: Dict def run(self): + # type: () -> List[nodes.Node] node = addnodes.only() node.document = self.state.document set_source_info(self, node) @@ -398,6 +414,7 @@ class Include(BaseInclude): """ def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env if self.arguments[0].startswith('<') and \ self.arguments[0].endswith('>'): @@ -410,6 +427,7 @@ class Include(BaseInclude): def setup(app): + # type: (Sphinx) -> None directives.register_directive('toctree', TocTree) directives.register_directive('sectionauthor', Author) directives.register_directive('moduleauthor', Author) diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py index 9594b5336..543e1485a 100644 --- a/sphinx/highlighting.py +++ b/sphinx/highlighting.py @@ -16,6 +16,7 @@ from sphinx.util.texescape import tex_hl_escape_map_new from sphinx.ext import doctest from pygments import highlight +from pygments.lexer import Lexer # NOQA from pygments.lexers import PythonLexer, Python3Lexer, PythonConsoleLexer, \ CLexer, TextLexer, RstLexer from pygments.lexers import get_lexer_by_name, guess_lexer @@ -33,7 +34,7 @@ lexers = dict( pycon3 = PythonConsoleLexer(python3=True, stripnl=False), rest = RstLexer(stripnl=False), c = CLexer(stripnl=False), -) +) # type: Dict[unicode, Lexer] for _lexer in lexers.values(): _lexer.add_filter('raiseonerror') diff --git a/sphinx/io.py b/sphinx/io.py index f1386c9a8..c6fea570e 100644 --- a/sphinx/io.py +++ b/sphinx/io.py @@ -12,6 +12,7 @@ from docutils.io import FileInput from docutils.readers import standalone from docutils.writers import UnfilteredWriter from six import string_types, text_type +from typing import Any, Union # NOQA from sphinx.transforms import ( ApplySourceWorkaround, ExtraTranslatableNodes, CitationReferences, @@ -24,23 +25,36 @@ from sphinx.transforms.i18n import ( ) from sphinx.util import import_object, split_docinfo +if False: + # For type annotation + from typing import Any, Union # NOQA + from docutils import nodes # NOQA + from docutils.io import Input # NOQA + from docutils.parsers import Parser # NOQA + from docutils.transforms import Transform # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + class SphinxBaseReader(standalone.Reader): """ Add our source parsers """ def __init__(self, app, parsers={}, *args, **kwargs): + # type: (Sphinx, Dict[unicode, Parser], Any, Any) -> None standalone.Reader.__init__(self, *args, **kwargs) - self.parser_map = {} + self.parser_map = {} # type: Dict[unicode, Parser] for suffix, parser_class in parsers.items(): if isinstance(parser_class, string_types): - parser_class = import_object(parser_class, 'source parser') + parser_class = 
import_object(parser_class, 'source parser') # type: ignore parser = parser_class() if hasattr(parser, 'set_application'): parser.set_application(app) self.parser_map[suffix] = parser def read(self, source, parser, settings): + # type: (Input, Parser, Dict) -> nodes.document self.source = source for suffix in self.parser_map: @@ -56,6 +70,7 @@ class SphinxBaseReader(standalone.Reader): return self.document def get_transforms(self): + # type: () -> List[Transform] return standalone.Reader.get_transforms(self) + self.transforms @@ -84,13 +99,16 @@ class SphinxI18nReader(SphinxBaseReader): FilterSystemMessages, RefOnlyBulletListTransform] def __init__(self, *args, **kwargs): + # type: (Any, Any) -> None SphinxBaseReader.__init__(self, *args, **kwargs) - self.lineno = None + self.lineno = None # type: int def set_lineno_for_reporter(self, lineno): + # type: (int) -> None self.lineno = lineno def new_document(self): + # type: () -> nodes.document document = SphinxBaseReader.new_document(self) reporter = document.reporter @@ -105,28 +123,32 @@ class SphinxDummyWriter(UnfilteredWriter): supported = ('html',) # needed to keep "meta" nodes def translate(self): + # type: () -> None pass class SphinxFileInput(FileInput): def __init__(self, app, env, *args, **kwds): + # type: (Sphinx, BuildEnvironment, Any, Any) -> None self.app = app self.env = env kwds['error_handler'] = 'sphinx' # py3: handle error on open. FileInput.__init__(self, *args, **kwds) def decode(self, data): + # type: (Union[unicode, bytes]) -> unicode if isinstance(data, text_type): # py3: `data` already decoded. return data return data.decode(self.encoding, 'sphinx') # py2: decoding def read(self): + # type: () -> unicode def get_parser_type(source_path): for suffix in self.env.config.source_parsers: if source_path.endswith(suffix): parser_class = self.env.config.source_parsers[suffix] if isinstance(parser_class, string_types): - parser_class = import_object(parser_class, 'source parser') + parser_class = import_object(parser_class, 'source parser') # type: ignore # NOQA return parser_class.supported else: return ('restructuredtext',) diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py index 6e2ef7186..c1bd04765 100644 --- a/sphinx/jinja2glue.py +++ b/sphinx/jinja2glue.py @@ -17,18 +17,28 @@ from jinja2 import FileSystemLoader, BaseLoader, TemplateNotFound, \ contextfunction from jinja2.utils import open_if_exists from jinja2.sandbox import SandboxedEnvironment +from typing import Any, Callable, Iterator, Tuple # NOQA from sphinx.application import TemplateBridge from sphinx.util.osutil import mtimes_of_files +if False: + # For type annotation + from typing import Any, Callable, Iterator, Tuple # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + from sphinx.themes import Theme # NOQA + def _tobool(val): + # type: (unicode) -> bool if isinstance(val, string_types): - return val.lower() in ('true', '1', 'yes', 'on') + return val.lower() in ('true', '1', 'yes', 'on') # type: ignore return bool(val) def _toint(val): + # type: (unicode) -> int try: return int(val) except ValueError: @@ -36,6 +46,7 @@ def _toint(val): def _slice_index(values, slices): + # type: (List, int) -> Iterator[List] seq = list(values) length = 0 for value in values: @@ -57,6 +68,7 @@ def _slice_index(values, slices): def accesskey(context, key): + # type: (Any, unicode) -> unicode """Helper to output each access key only once.""" if '_accesskeys' not in context: context.vars['_accesskeys'] = {} @@ 
-68,12 +80,15 @@ def accesskey(context, key): class idgen(object): def __init__(self): + # type: () -> None self.id = 0 def current(self): + # type: () -> int return self.id def __next__(self): + # type: () -> int self.id += 1 return self.id next = __next__ # Python 2/Jinja compatibility @@ -86,6 +101,7 @@ class SphinxFileSystemLoader(FileSystemLoader): """ def get_source(self, environment, template): + # type: (BuildEnvironment, unicode) -> Tuple[unicode, unicode, Callable] for searchpath in self.searchpath: filename = path.join(searchpath, template) f = open_if_exists(filename) @@ -113,6 +129,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader): # TemplateBridge interface def init(self, builder, theme=None, dirs=None): + # type: (Builder, Theme, List[unicode]) -> None # create a chain of paths to search if theme: # the theme's own dir and its bases' dirs @@ -155,17 +172,21 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader): builder.app.translator) def render(self, template, context): + # type: (unicode, Dict) -> None return self.environment.get_template(template).render(context) def render_string(self, source, context): + # type: (unicode, Dict) -> unicode return self.environment.from_string(source).render(context) def newest_template_mtime(self): + # type: () -> float return max(mtimes_of_files(self.pathchain, '.html')) # Loader interface def get_source(self, environment, template): + # type: (BuildEnvironment, unicode) -> Tuple[unicode, unicode, Callable] loaders = self.loaders # exclamation mark starts search from theme if template.startswith('!'): diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py index d6ce7329b..44ad64304 100644 --- a/sphinx/locale/__init__.py +++ b/sphinx/locale/__init__.py @@ -14,6 +14,10 @@ import gettext from six import PY3, text_type from six.moves import UserString +if False: + # For type annotation + from typing import Any, Tuple # NOQA + class _TranslationProxy(UserString, object): """ @@ -140,6 +144,7 @@ class _TranslationProxy(UserString, object): def mygettext(string): + # type: (unicode) -> unicode """Used instead of _ when creating TranslationProxies, because _ is not bound yet at that time. """ @@ -147,10 +152,11 @@ def mygettext(string): def lazy_gettext(string): + # type: (unicode) -> unicode """A lazy version of `gettext`.""" # if isinstance(string, _TranslationProxy): # return string - return _TranslationProxy(mygettext, string) + return _TranslationProxy(mygettext, string) # type: ignore l_ = lazy_gettext @@ -184,19 +190,22 @@ pairindextypes = { 'exception': l_('exception'), 'statement': l_('statement'), 'builtin': l_('built-in function'), -} +} # Dict[unicode, _TranslationProxy] -translators = {} +translators = {} # type: Dict[unicode, Any] if PY3: def _(message): + # type: (unicode) -> unicode return translators['sphinx'].gettext(message) else: def _(message): + # type: (unicode) -> unicode return translators['sphinx'].ugettext(message) def init(locale_dirs, language, catalog='sphinx'): + # type: (List, unicode, unicode) -> Tuple[Any, bool] """Look for message catalogs in `locale_dirs` and *ensure* that there is at least a NullTranslations catalog set in `translators`. 
If called multiple times or if several ``.mo`` files are found, their contents are merged @@ -213,12 +222,12 @@ def init(locale_dirs, language, catalog='sphinx'): # loading for dir_ in locale_dirs: try: - trans = gettext.translation(catalog, localedir=dir_, - languages=[language]) + trans = gettext.translation(catalog, localedir=dir_, # type: ignore + languages=[language]) # type: ignore if translator is None: translator = trans else: - translator._catalog.update(trans._catalog) + translator._catalog.update(trans._catalog) # type: ignore except Exception: # Language couldn't be found in the specified path pass diff --git a/sphinx/make_mode.py b/sphinx/make_mode.py index 28316458e..87333301c 100644 --- a/sphinx/make_mode.py +++ b/sphinx/make_mode.py @@ -22,7 +22,7 @@ from os import path from subprocess import call import sphinx -from sphinx.util.console import bold, blue +from sphinx.util.console import bold, blue # type: ignore from sphinx.util.osutil import cd, rmtree proj_name = os.getenv('SPHINXPROJ', '<project>') @@ -59,14 +59,17 @@ BUILDERS = [ class Make(object): def __init__(self, srcdir, builddir, opts): + # type: (unicode, unicode, List[unicode]) -> None self.srcdir = srcdir self.builddir = builddir self.opts = opts def builddir_join(self, *comps): + # type: (unicode) -> unicode return path.join(self.builddir, *comps) def build_clean(self): + # type: () -> int if not path.exists(self.builddir): return elif not path.isdir(self.builddir): @@ -77,19 +80,22 @@ class Make(object): rmtree(self.builddir_join(item)) def build_help(self): + # type: () -> None print(bold("Sphinx v%s" % sphinx.__display_version__)) - print("Please use `make %s' where %s is one of" % ((blue('target'),)*2)) + print("Please use `make %s' where %s is one of" % ((blue('target'),)*2)) # type: ignore # NOQA for osname, bname, description in BUILDERS: if not osname or os.name == osname: print(' %s %s' % (blue(bname.ljust(10)), description)) def build_html(self): + # type: () -> int if self.run_generic_build('html') > 0: return 1 print() print('Build finished. The HTML pages are in %s.' % self.builddir_join('html')) def build_dirhtml(self): + # type: () -> int if self.run_generic_build('dirhtml') > 0: return 1 print() @@ -97,6 +103,7 @@ class Make(object): self.builddir_join('dirhtml')) def build_singlehtml(self): + # type: () -> int if self.run_generic_build('singlehtml') > 0: return 1 print() @@ -104,18 +111,21 @@ class Make(object): self.builddir_join('singlehtml')) def build_pickle(self): + # type: () -> int if self.run_generic_build('pickle') > 0: return 1 print() print('Build finished; now you can process the pickle files.') def build_json(self): + # type: () -> int if self.run_generic_build('json') > 0: return 1 print() print('Build finished; now you can process the JSON files.') def build_htmlhelp(self): + # type: () -> int if self.run_generic_build('htmlhelp') > 0: return 1 print() @@ -123,6 +133,7 @@ class Make(object): '.hhp project file in %s.' % self.builddir_join('htmlhelp')) def build_qthelp(self): + # type: () -> int if self.run_generic_build('qthelp') > 0: return 1 print() @@ -134,6 +145,7 @@ class Make(object): self.builddir_join('qthelp', proj_name)) def build_devhelp(self): + # type: () -> int if self.run_generic_build('devhelp') > 0: return 1 print() @@ -145,12 +157,14 @@ class Make(object): print("$ devhelp") def build_epub(self): + # type: () -> int if self.run_generic_build('epub') > 0: return 1 print() print('Build finished. The ePub file is in %s.' 
% self.builddir_join('epub')) def build_latex(self): + # type: () -> int if self.run_generic_build('latex') > 0: return 1 print("Build finished; the LaTeX files are in %s." % self.builddir_join('latex')) @@ -159,24 +173,28 @@ class Make(object): print("(use `make latexpdf' here to do that automatically).") def build_latexpdf(self): + # type: () -> int if self.run_generic_build('latex') > 0: return 1 with cd(self.builddir_join('latex')): os.system('make all-pdf') def build_latexpdfja(self): + # type: () -> int if self.run_generic_build('latex') > 0: return 1 with cd(self.builddir_join('latex')): os.system('make all-pdf-ja') def build_text(self): + # type: () -> int if self.run_generic_build('text') > 0: return 1 print() print('Build finished. The text files are in %s.' % self.builddir_join('text')) def build_texinfo(self): + # type: () -> int if self.run_generic_build('texinfo') > 0: return 1 print("Build finished; the Texinfo files are in %s." % @@ -186,12 +204,14 @@ class Make(object): print("(use `make info' here to do that automatically).") def build_info(self): + # type: () -> int if self.run_generic_build('texinfo') > 0: return 1 with cd(self.builddir_join('texinfo')): os.system('make info') def build_gettext(self): + # type: () -> int dtdir = self.builddir_join('gettext', '.doctrees') if self.run_generic_build('gettext', doctreedir=dtdir) > 0: return 1 @@ -200,6 +220,7 @@ class Make(object): self.builddir_join('gettext')) def build_changes(self): + # type: () -> int if self.run_generic_build('changes') > 0: return 1 print() @@ -207,6 +228,7 @@ class Make(object): self.builddir_join('changes')) def build_linkcheck(self): + # type: () -> int res = self.run_generic_build('linkcheck') print() print('Link check complete; look for any errors in the above output ' @@ -214,12 +236,14 @@ class Make(object): return res def build_doctest(self): + # type: () -> int res = self.run_generic_build('doctest') print("Testing of doctests in the sources finished, look at the " "results in %s." % self.builddir_join('doctest', 'output.txt')) return res def build_coverage(self): + # type: () -> int if self.run_generic_build('coverage') > 0: print("Has the coverage extension been enabled?") return 1 @@ -228,12 +252,14 @@ class Make(object): "results in %s." % self.builddir_join('coverage')) def build_xml(self): + # type: () -> int if self.run_generic_build('xml') > 0: return 1 print() print('Build finished. The XML files are in %s.' 
% self.builddir_join('xml')) def build_pseudoxml(self): + # type: () -> int if self.run_generic_build('pseudoxml') > 0: return 1 print() @@ -241,6 +267,7 @@ class Make(object): self.builddir_join('pseudoxml')) def run_generic_build(self, builder, doctreedir=None): + # type: (unicode, unicode) -> int # compatibility with old Makefile papersize = os.getenv('PAPER', '') opts = self.opts @@ -261,11 +288,12 @@ class Make(object): # linux, mac: 'sphinx-build' or 'sphinx-build.py' cmd = [sys.executable, orig_cmd] - return call(cmd + ['-b', builder] + opts + - ['-d', doctreedir, self.srcdir, self.builddir_join(builder)]) + return call(cmd + ['-b', builder] + opts + # type: ignore + ['-d', doctreedir, self.srcdir, self.builddir_join(builder)]) # type: ignore # NOQA def run_make_mode(args): + # type: (List[unicode]) -> int if len(args) < 3: print('Error: at least 3 arguments (builder, source ' 'dir, build dir) are required.', file=sys.stderr) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index baf5c0068..2c898560b 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -24,6 +24,10 @@ from sphinx.util import get_module_source, detect_encoding from sphinx.util.pycompat import TextIOWrapper from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc +if False: + # For type annotation + from typing import Any, Tuple # NOQA + # load the Python grammar _grammarfile = path.join(package_dir, 'pycode', @@ -63,10 +67,10 @@ class AttrDocVisitor(nodes.NodeVisitor): self.scope = scope self.in_init = 0 self.encoding = encoding - self.namespace = [] - self.collected = {} + self.namespace = [] # type: List[unicode] + self.collected = {} # type: Dict[Tuple[unicode, unicode], unicode] self.tagnumber = 0 - self.tagorder = {} + self.tagorder = {} # type: Dict[unicode, int] def add_tag(self, name): name = '.'.join(self.namespace + [name]) @@ -102,10 +106,10 @@ class AttrDocVisitor(nodes.NodeVisitor): parent = node.parent idx = parent.children.index(node) + 1 while idx < len(parent): - if parent[idx].type == sym.SEMI: + if parent[idx].type == sym.SEMI: # type: ignore idx += 1 continue # skip over semicolon - if parent[idx].type == sym.NEWLINE: + if parent[idx].type == sym.NEWLINE: # type: ignore prefix = parent[idx].get_prefix() if not isinstance(prefix, text_type): prefix = prefix.decode(self.encoding) @@ -138,8 +142,8 @@ class AttrDocVisitor(nodes.NodeVisitor): prev = node.get_prev_sibling() if not prev: return - if prev.type == sym.simple_stmt and \ - prev[0].type == sym.expr_stmt and _eq in prev[0].children: + if (prev.type == sym.simple_stmt and # type: ignore + prev[0].type == sym.expr_stmt and _eq in prev[0].children): # type: ignore # need to "eval" the string because it's returned in its # original form docstring = literals.evalString(node[0].value, self.encoding) @@ -178,7 +182,7 @@ class AttrDocVisitor(nodes.NodeVisitor): class ModuleAnalyzer(object): # cache for analyzer objects -- caches both by module and file name - cache = {} + cache = {} # type: Dict[Tuple[unicode, unicode], Any] @classmethod def for_string(cls, string, modname, srcname='<string>'): @@ -240,14 +244,14 @@ class ModuleAnalyzer(object): self.source.seek(pos) # will be filled by tokenize() - self.tokens = None + self.tokens = None # type: List[unicode] # will be filled by parse() - self.parsetree = None + self.parsetree = None # type: Any # will be filled by find_attr_docs() - self.attr_docs = None - self.tagorder = None + self.attr_docs = None # type: List[unicode] + self.tagorder = None 
# type: Dict[unicode, int] # will be filled by find_tags() - self.tags = None + self.tags = None # type: List[unicode] def tokenize(self): """Generate tokens from the source.""" @@ -289,8 +293,8 @@ class ModuleAnalyzer(object): return self.tags self.tokenize() result = {} - namespace = [] - stack = [] + namespace = [] # type: List[unicode] + stack = [] # type: List[Tuple[unicode, unicode, unicode, int]] indent = 0 defline = False expect_indent = False @@ -301,7 +305,7 @@ class ModuleAnalyzer(object): if tokentup[0] not in ignore: yield tokentup tokeniter = tokeniter() - for type, tok, spos, epos, line in tokeniter: + for type, tok, spos, epos, line in tokeniter: # type: ignore if expect_indent and type != token.NL: if type != token.INDENT: # no suite -- one-line definition @@ -312,7 +316,7 @@ class ModuleAnalyzer(object): result[fullname] = (dtype, startline, endline - emptylines) expect_indent = False if tok in ('def', 'class'): - name = next(tokeniter)[1] + name = next(tokeniter)[1] # type: ignore namespace.append(name) fullname = '.'.join(namespace) stack.append((tok, fullname, spos[0], indent)) diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py index ee40f3c0d..b6b3355c0 100644 --- a/sphinx/pycode/nodes.py +++ b/sphinx/pycode/nodes.py @@ -14,7 +14,7 @@ class BaseNode(object): """ Node superclass for both terminal and nonterminal nodes. """ - parent = None + parent = None # type: BaseNode def _eq(self, other): raise NotImplementedError @@ -29,7 +29,7 @@ class BaseNode(object): return NotImplemented return not self._eq(other) - __hash__ = None + __hash__ = None # type: str def get_prev_sibling(self): """Return previous child in parent's children, or None.""" @@ -204,5 +204,5 @@ class NodeVisitor(object): def generic_visit(self, node): """Called if no explicit visitor function exists for a node.""" if isinstance(node, Node): - for child in node: + for child in node: # type: ignore self.visit(child) diff --git a/sphinx/pycode/pgen2/grammar.py b/sphinx/pycode/pgen2/grammar.py index 42e6d72ee..cd6a435d5 100644 --- a/sphinx/pycode/pgen2/grammar.py +++ b/sphinx/pycode/pgen2/grammar.py @@ -19,6 +19,10 @@ import pickle # Local imports from sphinx.pycode.pgen2 import token +if False: + # For type annotation + from typing import Tuple # NOQA + class Grammar(object): """Pgen parsing tables tables conversion class. @@ -75,14 +79,14 @@ class Grammar(object): """ def __init__(self): - self.symbol2number = {} - self.number2symbol = {} - self.states = [] - self.dfas = {} + self.symbol2number = {} # type: Dict[unicode, int] + self.number2symbol = {} # type: Dict[int, unicode] + self.states = [] # type: List[List[List[Tuple[int, int]]]] + self.dfas = {} # type: Dict[int, Tuple[List[List[Tuple[int, int]]], unicode]] self.labels = [(0, "EMPTY")] - self.keywords = {} - self.tokens = {} - self.symbol2label = {} + self.keywords = {} # type: Dict[unicode, unicode] + self.tokens = {} # type: Dict[unicode, unicode] + self.symbol2label = {} # type: Dict[unicode, unicode] self.start = 256 def dump(self, filename): diff --git a/sphinx/pycode/pgen2/parse.py b/sphinx/pycode/pgen2/parse.py index 60eec05ea..43b88b519 100644 --- a/sphinx/pycode/pgen2/parse.py +++ b/sphinx/pycode/pgen2/parse.py @@ -13,6 +13,10 @@ how this parsing engine works. 
# Local imports from sphinx.pycode.pgen2 import token +if False: + # For type annotation + from typing import Any, Tuple # NOQA + class ParseError(Exception): """Exception to signal the parser is stuck.""" @@ -104,11 +108,12 @@ class Parser(object): # Each stack entry is a tuple: (dfa, state, node). # A node is a tuple: (type, value, context, children), # where children is a list of nodes or None, and context may be None. - newnode = (start, None, None, []) + newnode = (start, None, None, []) # type: Tuple[unicode, unicode, unicode, List] stackentry = (self.grammar.dfas[start], 0, newnode) self.stack = [stackentry] - self.rootnode = None - self.used_names = set() # Aliased to self.rootnode.used_names in pop() + self.rootnode = None # type: Any + self.used_names = set() # type: Set[unicode] + # Aliased to self.rootnode.used_names in pop() def addtoken(self, type, value, context): """Add a token; return True iff this is the end of the program.""" @@ -175,7 +180,7 @@ class Parser(object): def shift(self, type, value, newstate, context): """Shift a token. (Internal)""" dfa, state, node = self.stack[-1] - newnode = (type, value, context, None) + newnode = (type, value, context, None) # type: Tuple[unicode, unicode, unicode, List] newnode = self.convert(self.grammar, newnode) if newnode is not None: node[-1].append(newnode) @@ -184,7 +189,7 @@ class Parser(object): def push(self, type, newdfa, newstate, context): """Push a nonterminal. (Internal)""" dfa, state, node = self.stack[-1] - newnode = (type, None, context, []) + newnode = (type, None, context, []) # type: Tuple[unicode, unicode, unicode, List] self.stack[-1] = (dfa, newstate, node) self.stack.append((newdfa, 0, newnode)) diff --git a/sphinx/pycode/pgen2/pgen.py b/sphinx/pycode/pgen2/pgen.py index 7598e6abc..3fe91e57e 100644 --- a/sphinx/pycode/pgen2/pgen.py +++ b/sphinx/pycode/pgen2/pgen.py @@ -7,9 +7,13 @@ from six import iteritems from collections import OrderedDict # Pgen imports - from sphinx.pycode.pgen2 import grammar, token, tokenize +if False: + # For type annotation + from typing import Any, Tuple # NOQA + + class PgenGrammar(grammar.Grammar): pass @@ -27,7 +31,8 @@ class ParserGenerator(object): self.dfas, self.startsymbol = self.parse() if close_stream is not None: close_stream() - self.first = {} # map from symbol name to set of tokens + self.first = {} # type: Dict[unicode, List[unicode]] + # map from symbol name to set of tokens self.addfirstsets() def make_grammar(self): @@ -42,7 +47,7 @@ class ParserGenerator(object): c.number2symbol[i] = name for name in names: dfa = self.dfas[name] - states = [] + states = [] # type: List[List[Tuple[int, int]]] for state in dfa: arcs = [] for label, next in iteritems(state.arcs): @@ -122,7 +127,7 @@ class ParserGenerator(object): dfa = self.dfas[name] self.first[name] = None # dummy to detect left recursion state = dfa[0] - totalset = {} + totalset = {} # type: Dict[unicode, int] overlapcheck = {} for label, next in iteritems(state.arcs): if label in self.dfas: @@ -138,7 +143,7 @@ class ParserGenerator(object): else: totalset[label] = 1 overlapcheck[label] = {label: 1} - inverse = {} + inverse = {} # type: Dict[unicode, unicode] for label, itsfirst in sorted(overlapcheck.items()): for symbol in sorted(itsfirst): if symbol in inverse: @@ -180,7 +185,7 @@ class ParserGenerator(object): assert isinstance(start, NFAState) assert isinstance(finish, NFAState) def closure(state): - base = {} + base = {} # type: Dict addclosure(state, base) return base def addclosure(state, base): @@ -188,12 
+193,12 @@ class ParserGenerator(object): if state in base: return base[state] = 1 - for label, next in state.arcs: + for label, next in state.arcs: # type: ignore if label is None: addclosure(next, base) states = [DFAState(closure(start), finish)] for state in states: # NB states grows while we're iterating - arcs = {} + arcs = {} # type: Dict[unicode, Dict] for nfastate in state.nfaset: for label, next in nfastate.arcs: if label is not None: @@ -343,7 +348,8 @@ class ParserGenerator(object): class NFAState(object): def __init__(self): - self.arcs = [] # list of (label, NFAState) pairs + self.arcs = [] # type: List[Tuple[unicode, Any]] + # list of (label, NFAState) pairs def addarc(self, next, label=None): assert label is None or isinstance(label, str) @@ -361,7 +367,8 @@ class DFAState(object): assert isinstance(final, NFAState) self.nfaset = nfaset self.isfinal = final in nfaset - self.arcs = OrderedDict() # map from label to DFAState + self.arcs = OrderedDict() # type: OrderedDict + # map from label to DFAState def __hash__(self): return hash(tuple(self.arcs)) diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py index c7013bf91..a096795f8 100644 --- a/sphinx/pycode/pgen2/tokenize.py +++ b/sphinx/pycode/pgen2/tokenize.py @@ -183,7 +183,7 @@ def tokenize_loop(readline, tokeneater): class Untokenizer: def __init__(self): - self.tokens = [] + self.tokens = [] # type: List[unicode] self.prev_row = 1 self.prev_col = 0 @@ -294,17 +294,17 @@ def generate_tokens(readline): if contstr: # continued string if not line: - raise TokenError("EOF in multi-line string", strstart) - endmatch = endprog.match(line) + raise TokenError("EOF in multi-line string", strstart) # type: ignore + endmatch = endprog.match(line) # type: ignore if endmatch: pos = end = endmatch.end(0) - yield (STRING, contstr + line[:end], - strstart, (lnum, end), contline + line) + yield (STRING, contstr + line[:end], # type: ignore + strstart, (lnum, end), contline + line) # type: ignore contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': - yield (ERRORTOKEN, contstr + line, - strstart, (lnum, len(line)), contline) + yield (ERRORTOKEN, contstr + line, # type: ignore + strstart, (lnum, len(line)), contline) # type: ignore contstr = '' contline = None continue @@ -333,7 +333,7 @@ def generate_tokens(readline): yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) else: - yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], + yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], # type: ignore (lnum, pos), (lnum, len(line)), line) continue diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py index 3c7ab3d97..eb5349ede 100644 --- a/sphinx/quickstart.py +++ b/sphinx/quickstart.py @@ -36,8 +36,9 @@ from docutils.utils import column_width from sphinx import __display_version__, package_dir from sphinx.util.osutil import make_filename -from sphinx.util.console import purple, bold, red, turquoise, \ - nocolor, color_terminal +from sphinx.util.console import ( # type: ignore + purple, bold, red, turquoise, nocolor, color_terminal +) from sphinx.util.template import SphinxRenderer from sphinx.util import texescape diff --git a/sphinx/roles.py b/sphinx/roles.py index 6e8de3b4a..01e34fa71 100644 --- a/sphinx/roles.py +++ b/sphinx/roles.py @@ -175,7 +175,7 @@ def indexmarkup_role(typ, rawtext, text, lineno, inliner, typ = env.config.default_role else: typ = typ.lower() - has_explicit_title, title, target = split_explicit_title(text) + 
has_explicit_title, title, target = split_explicit_title(text) # type: bool, unicode, unicode # NOQA title = utils.unescape(title) target = utils.unescape(target) targetid = 'index-%s' % env.new_serialno('index') @@ -186,7 +186,7 @@ def indexmarkup_role(typ, rawtext, text, lineno, inliner, indexnode['entries'] = [ ('single', _('Python Enhancement Proposals; PEP %s') % target, targetid, '', None)] - anchor = '' + anchor = '' # type: unicode anchorindex = target.find('#') if anchorindex > 0: target, anchor = target[:anchorindex], target[anchorindex:] diff --git a/sphinx/search/__init__.py b/sphinx/search/__init__.py index d3c6c0eba..959e335c3 100644 --- a/sphinx/search/__init__.py +++ b/sphinx/search/__init__.py @@ -9,16 +9,23 @@ :license: BSD, see LICENSE for details. """ import re +from os import path from six import iteritems, itervalues, text_type, string_types from six.moves import cPickle as pickle + from docutils.nodes import raw, comment, title, Text, NodeVisitor, SkipNode -from os import path import sphinx from sphinx.util import jsdump, rpartition from sphinx.util.pycompat import htmlescape +if False: + # For type annotation + from typing import Any, IO, Iterable, Tuple, Type # NOQA + from docutils import nodes # NOQA + from sphinx.environment import BuildEnvironment # NOQA + class SearchLanguage(object): """ @@ -42,10 +49,10 @@ class SearchLanguage(object): This class is used to preprocess search word which Sphinx HTML readers type, before searching index. Default implementation does nothing. """ - lang = None - language_name = None - stopwords = set() - js_stemmer_rawcode = None + lang = None # type: unicode + language_name = None # type: unicode + stopwords = set() # type: Set[unicode] + js_stemmer_rawcode = None # type: unicode js_stemmer_code = """ /** * Dummy stemmer for languages without stemming rules. @@ -60,23 +67,27 @@ var Stemmer = function() { _word_re = re.compile(r'\w+(?u)') def __init__(self, options): + # type: (Dict) -> None self.options = options self.init(options) def init(self, options): + # type: (Dict) -> None """ Initialize the class with the options the user has given. """ def split(self, input): + # type: (unicode) -> List[unicode] """ This method splits a sentence into words. Default splitter splits input at white spaces, which should be enough for most languages except CJK languages. """ - return self._word_re.findall(input) + return self._word_re.findall(input) # type: ignore def stem(self, word): + # type: (unicode) -> unicode """ This method implements stemming algorithm of the Python version. @@ -90,6 +101,7 @@ var Stemmer = function() { return word def word_filter(self, word): + # type: (unicode) -> bool """ Return true if the target word should be registered in the search index. This method is called after stemming. 
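As an aside on the annotation style used in these hunks: the ``# type:`` comments are read only by the type checker, and the guarded ``if False:`` import block keeps the ``typing`` names visible to it without adding any runtime import. A minimal, self-contained sketch of the pattern (the helper below is illustrative, not Sphinx code)::

    if False:
        # For type annotation only; this block never runs, so the import
        # costs nothing at runtime and is seen only by the type checker.
        from typing import Dict, List  # NOQA


    def split_words(text):
        # type: (str) -> List[str]
        """Split *text* on whitespace (illustrative helper)."""
        return text.split()


    word_cache = {}  # type: Dict[str, List[str]]
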
@@ -107,6 +119,7 @@ from sphinx.search.en import SearchEnglish def parse_stop_word(source): + # type: (unicode) -> Set[unicode] """ parse snowball style word list like this: @@ -138,7 +151,7 @@ languages = { 'sv': 'sphinx.search.sv.SearchSwedish', 'tr': 'sphinx.search.tr.SearchTurkish', 'zh': 'sphinx.search.zh.SearchChinese', -} +} # type: Dict[unicode, Any] class _JavaScriptIndex(object): @@ -151,9 +164,11 @@ class _JavaScriptIndex(object): SUFFIX = ')' def dumps(self, data): + # type: (Any) -> unicode return self.PREFIX + jsdump.dumps(data) + self.SUFFIX def loads(self, s): + # type: (str) -> Any data = s[len(self.PREFIX):-len(self.SUFFIX)] if not data or not s.startswith(self.PREFIX) or not \ s.endswith(self.SUFFIX): @@ -161,9 +176,11 @@ class _JavaScriptIndex(object): return jsdump.loads(data) def dump(self, data, f): + # type: (Any, IO) -> None f.write(self.dumps(data)) def load(self, f): + # type: (IO) -> Any return self.loads(f.read()) @@ -176,12 +193,14 @@ class WordCollector(NodeVisitor): """ def __init__(self, document, lang): + # type: (nodes.Node, SearchLanguage) -> None NodeVisitor.__init__(self, document) - self.found_words = [] - self.found_title_words = [] + self.found_words = [] # type: List[unicode] + self.found_title_words = [] # type: List[unicode] self.lang = lang def is_meta_keywords(self, node, nodetype): + # type: (nodes.Node, Type) -> bool if isinstance(node, sphinx.addnodes.meta) and node.get('name') == 'keywords': meta_lang = node.get('lang') if meta_lang is None: # lang not specified @@ -192,6 +211,7 @@ class WordCollector(NodeVisitor): return False def dispatch_visit(self, node): + # type: (nodes.Node) -> None nodetype = type(node) if issubclass(nodetype, comment): raise SkipNode @@ -223,28 +243,29 @@ class IndexBuilder(object): formats = { 'jsdump': jsdump, 'pickle': pickle - } + } # type: Dict[unicode, Any] def __init__(self, env, lang, options, scoring): + # type: (BuildEnvironment, unicode, Dict, unicode) -> None self.env = env - # docname -> title - self._titles = {} - # docname -> filename - self._filenames = {} - # stemmed word -> set(docname) - self._mapping = {} - # stemmed words in titles -> set(docname) - self._title_mapping = {} - # word -> stemmed word - self._stem_cache = {} - # objtype -> index - self._objtypes = {} - # objtype index -> (domain, type, objname (localized)) - self._objnames = {} - # add language-specific SearchLanguage instance - lang_class = languages.get(lang) + self._titles = {} # type: Dict[unicode, unicode] + # docname -> title + self._filenames = {} # type: Dict[unicode, unicode] + # docname -> filename + self._mapping = {} # type: Dict[unicode, Set[unicode]] + # stemmed word -> set(docname) + self._title_mapping = {} # type: Dict[unicode, Set[unicode]] + # stemmed words in titles -> set(docname) + self._stem_cache = {} # type: Dict[unicode, unicode] + # word -> stemmed word + self._objtypes = {} # type: Dict[Tuple[unicode, unicode], int] + # objtype -> index + self._objnames = {} # type: Dict[int, Tuple[unicode, unicode, unicode]] + # objtype index -> (domain, type, objname (localized)) + lang_class = languages.get(lang) # type: Type[SearchLanguage] + # add language-specific SearchLanguage instance if lang_class is None: - self.lang = SearchEnglish(options) + self.lang = SearchEnglish(options) # type: SearchLanguage elif isinstance(lang_class, str): module, classname = lang_class.rsplit('.', 1) lang_class = getattr(__import__(module, None, None, [classname]), @@ -261,6 +282,7 @@ class IndexBuilder(object): 
self.js_scorer_code = u'' def load(self, stream, format): + # type: (IO, Any) -> None """Reconstruct from frozen data.""" if isinstance(format, string_types): format = self.formats[format] @@ -273,6 +295,7 @@ class IndexBuilder(object): self._titles = dict(zip(index2fn, frozen['titles'])) def load_terms(mapping): + # type: (Dict[unicode, Any]) -> Dict[unicode, Set[unicode]] rv = {} for k, v in iteritems(mapping): if isinstance(v, int): @@ -286,13 +309,15 @@ class IndexBuilder(object): # no need to load keywords/objtypes def dump(self, stream, format): + # type: (IO, Any) -> None """Dump the frozen index to a stream.""" if isinstance(format, string_types): format = self.formats[format] - format.dump(self.freeze(), stream) + format.dump(self.freeze(), stream) # type: ignore def get_objects(self, fn2index): - rv = {} + # type: (Dict[unicode, int]) -> Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]] # NOQA + rv = {} # type: Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]] otypes = self._objtypes onames = self._objnames for domainname, domain in sorted(iteritems(self.env.domains)): @@ -319,7 +344,7 @@ class IndexBuilder(object): else: onames[typeindex] = (domainname, type, type) if anchor == fullname: - shortanchor = '' + shortanchor = '' # type: unicode elif anchor == type + '-' + fullname: shortanchor = '-' else: @@ -328,7 +353,8 @@ class IndexBuilder(object): return rv def get_terms(self, fn2index): - rvs = {}, {} + # type: (Dict) -> Tuple[Dict[unicode, List[unicode]], Dict[unicode, List[unicode]]] + rvs = {}, {} # type: Tuple[Dict[unicode, List[unicode]], Dict[unicode, List[unicode]]] for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)): for k, v in iteritems(mapping): if len(v) == 1: @@ -340,6 +366,7 @@ class IndexBuilder(object): return rvs def freeze(self): + # type: () -> Dict[unicode, Any] """Create a usable data structure for serializing.""" docnames, titles = zip(*sorted(self._titles.items())) filenames = [self._filenames.get(docname) for docname in docnames] @@ -355,9 +382,11 @@ class IndexBuilder(object): titleterms=title_terms, envversion=self.env.version) def label(self): + # type: () -> unicode return "%s (code: %s)" % (self.lang.language_name, self.lang.lang) def prune(self, filenames): + # type: (Iterable[unicode]) -> None """Remove data for all filenames not in the list.""" new_titles = {} for filename in filenames: @@ -370,6 +399,7 @@ class IndexBuilder(object): wordnames.intersection_update(filenames) def feed(self, docname, filename, title, doctree): + # type: (unicode, unicode, unicode, nodes.Node) -> None """Feed a doctree to the index.""" self._titles[docname] = title self._filenames[docname] = filename @@ -379,6 +409,7 @@ class IndexBuilder(object): # memoize self.lang.stem def stem(word): + # type: (unicode) -> unicode try: return self._stem_cache[word] except KeyError: @@ -403,6 +434,7 @@ class IndexBuilder(object): self._mapping.setdefault(stemmed_word, set()).add(docname) def context_for_searchtool(self): + # type: () -> Dict[unicode, Any] return dict( search_language_stemming_code = self.lang.js_stemmer_code, search_language_stop_words = jsdump.dumps(sorted(self.lang.stopwords)), @@ -410,6 +442,7 @@ class IndexBuilder(object): ) def get_js_stemmer_rawcode(self): + # type: () -> unicode if self.lang.js_stemmer_rawcode: return path.join( path.dirname(path.abspath(__file__)), diff --git a/sphinx/search/en.py b/sphinx/search/en.py index d5259bed7..22d4e5acb 100644 --- a/sphinx/search/en.py +++ b/sphinx/search/en.py @@ -224,12 +224,15 
@@ class SearchEnglish(SearchLanguage): stopwords = english_stopwords def init(self, options): + # type: (Dict) -> None if PYSTEMMER: class Stemmer(object): def __init__(self): + # type: () -> None self.stemmer = PyStemmer('porter') def stem(self, word): + # type: (unicode) -> unicode return self.stemmer.stemWord(word) else: class Stemmer(PorterStemmer): @@ -237,9 +240,11 @@ class SearchEnglish(SearchLanguage): make at least the stem method nicer. """ def stem(self, word): + # type: (unicode) -> unicode return PorterStemmer.stem(self, word, 0, len(word) - 1) self.stemmer = Stemmer() def stem(self, word): + # type: (unicode) -> unicode return self.stemmer.stem(word.lower()) diff --git a/sphinx/search/ja.py b/sphinx/search/ja.py index 0d4d01b9c..cf3b67c00 100644 --- a/sphinx/search/ja.py +++ b/sphinx/search/ja.py @@ -43,9 +43,11 @@ from sphinx.util import import_object class BaseSplitter(object): def __init__(self, options): + # type: (Dict) -> None self.options = options def split(self, input): + # type: (unicode) -> List[unicode] """ :param str input: @@ -57,9 +59,10 @@ class BaseSplitter(object): class MecabSplitter(BaseSplitter): def __init__(self, options): + # type: (Dict) -> None super(MecabSplitter, self).__init__(options) - self.ctypes_libmecab = None - self.ctypes_mecab = None + self.ctypes_libmecab = None # type: ignore + self.ctypes_mecab = None # type: ignore if not native_module: self.init_ctypes(options) else: @@ -67,6 +70,7 @@ class MecabSplitter(BaseSplitter): self.dict_encode = options.get('dic_enc', 'utf-8') def split(self, input): + # type: (unicode) -> List[unicode] input2 = input if PY3 else input.encode(self.dict_encode) if native_module: result = self.native.parse(input2) @@ -79,6 +83,7 @@ class MecabSplitter(BaseSplitter): return result.decode(self.dict_encode).split(' ') def init_native(self, options): + # type: (Dict) -> None param = '-Owakati' dict = options.get('dict') if dict: @@ -86,6 +91,7 @@ class MecabSplitter(BaseSplitter): self.native = MeCab.Tagger(param) def init_ctypes(self, options): + # type: (Dict) -> None import ctypes.util lib = options.get('lib') @@ -122,6 +128,7 @@ class MecabSplitter(BaseSplitter): raise SphinxError('mecab initialization failed') def __del__(self): + # type: () -> None if self.ctypes_libmecab: self.ctypes_libmecab.mecab_destroy(self.ctypes_mecab) @@ -130,17 +137,20 @@ MeCabBinder = MecabSplitter # keep backward compatibility until Sphinx-1.6 class JanomeSplitter(BaseSplitter): def __init__(self, options): + # type: (Dict) -> None super(JanomeSplitter, self).__init__(options) self.user_dict = options.get('user_dic') self.user_dict_enc = options.get('user_dic_enc', 'utf8') self.init_tokenizer() def init_tokenizer(self): + # type: () -> None if not janome_module: raise RuntimeError('Janome is not available') self.tokenizer = janome.tokenizer.Tokenizer(udic=self.user_dict, udic_enc=self.user_dict_enc) def split(self, input): + # type: (unicode) -> List[unicode] result = u' '.join(token.surface for token in self.tokenizer.tokenize(input)) return result.split(u' ') @@ -417,6 +427,7 @@ class DefaultSplitter(BaseSplitter): # ctype_ def ctype_(self, char): + # type: (unicode) -> unicode for pattern, value in iteritems(self.patterns_): if pattern.match(char): return value @@ -424,12 +435,14 @@ class DefaultSplitter(BaseSplitter): # ts_ def ts_(self, dict, key): + # type: (Dict[unicode, int], unicode) -> int if key in dict: return dict[key] return 0 # segment def split(self, input): + # type: (unicode) -> List[unicode] if not input: return 
[] @@ -538,6 +551,7 @@ class SearchJapanese(SearchLanguage): } def init(self, options): + # type: (Dict) -> None type = options.get('type', 'default') if type in self.splitters: dotted_path = self.splitters[type] @@ -550,10 +564,13 @@ class SearchJapanese(SearchLanguage): dotted_path) def split(self, input): + # type: (unicode) -> List[unicode] return self.splitter.split(input) def word_filter(self, stemmed_word): + # type: (unicode) -> bool return len(stemmed_word) > 1 def stem(self, word): + # type: (unicode) -> unicode return word diff --git a/sphinx/search/ro.py b/sphinx/search/ro.py index 78ae01851..f44f38e34 100644 --- a/sphinx/search/ro.py +++ b/sphinx/search/ro.py @@ -24,10 +24,12 @@ class SearchRomanian(SearchLanguage): language_name = 'Romanian' js_stemmer_rawcode = 'romanian-stemmer.js' js_stemmer_code = js_stemmer - stopwords = [] + stopwords = [] # type: List[unicode] def init(self, options): + # type: (Dict) -> None self.stemmer = snowballstemmer.stemmer('romanian') def stem(self, word): + # type: (unicode) -> unicode return self.stemmer.stemWord(word) diff --git a/sphinx/search/tr.py b/sphinx/search/tr.py index 33c5c5192..14cc710f8 100644 --- a/sphinx/search/tr.py +++ b/sphinx/search/tr.py @@ -24,10 +24,12 @@ class SearchTurkish(SearchLanguage): language_name = 'Turkish' js_stemmer_rawcode = 'turkish-stemmer.js' js_stemmer_code = js_stemmer - stopwords = [] + stopwords = [] # type: List[unicode] def init(self, options): + # type: (Dict) -> None self.stemmer = snowballstemmer.stemmer('turkish') def stem(self, word): + # type: (unicode) -> unicode return self.stemmer.stemWord(word) diff --git a/sphinx/search/zh.py b/sphinx/search/zh.py index c1fecefc6..bd4787506 100644 --- a/sphinx/search/zh.py +++ b/sphinx/search/zh.py @@ -238,6 +238,7 @@ class SearchChinese(SearchLanguage): latin1_letters = re.compile(r'\w+(?u)[\u0000-\u00ff]') def init(self, options): + # type: (Dict) -> None if JIEBA: dict_path = options.get('dict') if dict_path and os.path.isfile(dict_path): @@ -246,9 +247,11 @@ class SearchChinese(SearchLanguage): if PYSTEMMER: class Stemmer(object): def __init__(self): + # type: () -> None self.stemmer = PyStemmer('porter') def stem(self, word): + # type: (unicode) -> unicode return self.stemmer.stemWord(word) else: class Stemmer(PorterStemmer): @@ -256,20 +259,24 @@ class SearchChinese(SearchLanguage): make at least the stem method nicer. 
""" def stem(self, word): + # type: (unicode) -> unicode return PorterStemmer.stem(self, word, 0, len(word) - 1) self.stemmer = Stemmer() def split(self, input): - chinese = [] + # type: (unicode) -> List[unicode] + chinese = [] # type: List[unicode] if JIEBA: chinese = list(jieba.cut_for_search(input)) - latin1 = self.latin1_letters.findall(input) + latin1 = self.latin1_letters.findall(input) # type: ignore return chinese + latin1 def word_filter(self, stemmed_word): + # type: (unicode) -> bool return len(stemmed_word) > 1 def stem(self, word): + # type: (unicode) -> unicode return self.stemmer.stem(word) diff --git a/sphinx/setup_command.py b/sphinx/setup_command.py index c23f22228..f263f8df1 100644 --- a/sphinx/setup_command.py +++ b/sphinx/setup_command.py @@ -18,7 +18,7 @@ import os from six import StringIO, string_types from distutils.cmd import Command -from distutils.errors import DistutilsOptionError, DistutilsExecError +from distutils.errors import DistutilsOptionError, DistutilsExecError # type: ignore from sphinx.application import Sphinx from sphinx.cmdline import handle_exception @@ -26,6 +26,10 @@ from sphinx.util.console import nocolor, color_terminal from sphinx.util.docutils import docutils_namespace from sphinx.util.osutil import abspath +if False: + # For type annotation + from typing import Any # NOQA + class BuildDoc(Command): """ @@ -87,22 +91,24 @@ class BuildDoc(Command): 'link-index'] def initialize_options(self): + # type: () -> None self.fresh_env = self.all_files = False self.pdb = False - self.source_dir = self.build_dir = None + self.source_dir = self.build_dir = None # type: unicode self.builder = 'html' self.warning_is_error = False self.project = '' self.version = '' self.release = '' self.today = '' - self.config_dir = None + self.config_dir = None # type: unicode self.link_index = False self.copyright = '' self.verbosity = 0 self.traceback = False def _guess_source_dir(self): + # type: () -> unicode for guess in ('doc', 'docs'): if not os.path.isdir(guess): continue @@ -115,6 +121,7 @@ class BuildDoc(Command): # unicode, causing finalize_options to fail if invoked again. 
Workaround # for http://bugs.python.org/issue19570 def _ensure_stringlike(self, option, what, default=None): + # type: (unicode, unicode, Any) -> Any val = getattr(self, option) if val is None: setattr(self, option, default) @@ -125,10 +132,11 @@ class BuildDoc(Command): return val def finalize_options(self): + # type: () -> None if self.source_dir is None: self.source_dir = self._guess_source_dir() - self.announce('Using source directory %s' % self.source_dir) - self.ensure_dirname('source_dir') + self.announce('Using source directory %s' % self.source_dir) # type: ignore + self.ensure_dirname('source_dir') # type: ignore if self.source_dir is None: self.source_dir = os.curdir self.source_dir = abspath(self.source_dir) @@ -137,22 +145,23 @@ class BuildDoc(Command): self.config_dir = abspath(self.config_dir) if self.build_dir is None: - build = self.get_finalized_command('build') + build = self.get_finalized_command('build') # type: ignore self.build_dir = os.path.join(abspath(build.build_base), 'sphinx') - self.mkpath(self.build_dir) + self.mkpath(self.build_dir) # type: ignore self.build_dir = abspath(self.build_dir) self.doctree_dir = os.path.join(self.build_dir, 'doctrees') - self.mkpath(self.doctree_dir) + self.mkpath(self.doctree_dir) # type: ignore self.builder_target_dir = os.path.join(self.build_dir, self.builder) - self.mkpath(self.builder_target_dir) + self.mkpath(self.builder_target_dir) # type: ignore def run(self): + # type: () -> None if not color_terminal(): nocolor() - if not self.verbose: + if not self.verbose: # type: ignore status_stream = StringIO() else: - status_stream = sys.stdout + status_stream = sys.stdout # type: ignore confoverrides = {} if self.project: confoverrides['project'] = self.project @@ -182,6 +191,6 @@ class BuildDoc(Command): raise SystemExit(1) if self.link_index: - src = app.config.master_doc + app.builder.out_suffix - dst = app.builder.get_outfilename('index') + src = app.config.master_doc + app.builder.out_suffix # type: ignore + dst = app.builder.get_outfilename('index') # type: ignore os.symlink(src, dst) diff --git a/sphinx/theming.py b/sphinx/theming.py index 42e4448db..4e05652cd 100644 --- a/sphinx/theming.py +++ b/sphinx/theming.py @@ -16,7 +16,8 @@ import tempfile from os import path from six import string_types, iteritems -from six.moves import configparser +from six.moves import configparser # type: ignore +from typing import Any, Callable, Tuple # NOQA try: import pkg_resources @@ -26,6 +27,10 @@ except ImportError: from sphinx import package_dir from sphinx.errors import ThemeError +if False: + # For type annotation + from typing import Any, Callable, Tuple # NOQA + NODEFAULT = object() THEMECONF = 'theme.conf' @@ -34,10 +39,12 @@ class Theme(object): """ Represents the theme chosen in the configuration. 
""" - themes = {} + themes = {} # type: Dict[unicode, Tuple[unicode, zipfile.ZipFile]] + themepath = [] # type: List[unicode] @classmethod def init_themes(cls, confdir, theme_path, warn=None): + # type: (unicode, unicode, Callable) -> None """Search all theme paths for available themes.""" cls.themepath = list(theme_path) cls.themepath.append(path.join(package_dir, 'themes')) @@ -49,7 +56,7 @@ class Theme(object): for theme in os.listdir(themedir): if theme.lower().endswith('.zip'): try: - zfile = zipfile.ZipFile(path.join(themedir, theme)) + zfile = zipfile.ZipFile(path.join(themedir, theme)) # type: ignore if THEMECONF not in zfile.namelist(): continue tname = theme[:-4] @@ -68,6 +75,7 @@ class Theme(object): @classmethod def load_extra_theme(cls, name): + # type: (unicode) -> None themes = ['alabaster'] try: import sphinx_rtd_theme @@ -98,6 +106,7 @@ class Theme(object): return def __init__(self, name, warn=None): + # type: (unicode, Callable) -> None if name not in self.themes: self.load_extra_theme(name) if name not in self.themes: @@ -156,6 +165,7 @@ class Theme(object): self.base = Theme(inherit, warn=warn) def get_confstr(self, section, name, default=NODEFAULT): + # type: (unicode, unicode, Any) -> Any """Return the value for a theme configuration setting, searching the base theme chain. """ @@ -171,13 +181,14 @@ class Theme(object): return default def get_options(self, overrides): + # type: (Dict) -> Any """Return a dictionary of theme options and their values.""" chain = [self.themeconf] base = self.base while base is not None: chain.append(base.themeconf) base = base.base - options = {} + options = {} # type: Dict[unicode, Any] for conf in reversed(chain): try: options.update(conf.items('options')) @@ -190,6 +201,7 @@ class Theme(object): return options def get_dirchain(self): + # type: () -> List[unicode] """Return a list of theme directories, beginning with this theme's, then the base theme's, then that one's base theme's, etc. """ @@ -201,6 +213,7 @@ class Theme(object): return chain def cleanup(self): + # type: () -> None """Remove temporary directories.""" if self.themedir_created: try: @@ -212,6 +225,7 @@ class Theme(object): def load_theme_plugins(): + # type: () -> List[unicode] """load plugins by using``sphinx_themes`` section in setuptools entry_points. This API will return list of directory that contain some theme directory. """ @@ -219,7 +233,7 @@ def load_theme_plugins(): if not pkg_resources: return [] - theme_paths = [] + theme_paths = [] # type: List[unicode] for plugin in pkg_resources.iter_entry_points('sphinx_themes'): func_or_path = plugin.load() @@ -229,7 +243,7 @@ def load_theme_plugins(): path = func_or_path if isinstance(path, string_types): - theme_paths.append(path) + theme_paths.append(path) # type: ignore else: raise ThemeError('Plugin %r does not response correctly.' 
% plugin.module_name) diff --git a/sphinx/transforms/__init__.py b/sphinx/transforms/__init__.py index 79ac99c9f..68e45d62d 100644 --- a/sphinx/transforms/__init__.py +++ b/sphinx/transforms/__init__.py @@ -33,6 +33,7 @@ class DefaultSubstitutions(Transform): default_priority = 210 def apply(self): + # type: () -> None env = self.document.settings.env config = self.document.settings.env.config # only handle those not otherwise defined in the document @@ -58,6 +59,7 @@ class MoveModuleTargets(Transform): default_priority = 210 def apply(self): + # type: () -> None for node in self.document.traverse(nodes.target): if not node['ids']: continue @@ -76,6 +78,7 @@ class HandleCodeBlocks(Transform): default_priority = 210 def apply(self): + # type: () -> None # move doctest blocks out of blockquotes for node in self.document.traverse(nodes.block_quote): if all(isinstance(child, nodes.doctest_block) for child @@ -100,6 +103,7 @@ class AutoNumbering(Transform): default_priority = 210 def apply(self): + # type: () -> None domain = self.document.settings.env.domains['std'] for node in self.document.traverse(nodes.Element): @@ -114,6 +118,7 @@ class SortIds(Transform): default_priority = 261 def apply(self): + # type: () -> None for node in self.document.traverse(nodes.section): if len(node['ids']) > 1 and node['ids'][0].startswith('id'): node['ids'] = node['ids'][1:] + [node['ids'][0]] @@ -127,6 +132,7 @@ class CitationReferences(Transform): default_priority = 619 def apply(self): + # type: () -> None for citnode in self.document.traverse(nodes.citation_reference): cittext = citnode.astext() refnode = addnodes.pending_xref(cittext, refdomain='std', reftype='citation', @@ -154,6 +160,7 @@ class ApplySourceWorkaround(Transform): default_priority = 10 def apply(self): + # type: () -> None for n in self.document.traverse(): if isinstance(n, nodes.TextElement): apply_source_workaround(n) @@ -166,6 +173,7 @@ class AutoIndexUpgrader(Transform): default_priority = 210 def apply(self): + # type: () -> None env = self.document.settings.env for node in self.document.traverse(addnodes.index): if 'entries' in node and any(len(entry) == 4 for entry in node['entries']): @@ -184,12 +192,14 @@ class ExtraTranslatableNodes(Transform): default_priority = 10 def apply(self): + # type: () -> None targets = self.document.settings.env.config.gettext_additional_targets target_nodes = [v for k, v in TRANSLATABLE_NODES.items() if k in targets] if not target_nodes: return def is_translatable_node(node): + # type: (nodes.Node) -> bool return isinstance(node, tuple(target_nodes)) for node in self.document.traverse(is_translatable_node): @@ -201,6 +211,7 @@ class FilterSystemMessages(Transform): default_priority = 999 def apply(self): + # type: () -> None env = self.document.settings.env filterlevel = env.config.keep_warnings and 2 or 5 for node in self.document.traverse(nodes.system_message): @@ -215,9 +226,11 @@ class SphinxContentsFilter(ContentsFilter): within table-of-contents link nodes. 
""" def visit_pending_xref(self, node): + # type: (nodes.Node) -> None text = node.astext() self.parent.append(nodes.literal(text, text)) raise nodes.SkipNode def visit_image(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode diff --git a/sphinx/transforms/compact_bullet_list.py b/sphinx/transforms/compact_bullet_list.py index 61b23f382..0fe2e8b83 100644 --- a/sphinx/transforms/compact_bullet_list.py +++ b/sphinx/transforms/compact_bullet_list.py @@ -23,12 +23,15 @@ class RefOnlyListChecker(nodes.GenericNodeVisitor): """ def default_visit(self, node): + # type: (nodes.Node) -> None raise nodes.NodeFound def visit_bullet_list(self, node): + # type: (nodes.Node) -> None pass def visit_list_item(self, node): + # type: (nodes.Node) -> None children = [] for child in node.children: if not isinstance(child, nodes.Invisible): @@ -45,6 +48,7 @@ class RefOnlyListChecker(nodes.GenericNodeVisitor): raise nodes.SkipChildren def invisible_visit(self, node): + # type: (nodes.Node) -> None """Invisible nodes should be ignored.""" pass @@ -58,11 +62,13 @@ class RefOnlyBulletListTransform(Transform): default_priority = 100 def apply(self): + # type: () -> None env = self.document.settings.env if env.config.html_compact_lists: return def check_refonly_list(node): + # type: (nodes.Node) -> bool """Check for list with only references in it.""" visitor = RefOnlyListChecker(self.document) try: diff --git a/sphinx/transforms/i18n.py b/sphinx/transforms/i18n.py index 38c5aef25..693ae663e 100644 --- a/sphinx/transforms/i18n.py +++ b/sphinx/transforms/i18n.py @@ -27,8 +27,15 @@ from sphinx.util.pycompat import indent from sphinx.locale import init as init_locale from sphinx.domains.std import make_glossary_term, split_term_classifiers +if False: + # For type annotation + from typing import Any, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.config import Config # NOQA + def publish_msgstr(app, source, source_path, source_line, config, settings): + # type: (Sphinx, unicode, unicode, int, Config, Dict) -> nodes.document """Publish msgstr (single line) into docutils document :param sphinx.application.Sphinx app: sphinx application @@ -66,6 +73,7 @@ class PreserveTranslatableMessages(Transform): default_priority = 10 # this MUST be invoked before Locale transform def apply(self): + # type: () -> None for node in self.document.traverse(addnodes.translatable): node.preserve_original_messages() @@ -77,6 +85,7 @@ class Locale(Transform): default_priority = 20 def apply(self): + # type: () -> None env = self.document.settings.env settings, source = self.document.settings, self.document['source'] # XXX check if this is reliable @@ -176,6 +185,7 @@ class Locale(Transform): # replace target's refname to new target name def is_named_target(node): + # type: (nodes.Node) -> bool return isinstance(node, nodes.target) and \ node.get('refname') == old_name for old_target in self.document.traverse(is_named_target): @@ -249,10 +259,12 @@ class Locale(Transform): # auto-numbered foot note reference should use original 'ids'. 
def is_autonumber_footnote_ref(node): + # type: (nodes.Node) -> bool return isinstance(node, nodes.footnote_reference) and \ node.get('auto') == 1 def list_replace_or_append(lst, old, new): + # type: (List, Any, Any) -> None if old in lst: lst[lst.index(old)] = new else: @@ -262,7 +274,7 @@ class Locale(Transform): if len(old_foot_refs) != len(new_foot_refs): env.warn_node('inconsistent footnote references in ' 'translated message', node) - old_foot_namerefs = {} + old_foot_namerefs = {} # type: Dict[unicode, List[nodes.footnote_reference]] for r in old_foot_refs: old_foot_namerefs.setdefault(r.get('refname'), []).append(r) for new in new_foot_refs: @@ -315,6 +327,7 @@ class Locale(Transform): # refnamed footnote and citation should use original 'ids'. def is_refnamed_footnote_ref(node): + # type: (nodes.Node) -> bool footnote_ref_classes = (nodes.footnote_reference, nodes.citation_reference) return isinstance(node, footnote_ref_classes) and \ @@ -343,6 +356,7 @@ class Locale(Transform): 'translated message', node) def get_ref_key(node): + # type: (nodes.Node) -> Tuple[unicode, unicode, unicode] case = node["refdomain"], node["reftype"] if case == ('std', 'term'): return None @@ -384,7 +398,7 @@ class Locale(Transform): if 'index' in env.config.gettext_additional_targets: # Extract and translate messages for index entries. for node, entries in traverse_translatable_index(self.document): - new_entries = [] + new_entries = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode]] # NOQA for type, msg, tid, main, key_ in entries: msg_parts = split_index_msg(type, msg) msgstr_parts = [] @@ -407,6 +421,7 @@ class RemoveTranslatableInline(Transform): default_priority = 999 def apply(self): + # type: () -> None from sphinx.builders.gettext import MessageCatalogBuilder env = self.document.settings.env builder = env.app.builder diff --git a/sphinx/versioning.py b/sphinx/versioning.py index f6c446b4f..0f862ac67 100644 --- a/sphinx/versioning.py +++ b/sphinx/versioning.py @@ -16,6 +16,11 @@ from itertools import product from six import iteritems from six.moves import range, zip_longest +if False: + # For type annotation + from typing import Any, Iterator # NOQA + from docutils import nodes # NOQA + try: import Levenshtein IS_SPEEDUP = True @@ -27,6 +32,7 @@ VERSIONING_RATIO = 65 def add_uids(doctree, condition): + # type: (nodes.Node, Any) -> Iterator[nodes.Node] """Add a unique id to every node in the `doctree` which matches the condition and yield the nodes. @@ -42,6 +48,7 @@ def add_uids(doctree, condition): def merge_doctrees(old, new, condition): + # type: (nodes.Node, nodes.Node, Any) -> Iterator[nodes.Node] """Merge the `old` doctree with the `new` one while looking at nodes matching the `condition`. @@ -90,7 +97,7 @@ def merge_doctrees(old, new, condition): # choose the old node with the best ratio for each new node and set the uid # as long as the ratio is under a certain value, in which case we consider # them not changed but different - ratios = sorted(iteritems(ratios), key=itemgetter(1)) + ratios = sorted(iteritems(ratios), key=itemgetter(1)) # type: ignore for (old_node, new_node), ratio in ratios: if new_node in seen: continue @@ -109,6 +116,7 @@ def merge_doctrees(old, new, condition): def get_ratio(old, new): + # type: (unicode, unicode) -> float """Return a "similiarity ratio" (in percent) representing the similarity between the two strings where 0 is equal and anything above less than equal. 
""" @@ -122,6 +130,7 @@ def get_ratio(old, new): def levenshtein_distance(a, b): + # type: (unicode, unicode) -> int """Return the Levenshtein edit distance between two strings *a* and *b*.""" if a == b: return 0 @@ -137,5 +146,5 @@ def levenshtein_distance(a, b): deletions = current_row[j] + 1 substitutions = previous_row[j] + (column1 != column2) current_row.append(min(insertions, deletions, substitutions)) - previous_row = current_row + previous_row = current_row # type: ignore return previous_row[-1] diff --git a/sphinx/websupport/__init__.py b/sphinx/websupport/__init__.py index 69914da95..f7b215f83 100644 --- a/sphinx/websupport/__init__.py +++ b/sphinx/websupport/__init__.py @@ -66,7 +66,7 @@ class WebSupport(object): self._init_search(search) self._init_storage(storage) - self._globalcontext = None + self._globalcontext = None # type: ignore self._make_base_comment_options() @@ -119,7 +119,7 @@ class WebSupport(object): raise RuntimeError('No srcdir associated with WebSupport object') app = Sphinx(self.srcdir, self.srcdir, self.outdir, self.doctreedir, 'websupport', status=self.status, warning=self.warning) - app.builder.set_webinfo(self.staticdir, self.staticroot, + app.builder.set_webinfo(self.staticdir, self.staticroot, # type: ignore self.search, self.storage) self.storage.pre_build() @@ -384,7 +384,7 @@ class WebSupport(object): that remains the same throughout the lifetime of the :class:`~sphinx.websupport.WebSupport` object. """ - self.base_comment_opts = {} + self.base_comment_opts = {} # type: Dict[unicode, unicode] if self.docroot != '': comment_urls = [ diff --git a/sphinx/websupport/storage/sqlalchemy_db.py b/sphinx/websupport/storage/sqlalchemy_db.py index b412ad488..16418ec8f 100644 --- a/sphinx/websupport/storage/sqlalchemy_db.py +++ b/sphinx/websupport/storage/sqlalchemy_db.py @@ -14,7 +14,7 @@ from datetime import datetime from sqlalchemy import Column, Integer, Text, String, Boolean, \ ForeignKey, DateTime -from sqlalchemy.orm import relation, sessionmaker, aliased +from sqlalchemy.orm import relation, sessionmaker, aliased # type: ignore from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() @@ -23,7 +23,7 @@ Session = sessionmaker() db_prefix = 'sphinx_' -class Node(Base): +class Node(Base): # type: ignore """Data about a Node in a doctree.""" __tablename__ = db_prefix + 'nodes' @@ -74,7 +74,7 @@ class Node(Base): :param results: the flat list of comments :param username: the name of the user requesting the comments. 
""" - comments = [] + comments = [] # type: List list_stack = [comments] for r in results: if username: @@ -101,7 +101,7 @@ class Node(Base): self.source = source -class CommentVote(Base): +class CommentVote(Base): # type: ignore """A vote a user has made on a Comment.""" __tablename__ = db_prefix + 'commentvote' @@ -117,7 +117,7 @@ class CommentVote(Base): self.value = value -class Comment(Base): +class Comment(Base): # type: ignore """An individual Comment being stored.""" __tablename__ = db_prefix + 'comments' diff --git a/sphinx/websupport/storage/sqlalchemystorage.py b/sphinx/websupport/storage/sqlalchemystorage.py index c8794f75c..8b7d76714 100644 --- a/sphinx/websupport/storage/sqlalchemystorage.py +++ b/sphinx/websupport/storage/sqlalchemystorage.py @@ -12,7 +12,7 @@ from datetime import datetime import sqlalchemy -from sqlalchemy.orm import aliased +from sqlalchemy.orm import aliased # type: ignore from sqlalchemy.sql import func from sphinx.websupport.errors import CommentNotAllowedError, \ @@ -22,7 +22,7 @@ from sphinx.websupport.storage.sqlalchemy_db import Base, Node, \ Comment, CommentVote, Session from sphinx.websupport.storage.differ import CombinedHtmlDiff -if sqlalchemy.__version__[:3] < '0.5': +if sqlalchemy.__version__[:3] < '0.5': # type: ignore raise ImportError('SQLAlchemy version 0.5 or greater is required for this ' 'storage backend; you have version %s' % sqlalchemy.__version__) From b0a11d171c99d3719d11b6ab7885e885d63a75b9 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 17 Nov 2016 21:20:58 +0900 Subject: [PATCH 012/190] Use env.get_domain() instead env.domains[] --- sphinx/application.py | 2 +- sphinx/environment/managers/toctree.py | 2 +- sphinx/ext/intersphinx.py | 2 +- sphinx/transforms/__init__.py | 2 +- sphinx/util/docutils.py | 4 ++-- sphinx/writers/latex.py | 2 +- sphinx/writers/texinfo.py | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 08075d8e1..e8e212696 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -332,7 +332,7 @@ class Sphinx(object): def _init_enumerable_nodes(self): # type: () -> None for node, settings in iteritems(self.enumerable_nodes): - self.env.domains['std'].enumerable_nodes[node] = settings # type: ignore + self.env.get_domain('std').enumerable_nodes[node] = settings # type: ignore # ---- main "build" method ------------------------------------------------- diff --git a/sphinx/environment/managers/toctree.py b/sphinx/environment/managers/toctree.py index 195349d3e..26c8f385d 100644 --- a/sphinx/environment/managers/toctree.py +++ b/sphinx/environment/managers/toctree.py @@ -559,7 +559,7 @@ class Toctree(EnvironmentManager): continue - figtype = self.env.domains['std'].get_figtype(subnode) # type: ignore + figtype = self.env.get_domain('std').get_figtype(subnode) # type: ignore if figtype and subnode['ids']: register_fignumber(docname, secnum, figtype, subnode) diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py index df561204e..42aafdf94 100644 --- a/sphinx/ext/intersphinx.py +++ b/sphinx/ext/intersphinx.py @@ -341,7 +341,7 @@ def missing_reference(app, env, node, contnode): if not domain: # only objects in domains are in the inventory return - objtypes = env.domains[domain].objtypes_for_role(node['reftype']) + objtypes = env.get_domain(domain).objtypes_for_role(node['reftype']) if not objtypes: return objtypes = ['%s:%s' % (domain, objtype) for objtype in objtypes] diff --git 
a/sphinx/transforms/__init__.py b/sphinx/transforms/__init__.py index 68e45d62d..ab8f86500 100644 --- a/sphinx/transforms/__init__.py +++ b/sphinx/transforms/__init__.py @@ -104,7 +104,7 @@ class AutoNumbering(Transform): def apply(self): # type: () -> None - domain = self.document.settings.env.domains['std'] + domain = self.document.settings.env.get_domain('std') for node in self.document.traverse(nodes.Element): if domain.is_enumerable_node(node) and domain.get_numfig_title(node) is not None: diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py index a18d0b560..286b2729b 100644 --- a/sphinx/util/docutils.py +++ b/sphinx/util/docutils.py @@ -77,7 +77,7 @@ class sphinx_domains(object): if ':' in name: domain_name, name = name.split(':', 1) if domain_name in self.env.domains: - domain = self.env.domains[domain_name] + domain = self.env.get_domain(domain_name) element = getattr(domain, type)(name) if element is not None: return element, [] @@ -90,7 +90,7 @@ class sphinx_domains(object): return element, [] # always look in the std domain - element = getattr(self.env.domains['std'], type)(name) + element = getattr(self.env.get_domain('std'), type)(name) if element is not None: return element, [] diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index e084c0b49..9bae7a4b4 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -1812,7 +1812,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.next_section_ids.update(node['ids']) return else: - domain = self.builder.env.domains['std'] + domain = self.builder.env.get_domain('std') figtype = domain.get_figtype(next) if figtype and domain.get_numfig_title(next): ids = set() diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py index 0a9a42aca..fbd8b17a5 100644 --- a/sphinx/writers/texinfo.py +++ b/sphinx/writers/texinfo.py @@ -1584,7 +1584,7 @@ class TexinfoTranslator(nodes.NodeVisitor): self.add_anchor(id, node) # use the full name of the objtype for the category try: - domain = self.builder.env.domains[node.parent['domain']] + domain = self.builder.env.get_domain(node.parent['domain']) primary = self.builder.config.primary_domain name = domain.get_type_name(domain.object_types[objtype], primary == domain.name) From 45e4417393ec04654e1769c0102072001a5c653b Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 17 Nov 2016 23:02:59 +0900 Subject: [PATCH 013/190] Handle ExtensionError on get_domain() --- sphinx/writers/texinfo.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py index fbd8b17a5..76d963554 100644 --- a/sphinx/writers/texinfo.py +++ b/sphinx/writers/texinfo.py @@ -20,6 +20,7 @@ from six.moves import range from docutils import nodes, writers from sphinx import addnodes, __display_version__ +from sphinx.errors import ExtensionError from sphinx.locale import admonitionlabels, _ from sphinx.util.i18n import format_date from sphinx.writers.latex import collected_footnote @@ -1588,7 +1589,7 @@ class TexinfoTranslator(nodes.NodeVisitor): primary = self.builder.config.primary_domain name = domain.get_type_name(domain.object_types[objtype], primary == domain.name) - except KeyError: + except ExtensionError: name = objtype # by convention, the deffn category should be capitalized like a title category = self.escape_arg(smart_capwords(name)) From 1a4c41a7691e8f78d42e2db221192962c53b27df Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 17 Nov 2016 23:50:35 +0900 Subject: 
[PATCH 014/190] Fix texinfo writer handles KeyError --- sphinx/writers/texinfo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py index 76d963554..6311d3b84 100644 --- a/sphinx/writers/texinfo.py +++ b/sphinx/writers/texinfo.py @@ -1589,7 +1589,7 @@ class TexinfoTranslator(nodes.NodeVisitor): primary = self.builder.config.primary_domain name = domain.get_type_name(domain.object_types[objtype], primary == domain.name) - except ExtensionError: + except (KeyError, ExtensionError): name = objtype # by convention, the deffn category should be capitalized like a title category = self.escape_arg(smart_capwords(name)) From 595be7aef06489fb9e7545232cdd5a7d5e4061aa Mon Sep 17 00:00:00 2001 From: Wheerd <admin@wheerd.de> Date: Mon, 21 Nov 2016 19:11:04 +0100 Subject: [PATCH 015/190] Fixed the regular expression for xref to only match roles that are valid. This caused errors when having multiple successive xrefs without whitespace between them. --- sphinx/ext/napoleon/docstring.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py index 7df6e83ab..6fee87b34 100644 --- a/sphinx/ext/napoleon/docstring.py +++ b/sphinx/ext/napoleon/docstring.py @@ -33,7 +33,7 @@ _google_section_regex = re.compile(r'^(\s|\w)+:\s*$') _google_typed_arg_regex = re.compile(r'\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)') _numpy_section_regex = re.compile(r'^[=\-`:\'"~^_*+#<>]{2,}\s*$') _single_colon_regex = re.compile(r'(?<!:):(?!:)') -_xref_regex = re.compile(r'(:\w+:\S+:`.+?`|:\S+:`.+?`|`.+?`)') +_xref_regex = re.compile(r'(:(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:`.+?`)') _bullet_list_regex = re.compile(r'^(\*|\+|\-)(\s+\S|\s*$)') _enumerated_list_regex = re.compile( r'^(?P<paren>\()?' From 407a525ac8c68a8e4bfdce94251f6e584bb0e56a Mon Sep 17 00:00:00 2001 From: Rob Ruana <rob@robruana.com> Date: Mon, 21 Nov 2016 16:12:23 -0800 Subject: [PATCH 016/190] [Napoleon] adds xref test data for pull request #3168 --- tests/test_ext_napoleon.py | 1 + tests/test_ext_napoleon_docstring.py | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/test_ext_napoleon.py b/tests/test_ext_napoleon.py index 7ecd08292..21d095a79 100644 --- a/tests/test_ext_napoleon.py +++ b/tests/test_ext_napoleon.py @@ -68,6 +68,7 @@ class SampleError(Exception): def __special_undoc__(self): pass + SampleNamedTuple = namedtuple('SampleNamedTuple', 'user_id block_type def_id') diff --git a/tests/test_ext_napoleon_docstring.py b/tests/test_ext_napoleon_docstring.py index 37dcca90c..be9103063 100644 --- a/tests/test_ext_napoleon_docstring.py +++ b/tests/test_ext_napoleon_docstring.py @@ -284,8 +284,9 @@ Construct a new XBlock. This class should only be used by runtimes. Arguments: - runtime (:class:`Runtime`): Use it to access the environment. - It is available in XBlock code as ``self.runtime``. + runtime (:class:`~typing.Dict`\[:class:`int`,:class:`str`\]): Use it to + access the environment. It is available in XBlock code + as ``self.runtime``. field_data (:class:`FieldData`): Interface used by the XBlock fields to access their data from wherever it is persisted. @@ -300,9 +301,10 @@ Construct a new XBlock. This class should only be used by runtimes. -:param runtime: Use it to access the environment. - It is available in XBlock code as ``self.runtime``. -:type runtime: :class:`Runtime` +:param runtime: Use it to + access the environment. It is available in XBlock code + as ``self.runtime``. 
+:type runtime: :class:`~typing.Dict`\[:class:`int`,:class:`str`\] :param field_data: Interface used by the XBlock fields to access their data from wherever it is persisted. :type field_data: :class:`FieldData` From 43fe104501912077dde3890b392a2518f784bef9 Mon Sep 17 00:00:00 2001 From: shimizukawa <shimizukawa@gmail.com> Date: Wed, 23 Nov 2016 11:56:49 +0900 Subject: [PATCH 017/190] make-mode is used by default. --- doc/invocation.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/invocation.rst b/doc/invocation.rst index 436c652f9..4b4593014 100644 --- a/doc/invocation.rst +++ b/doc/invocation.rst @@ -128,7 +128,10 @@ Makefile and Batchfile creation options .. option:: --use-make-mode, --no-use-make-mode - Makefile/make.bat uses (or not use) make-mode. Default is not use. + Makefile/make.bat uses (or not use) make-mode. Default is use. + + .. versionchanged:: 1.5 + make-mode is default. .. option:: --makefile, --no-makefile From f45fe6fc8ca95f5b71b57b0ab80a3a44560c0295 Mon Sep 17 00:00:00 2001 From: Rob Ruana <rob@robruana.com> Date: Wed, 23 Nov 2016 10:45:39 -0800 Subject: [PATCH 018/190] Fix #3174: [Napoleon] Defers autodoc-skip-member to other extensions if Napoleon doesn't care if the member is skipped --- doc/ext/autodoc.rst | 5 +++++ setup.cfg | 2 +- sphinx/ext/napoleon/__init__.py | 2 +- tests/test_ext_napoleon.py | 10 +++++----- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/doc/ext/autodoc.rst b/doc/ext/autodoc.rst index 63c959869..50004e575 100644 --- a/doc/ext/autodoc.rst +++ b/doc/ext/autodoc.rst @@ -446,6 +446,11 @@ member should be included in the documentation by using the following event: documentation. The member is excluded if a handler returns ``True``. It is included if the handler returns ``False``. + If more than one enabled extension handles the ``autodoc-skip-member`` + event, autodoc will use the first non-``None`` value returned by a handler. + Handlers should return ``None`` to fall back to the skipping behavior of + autodoc and other enabled extensions. 
+ :param app: the Sphinx application object :param what: the type of the object which the docstring belongs to (one of ``"module"``, ``"class"``, ``"exception"``, ``"function"``, ``"method"``, diff --git a/setup.cfg b/setup.cfg index a65719461..533a71b0a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -26,4 +26,4 @@ universal = 1 [flake8] max-line-length=95 ignore=E113,E116,E221,E226,E241,E251,E901 -exclude=tests/*,build/*,sphinx/search/*,sphinx/pycode/pgen2/*,doc/ext/example*.py +exclude=tests/*,build/*,sphinx/search/*,sphinx/pycode/pgen2/*,doc/ext/example*.py,.tox/* diff --git a/sphinx/ext/napoleon/__init__.py b/sphinx/ext/napoleon/__init__.py index b74dfb75d..f6fccac7d 100644 --- a/sphinx/ext/napoleon/__init__.py +++ b/sphinx/ext/napoleon/__init__.py @@ -464,4 +464,4 @@ def _skip_member(app, what, name, obj, skip, options): (is_private and inc_private) or (is_init and inc_init)): return False - return skip + return None diff --git a/tests/test_ext_napoleon.py b/tests/test_ext_napoleon.py index 21d095a79..5f68ba7c0 100644 --- a/tests/test_ext_napoleon.py +++ b/tests/test_ext_napoleon.py @@ -123,19 +123,19 @@ class SetupTest(TestCase): class SkipMemberTest(TestCase): - def assertSkip(self, what, member, obj, expect_skip, config_name): - skip = 'default skip' + def assertSkip(self, what, member, obj, expect_default_skip, config_name): + skip = True app = mock.Mock() app.config = Config() setattr(app.config, config_name, True) - if expect_skip: - self.assertEqual(skip, _skip_member(app, what, member, obj, skip, + if expect_default_skip: + self.assertEqual(None, _skip_member(app, what, member, obj, skip, mock.Mock())) else: self.assertFalse(_skip_member(app, what, member, obj, skip, mock.Mock())) setattr(app.config, config_name, False) - self.assertEqual(skip, _skip_member(app, what, member, obj, skip, + self.assertEqual(None, _skip_member(app, what, member, obj, skip, mock.Mock())) def test_namedtuple(self): From 88887bad88d9724048a5a67a52094881a24d0759 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 3 Dec 2016 16:51:18 +0900 Subject: [PATCH 019/190] Update type annotation --- sphinx/builders/linkcheck.py | 3 ++- sphinx/ext/inheritance_diagram.py | 4 ++-- sphinx/search/__init__.py | 4 ++-- sphinx/util/requests.py | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py index 6df9480a4..836709375 100644 --- a/sphinx/builders/linkcheck.py +++ b/sphinx/builders/linkcheck.py @@ -43,6 +43,7 @@ if False: # For type annotation from typing import Any, Tuple, Union # NOQA from sphinx.application import Sphinx # NOQA + from sphinx.util.requests.requests import Response # NOQA class AnchorCheckParser(HTMLParser): @@ -64,7 +65,7 @@ class AnchorCheckParser(HTMLParser): def check_anchor(response, anchor): - # type: (requests.Response, unicode) -> bool + # type: (Response, unicode) -> bool """Reads HTML data from a response object `response` searching for `anchor`. Returns True if anchor was found, False otherwise. 
""" diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py index f37d7cf8b..f355aa6c4 100644 --- a/sphinx/ext/inheritance_diagram.py +++ b/sphinx/ext/inheritance_diagram.py @@ -79,9 +79,9 @@ def try_import(objname): """ try: __import__(objname) - return sys.modules.get(objname) + return sys.modules.get(objname) # type: ignore except ImportError: - modname, attrname = module_sig_re.match(objname).groups() + modname, attrname = module_sig_re.match(objname).groups() # type: ignore if modname is None: return None try: diff --git a/sphinx/search/__init__.py b/sphinx/search/__init__.py index df374ae46..d2baf363c 100644 --- a/sphinx/search/__init__.py +++ b/sphinx/search/__init__.py @@ -287,7 +287,7 @@ class IndexBuilder(object): # type: (IO, Any) -> None """Reconstruct from frozen data.""" if isinstance(format, string_types): - format = self.formats[format] + format = self.formats[format] # type: ignore frozen = format.load(stream) # if an old index is present, we treat it as not existing. if not isinstance(frozen, dict) or \ @@ -314,7 +314,7 @@ class IndexBuilder(object): # type: (IO, Any) -> None """Dump the frozen index to a stream.""" if isinstance(format, string_types): - format = self.formats[format] + format = self.formats[format] # type: ignore format.dump(self.freeze(), stream) # type: ignore def get_objects(self, fn2index): diff --git a/sphinx/util/requests.py b/sphinx/util/requests.py index e2ac94e80..3576b0088 100644 --- a/sphinx/util/requests.py +++ b/sphinx/util/requests.py @@ -71,7 +71,7 @@ def _get_tls_cacert(url, config): certs = getattr(config, 'tls_cacerts', None) if not certs: return True - elif isinstance(certs, (string_types, tuple)): + elif isinstance(certs, (string_types, tuple)): # type: ignore return certs else: hostname = urlsplit(url)[1] From a265670b79e737a0c88ea76dbac5485c19f6e350 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 3 Dec 2016 16:51:35 +0900 Subject: [PATCH 020/190] Adjust code to type annotation --- sphinx/ext/coverage.py | 2 +- sphinx/make_mode.py | 2 +- sphinx/util/console.py | 2 +- sphinx/util/i18n.py | 7 ++++--- sphinx/writers/texinfo.py | 4 ++-- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py index 98681466c..11e017f69 100644 --- a/sphinx/ext/coverage.py +++ b/sphinx/ext/coverage.py @@ -105,7 +105,7 @@ class CoverageBuilder(Builder): if match: name = match.groups()[0] if name not in c_objects: - for exp in self.c_ignorexps.get(key, ()): + for exp in self.c_ignorexps.get(key, []): if exp.match(name): break else: diff --git a/sphinx/make_mode.py b/sphinx/make_mode.py index 87333301c..7751b8b40 100644 --- a/sphinx/make_mode.py +++ b/sphinx/make_mode.py @@ -71,7 +71,7 @@ class Make(object): def build_clean(self): # type: () -> int if not path.exists(self.builddir): - return + return 0 elif not path.isdir(self.builddir): print("Error: %r is not a directory!" 
% self.builddir) return 1 diff --git a/sphinx/util/console.py b/sphinx/util/console.py index b952d7183..6dc4b88ca 100644 --- a/sphinx/util/console.py +++ b/sphinx/util/console.py @@ -36,7 +36,7 @@ def get_terminal_width(): terminal_width = width except Exception: # FALLBACK - terminal_width = int(os.environ.get('COLUMNS', 80)) - 1 + terminal_width = int(os.environ.get('COLUMNS', "80")) - 1 return terminal_width diff --git a/sphinx/util/i18n.py b/sphinx/util/i18n.py index efbbb75f7..9ec6f9136 100644 --- a/sphinx/util/i18n.py +++ b/sphinx/util/i18n.py @@ -108,10 +108,11 @@ def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact default is False. :return: [CatalogInfo(), ...] """ - if not locale: - return [] # locale is not specified - catalogs = set() # type: Set[CatalogInfo] + + if not locale: + return catalogs # locale is not specified + for locale_dir in locale_dirs: if not locale_dir: continue # skip system locale directory diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py index 6311d3b84..792318a36 100644 --- a/sphinx/writers/texinfo.py +++ b/sphinx/writers/texinfo.py @@ -324,7 +324,7 @@ class TexinfoTranslator(nodes.NodeVisitor): top['node_name'] = 'Top' # handle the indices for name, content in self.indices: - node_menus[name] = () + node_menus[name] = [] node_menus['Top'].append(name) def collect_rellinks(self): @@ -642,7 +642,7 @@ class TexinfoTranslator(nodes.NodeVisitor): def visit_title(self, node): # type: (nodes.Node) -> None if not self.seen_title: - self.seen_title = 1 + self.seen_title = True raise nodes.SkipNode parent = node.parent if isinstance(parent, nodes.table): From 82ef05431a8283f348da39950104819352eacc30 Mon Sep 17 00:00:00 2001 From: shimizukawa <shimizukawa@gmail.com> Date: Mon, 5 Dec 2016 23:31:37 +0900 Subject: [PATCH 021/190] fix flake8 --- sphinx/util/pycompat.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py index da2bcc255..f25e14023 100644 --- a/sphinx/util/pycompat.py +++ b/sphinx/util/pycompat.py @@ -18,11 +18,12 @@ from six import PY3, class_types, text_type, exec_ from six.moves import zip_longest from itertools import product +from sphinx.deprecation import RemovedInSphinx16Warning + if False: # For type annotation from typing import Any, Callable # NOQA -from sphinx.deprecation import RemovedInSphinx16Warning NoneType = type(None) From f7c0a8b7026b2ff0f300ee1aa64a7676f493f2a7 Mon Sep 17 00:00:00 2001 From: Mike Fiedler <miketheman@gmail.com> Date: Sun, 11 Dec 2016 09:01:02 -0500 Subject: [PATCH 022/190] Update faq epub link The epubcheck project was moved to GitHub. --- doc/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/faq.rst b/doc/faq.rst index eaa663d92..5924f5f68 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -201,7 +201,7 @@ The following list gives some hints for the creation of epub files: Error(prcgen):E24011: TOC section scope is not included in the parent chapter:(title) Error(prcgen):E24001: The table of content could not be built. -.. _Epubcheck: https://code.google.com/archive/p/epubcheck +.. _Epubcheck: https://github.com/IDPF/epubcheck .. _Calibre: http://calibre-ebook.com/ .. _FBreader: https://fbreader.org/ .. 
_Bookworm: http://www.oreilly.com/bookworm/index.html From cbd3eb223d1efa8ed295104a83d41097369a92ba Mon Sep 17 00:00:00 2001 From: Matthias Geier <Matthias.Geier@gmail.com> Date: Sun, 11 Dec 2016 11:53:16 +0100 Subject: [PATCH 023/190] LaTeX: Make sure sphinxVerbatim environment is not cut into pieces See https://github.com/spatialaudio/nbsphinx/issues/78 --- sphinx/writers/latex.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 20a652890..68144f3aa 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -2183,10 +2183,11 @@ class LaTeXTranslator(nodes.NodeVisitor): '\\begin{sphinxVerbatim}') # get consistent trailer hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim} - self.body.append('\n' + hlcode + '\\end{sphinxVerbatim') if self.table and not self.in_footnote: - self.body.append('intable') - self.body.append('}\n') + hlcode += '\\end{sphinxVerbatimintable}' + else: + hlcode += '\\end{sphinxVerbatim}' + self.body.append('\n' + hlcode + '\n') if ids: self.body.append('\\let\\sphinxLiteralBlockLabel\\empty\n') raise nodes.SkipNode From 2df0bb8d03d05fe7052315438382d49014e3dcf9 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 13 Dec 2016 11:36:03 +0900 Subject: [PATCH 024/190] ``sphinx.util.compat.Directive`` class is now deprecated. --- CHANGES | 3 +++ sphinx/domains/cpp.py | 2 +- sphinx/domains/python.py | 3 +-- sphinx/domains/std.py | 3 +-- sphinx/ext/autodoc.py | 2 +- sphinx/ext/autosummary/__init__.py | 3 +-- sphinx/ext/doctest.py | 3 +-- sphinx/ext/graphviz.py | 3 +-- sphinx/ext/ifconfig.py | 2 +- sphinx/ext/inheritance_diagram.py | 3 +-- sphinx/ext/mathbase.py | 3 +-- sphinx/util/compat.py | 27 +++++++++++++++++++++++++++ 12 files changed, 40 insertions(+), 17 deletions(-) diff --git a/CHANGES b/CHANGES index 3cdd47ba5..2267a465d 100644 --- a/CHANGES +++ b/CHANGES @@ -4,6 +4,9 @@ Release 1.6 (in development) Incompatible changes -------------------- +* ``sphinx.util.compat.Directive`` class is now deprecated. 
Please use instead + ``docutils.parsers.rsr.Directive`` + Features added -------------- diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index 5eeabcb11..637e75993 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -15,6 +15,7 @@ from copy import deepcopy from six import iteritems, text_type from docutils import nodes +from docutils.parsers.rst import Directive from sphinx import addnodes from sphinx.roles import XRefRole @@ -22,7 +23,6 @@ from sphinx.locale import l_, _ from sphinx.domains import Domain, ObjType from sphinx.directives import ObjectDescription from sphinx.util.nodes import make_refnode -from sphinx.util.compat import Directive from sphinx.util.pycompat import UnicodeMixin from sphinx.util.docfields import Field, GroupedField diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py index 4f0d0f1ae..377785122 100644 --- a/sphinx/domains/python.py +++ b/sphinx/domains/python.py @@ -14,7 +14,7 @@ import re from six import iteritems from docutils import nodes -from docutils.parsers.rst import directives +from docutils.parsers.rst import Directive, directives from sphinx import addnodes from sphinx.roles import XRefRole @@ -22,7 +22,6 @@ from sphinx.locale import l_, _ from sphinx.domains import Domain, ObjType, Index from sphinx.directives import ObjectDescription from sphinx.util.nodes import make_refnode -from sphinx.util.compat import Directive from sphinx.util.docfields import Field, GroupedField, TypedField if False: diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py index 53305899d..eb0ff68a4 100644 --- a/sphinx/domains/std.py +++ b/sphinx/domains/std.py @@ -15,7 +15,7 @@ import unicodedata from six import PY3, iteritems from docutils import nodes -from docutils.parsers.rst import directives +from docutils.parsers.rst import Directive, directives from docutils.statemachine import ViewList from sphinx import addnodes @@ -25,7 +25,6 @@ from sphinx.domains import Domain, ObjType from sphinx.directives import ObjectDescription from sphinx.util import ws_re from sphinx.util.nodes import clean_astext, make_refnode -from sphinx.util.compat import Directive if False: # For type annotation diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index fbdd8d1ae..b09893282 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -22,6 +22,7 @@ from six import PY2, iterkeys, iteritems, itervalues, text_type, class_types, \ from docutils import nodes from docutils.utils import assemble_option_dict +from docutils.parsers.rst import Directive from docutils.statemachine import ViewList import sphinx @@ -30,7 +31,6 @@ from sphinx.locale import _ from sphinx.pycode import ModuleAnalyzer, PycodeError from sphinx.application import ExtensionError from sphinx.util.nodes import nested_parse_with_titles -from sphinx.util.compat import Directive from sphinx.util.inspect import getargspec, isdescriptor, safe_getmembers, \ safe_getattr, object_description, is_builtin_class_method, isenumattribute from sphinx.util.docstrings import prepare_docstring diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index 886623217..158692c1e 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -63,14 +63,13 @@ from types import ModuleType from six import text_type -from docutils.parsers.rst import directives +from docutils.parsers.rst import Directive, directives from docutils.statemachine import ViewList from docutils import nodes import sphinx from sphinx import addnodes from sphinx.util 
import import_object, rst -from sphinx.util.compat import Directive from sphinx.pycode import ModuleAnalyzer, PycodeError from sphinx.ext.autodoc import Options diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index 31ccb22d9..b0c1f61f5 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -21,13 +21,12 @@ import doctest from six import itervalues, StringIO, binary_type, text_type, PY2 from docutils import nodes -from docutils.parsers.rst import directives +from docutils.parsers.rst import Directive, directives import sphinx from sphinx.builders import Builder from sphinx.util import force_decode from sphinx.util.nodes import set_source_info -from sphinx.util.compat import Directive from sphinx.util.console import bold # type: ignore from sphinx.util.osutil import fs_encoding diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py index 0c29777dd..a87d7ca58 100644 --- a/sphinx/ext/graphviz.py +++ b/sphinx/ext/graphviz.py @@ -20,7 +20,7 @@ from hashlib import sha1 from six import text_type from docutils import nodes -from docutils.parsers.rst import directives +from docutils.parsers.rst import Directive, directives from docutils.statemachine import ViewList import sphinx @@ -28,7 +28,6 @@ from sphinx.errors import SphinxError from sphinx.locale import _ from sphinx.util.i18n import search_image_for_language from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL -from sphinx.util.compat import Directive if False: # For type annotation diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py index 923e2d080..18504d94e 100644 --- a/sphinx/ext/ifconfig.py +++ b/sphinx/ext/ifconfig.py @@ -21,10 +21,10 @@ """ from docutils import nodes +from docutils.parsers.rst import Directive import sphinx from sphinx.util.nodes import set_source_info -from sphinx.util.compat import Directive if False: # For type annotation diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py index f355aa6c4..3b23c845a 100644 --- a/sphinx/ext/inheritance_diagram.py +++ b/sphinx/ext/inheritance_diagram.py @@ -48,14 +48,13 @@ from six import text_type from six.moves import builtins # type: ignore from docutils import nodes -from docutils.parsers.rst import directives +from docutils.parsers.rst import Directive, directives import sphinx from sphinx.ext.graphviz import render_dot_html, render_dot_latex, \ render_dot_texinfo, figure_wrapper from sphinx.pycode import ModuleAnalyzer from sphinx.util import force_decode -from sphinx.util.compat import Directive if False: # For type annotation diff --git a/sphinx/ext/mathbase.py b/sphinx/ext/mathbase.py index 4a5bcfb6e..4e12f62f7 100644 --- a/sphinx/ext/mathbase.py +++ b/sphinx/ext/mathbase.py @@ -10,13 +10,12 @@ """ from docutils import nodes, utils -from docutils.parsers.rst import directives +from docutils.parsers.rst import Directive, directives from sphinx.roles import XRefRole from sphinx.locale import _ from sphinx.domains import Domain from sphinx.util.nodes import make_refnode, set_source_info -from sphinx.util.compat import Directive if False: # For type annotation diff --git a/sphinx/util/compat.py b/sphinx/util/compat.py index 0af65cbe3..73b68f5a2 100644 --- a/sphinx/util/compat.py +++ b/sphinx/util/compat.py @@ -10,12 +10,17 @@ """ from __future__ import absolute_import +import sys import warnings from docutils import nodes from docutils.parsers.rst import Directive # noqa +from docutils.parsers.rst import Directive # noqa from docutils import __version__ as _du_version + +from sphinx.deprecation import 
RemovedInSphinx17Warning + docutils_version = tuple(int(x) for x in _du_version.split('.')[:2]) @@ -38,3 +43,25 @@ def make_admonition(node_class, name, arguments, options, content, lineno, admonition_node['classes'] += classes state.nested_parse(content, content_offset, admonition_node) return [admonition_node] + + +class _DeprecationWrapper(object): + def __init__(self, mod, deprecated): + # type: (Any, Dict) -> None + self._mod = mod + self._deprecated = deprecated + + def __getattr__(self, attr): + if attr in self._deprecated: + warnings.warn("sphinx.util.compat.%s is deprecated and will be " + "removed in Sphinx 1.7, please use the standard " + "library version instead." % attr, + RemovedInSphinx17Warning, stacklevel=2) + return self._deprecated[attr] + return getattr(self._mod, attr) + + +sys.modules[__name__] = _DeprecationWrapper(sys.modules[__name__], dict( # type: ignore + docutils_version = docutils_version, + Directive = Directive, +)) From 2390c5549c24d4d97f033bf4969e526d2cdfaf06 Mon Sep 17 00:00:00 2001 From: Luc Saffre <luc.saffre@gmail.com> Date: Tue, 13 Dec 2016 11:19:22 +0200 Subject: [PATCH 025/190] fix #2336 autosummary imported members --- sphinx/ext/autosummary/generate.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py index 3e81a14a2..a47a1105f 100644 --- a/sphinx/ext/autosummary/generate.py +++ b/sphinx/ext/autosummary/generate.py @@ -165,17 +165,19 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', except TemplateNotFound: template = template_env.get_template('autosummary/base.rst') - def get_members(obj, typ, include_public=[]): + def get_members(obj, typ, include_public=[], imported=False): # type: (Any, unicode, List[unicode]) -> Tuple[List[unicode], List[unicode]] items = [] # type: List[unicode] for name in dir(obj): try: - documenter = get_documenter(safe_getattr(obj, name), - obj) + value = safe_getattr(obj, name) except AttributeError: continue + documenter = get_documenter(value, obj) if documenter.objtype == typ: - items.append(name) + if imported or getattr(value, '__module__', None) == obj.__name__: + + items.append(name) public = [x for x in items if x in include_public or not x.startswith('_')] return public, items From 50b35bb4913410f97778212e863cbc75da5820c6 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 13 Dec 2016 21:21:06 +0900 Subject: [PATCH 026/190] ``sphinx.util.compat.docutils_version`` is now deprecated --- CHANGES | 1 + sphinx/__init__.py | 4 ++-- sphinx/writers/manpage.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGES b/CHANGES index 2267a465d..e6d72afbe 100644 --- a/CHANGES +++ b/CHANGES @@ -6,6 +6,7 @@ Incompatible changes * ``sphinx.util.compat.Directive`` class is now deprecated. 
Please use instead ``docutils.parsers.rsr.Directive`` +* ``sphinx.util.compat.docutils_version`` is now deprecated Features added -------------- diff --git a/sphinx/__init__.py b/sphinx/__init__.py index 405fc2ff2..fb47a88ae 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -99,8 +99,8 @@ def build_main(argv=sys.argv): return 1 raise - from sphinx.util.compat import docutils_version - if docutils_version < (0, 10): + import sphinx.util.docutils + if sphinx.util.docutils.__version_info__ < (0, 10): sys.stderr.write('Error: Sphinx requires at least Docutils 0.10 to ' 'run.\n') return 1 diff --git a/sphinx/writers/manpage.py b/sphinx/writers/manpage.py index 53cf29767..249256576 100644 --- a/sphinx/writers/manpage.py +++ b/sphinx/writers/manpage.py @@ -21,7 +21,7 @@ from docutils.writers.manpage import ( from sphinx import addnodes from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ -from sphinx.util.compat import docutils_version +import sphinx.util.docutils from sphinx.util.i18n import format_date @@ -105,7 +105,7 @@ class ManualPageTranslator(BaseTranslator): self._docinfo['manual_group'] = builder.config.project # In docutils < 0.11 self.append_header() was never called - if docutils_version < (0, 11): + if sphinx.util.docutils.__version_info__ < (0, 11): self.body.append(MACRO_DEF) # Overwrite admonition label translations with our own From 3f7eefdd1acfa2dbe4e17378ddfae8f4152a6564 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 13 Dec 2016 23:38:08 +0900 Subject: [PATCH 027/190] Update CHANGES --- CHANGES | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/CHANGES b/CHANGES index e6d72afbe..f8c1a4ad0 100644 --- a/CHANGES +++ b/CHANGES @@ -4,10 +4,6 @@ Release 1.6 (in development) Incompatible changes -------------------- -* ``sphinx.util.compat.Directive`` class is now deprecated. Please use instead - ``docutils.parsers.rsr.Directive`` -* ``sphinx.util.compat.docutils_version`` is now deprecated - Features added -------------- @@ -16,6 +12,13 @@ Features added Bugs fixed ---------- +Deprecated +---------- + +* ``sphinx.util.compat.Directive`` class is now deprecated. Please use instead + ``docutils.parsers.rsr.Directive`` +* ``sphinx.util.compat.docutils_version`` is now deprecated + Release 1.5.1 (in development) ============================== From 3f0db5469e2d468fefa9ef7bdca8a295db2e538e Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 14 Dec 2016 00:37:16 +0900 Subject: [PATCH 028/190] Fix miss-merging --- sphinx/environment/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 4473b801d..ecd8230d2 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -427,7 +427,7 @@ class BuildEnvironment(object): enc_rel_fn = rel_fn.encode(sys.getfilesystemencoding()) return rel_fn, path.abspath(path.join(self.srcdir, enc_rel_fn)) - def find_files(self, config): + def find_files(self, config, buildername): # type: (Config, unicode) -> None """Find all source files in the source dir and put them in self.found_docs. From 938d44e2ded54e23da677a120381228bca4c6a5f Mon Sep 17 00:00:00 2001 From: shimizukawa <shimizukawa@gmail.com> Date: Wed, 14 Dec 2016 00:51:12 +0900 Subject: [PATCH 029/190] Fix #1061, #2336, #3235: Now generation of autosummary doesn't contain imported members by default. Thanks to Luc Saffre. 
--- CHANGES | 3 +++ sphinx/ext/autosummary/generate.py | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index dc7a6e2ef..415561fc3 100644 --- a/CHANGES +++ b/CHANGES @@ -4,6 +4,9 @@ Release 1.6 (in development) Incompatible changes -------------------- +* #1061, #2336, #3235: Now generation of autosummary doesn't contain imported + members by default. Thanks to Luc Saffre. + Features added -------------- diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py index a47a1105f..d3dfbe5e1 100644 --- a/sphinx/ext/autosummary/generate.py +++ b/sphinx/ext/autosummary/generate.py @@ -176,7 +176,6 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', documenter = get_documenter(value, obj) if documenter.objtype == typ: if imported or getattr(value, '__module__', None) == obj.__name__: - items.append(name) public = [x for x in items if x in include_public or not x.startswith('_')] From c016c6461635edd8883629362a3b466938ed8291 Mon Sep 17 00:00:00 2001 From: shimizukawa <shimizukawa@gmail.com> Date: Wed, 14 Dec 2016 01:03:34 +0900 Subject: [PATCH 030/190] refs #3222 add issue template --- .github/ISSUE_TEMPLATE.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE.md diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..5caf7d6a4 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,27 @@ +Subject: <what happen when you do on which document project> + +# Problem +- <Detail of problem> + +## Procedure to reproduce the problem +``` +<Paste your command-line here which cause the problem> +``` + +## Error logs / results +``` +<Paste your error log here> +``` +- <public link of unexpected result if you have> + +## Expected results +<Describe what to actually do> + +# Reproducible project / your project +- <link to your project, or attach zipped small project sample> + +# Environment info +- OS: <Unix/Linux/Mac/Win/other with version> +- Python version: +- Sphinx version: +- <Extra tools e.g.: Browser, tex or something else> From ade26037679e1ae03906e03e26f36ccfa58d993f Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 14 Dec 2016 16:50:43 +0900 Subject: [PATCH 031/190] Use docutils' Directive class instead compat module --- tests/root/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/root/conf.py b/tests/root/conf.py index f2684e33f..a23aec482 100644 --- a/tests/root/conf.py +++ b/tests/root/conf.py @@ -84,8 +84,8 @@ tags.add('confpytag') # -- extension API from docutils import nodes +from docutils.parsers.rst import Directive from sphinx import addnodes -from sphinx.util.compat import Directive def userdesc_parse(env, sig, signode): From 3549b7c6218304448aeab9b22a8eff36e61c151c Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Wed, 14 Dec 2016 17:55:44 +0100 Subject: [PATCH 032/190] Keep original ``\includegraphics``, use only ``\sphinxincludegraphics``. modified: CHANGES modified: sphinx/texinputs/sphinx.sty --- CHANGES | 2 ++ sphinx/texinputs/sphinx.sty | 20 ++++++-------------- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/CHANGES b/CHANGES index d6fa85299..8cb02ce4c 100644 --- a/CHANGES +++ b/CHANGES @@ -6,6 +6,8 @@ Incompatible changes * #1061, #2336, #3235: Now generation of autosummary doesn't contain imported members by default. Thanks to Luc Saffre. 
+* LaTeX ``\includegraphics`` command isn't overloaded: only ``\sphinxincludegraphics`` + has the custom code to fit image to available width if oversized. Features added -------------- diff --git a/sphinx/texinputs/sphinx.sty b/sphinx/texinputs/sphinx.sty index eb2febbb0..43a9b4e4a 100644 --- a/sphinx/texinputs/sphinx.sty +++ b/sphinx/texinputs/sphinx.sty @@ -894,33 +894,25 @@ \raggedright} {\end{list}} -% Redefine \includegraphics to resize images larger than the line width, +% \sphinxincludegraphics defined to resize images larger than the line width, % except if height or width option present. % % If scale is present, rescale before fitting to line width. (since 1.5) -% -% Warning: future version of Sphinx will not modify original \includegraphics, -% below code will be definition only of \sphinxincludegraphics. -\let\py@Oldincludegraphics\includegraphics \newbox\spx@image@box -\renewcommand*{\includegraphics}[2][]{% +\newcommand*{\sphinxincludegraphics}[2][]{% \in@{height}{#1}\ifin@\else\in@{width}{#1}\fi \ifin@ % height or width present - \py@Oldincludegraphics[#1]{#2}% + \includegraphics[#1]{#2}% \else % no height nor width (but #1 may be "scale=...") - \setbox\spx@image@box\hbox{\py@Oldincludegraphics[#1,draft]{#2}}% + \setbox\spx@image@box\hbox{\includegraphics[#1,draft]{#2}}% \ifdim \wd\spx@image@box>\linewidth \setbox\spx@image@box\box\voidb@x % clear memory - \py@Oldincludegraphics[#1,width=\linewidth]{#2}% + \includegraphics[#1,width=\linewidth]{#2}% \else - \py@Oldincludegraphics[#1]{#2}% + \includegraphics[#1]{#2}% \fi \fi } -% Writer will put \sphinxincludegraphics in LaTeX source, and with this, -% documents which used their own modified \includegraphics will compile -% as before. But see warning above. -\newcommand*{\sphinxincludegraphics}{\includegraphics} % to make pdf with correct encoded bookmarks in Japanese % this should precede the hyperref package From e4a0f99dda967820e494fd80c7ec07cbc69401d1 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Wed, 14 Dec 2016 19:20:41 +0100 Subject: [PATCH 033/190] Update latex version string --- sphinx/texinputs/sphinx.sty | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/texinputs/sphinx.sty b/sphinx/texinputs/sphinx.sty index 43a9b4e4a..fff4b9536 100644 --- a/sphinx/texinputs/sphinx.sty +++ b/sphinx/texinputs/sphinx.sty @@ -6,7 +6,7 @@ % \NeedsTeXFormat{LaTeX2e}[1995/12/01] -\ProvidesPackage{sphinx}[2016/12/14 v1.5.2 LaTeX package (Sphinx markup)] +\ProvidesPackage{sphinx}[2016/12/14 v1.6 LaTeX package (Sphinx markup)] % we delay handling of options to after having loaded packages, because % of the need to use \definecolor. 
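For readers tracking the ``\includegraphics`` change in the two patches above: the Sphinx LaTeX writer emits ``\sphinxincludegraphics`` for the images it places itself, so only that macro now carries the fit-to-line-width logic. The short sketch below is an illustration of the resulting behaviour, not part of the patch series; the image file name is hypothetical, and such raw LaTeX would typically live in a custom preamble or a ``.. raw:: latex`` block::

    % hypothetical file name; sketch of the post-change behaviour only
    \sphinxincludegraphics{large-figure.pdf}             % capped at \linewidth if wider than the line
    \sphinxincludegraphics[scale=2]{large-figure.pdf}    % rescaled first, then capped if still oversized
    \sphinxincludegraphics[width=3cm]{large-figure.pdf}  % explicit dimension given, passed through unchanged
    \includegraphics{large-figure.pdf}                    % stock graphicx command, no fitting applied

One consequence of keeping the stock command untouched is that documents which redefine ``\includegraphics`` for their own purposes no longer interact with Sphinx's resizing code.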
From d5288567fd90b77c139a0fb0dbd48e7dbe3454a5 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 15 Dec 2016 19:22:40 +0900 Subject: [PATCH 034/190] Update type annotations --- sphinx/builders/applehelp.py | 3 ++- sphinx/builders/changes.py | 2 +- sphinx/builders/devhelp.py | 2 +- sphinx/builders/epub.py | 2 +- sphinx/builders/gettext.py | 2 +- sphinx/builders/html.py | 2 +- sphinx/builders/latex.py | 2 +- sphinx/builders/linkcheck.py | 2 +- sphinx/builders/manpage.py | 2 +- sphinx/builders/qthelp.py | 2 +- sphinx/builders/texinfo.py | 2 +- sphinx/directives/__init__.py | 2 +- sphinx/directives/code.py | 2 +- sphinx/directives/other.py | 2 +- sphinx/domains/c.py | 2 +- sphinx/domains/cpp.py | 2 +- sphinx/domains/javascript.py | 4 ++-- sphinx/domains/python.py | 2 +- sphinx/domains/rst.py | 4 ++-- sphinx/domains/std.py | 2 +- sphinx/ext/autosummary/generate.py | 2 +- sphinx/util/compat.py | 1 - 22 files changed, 24 insertions(+), 24 deletions(-) diff --git a/sphinx/builders/applehelp.py b/sphinx/builders/applehelp.py index 66ee82ff8..3c2782802 100644 --- a/sphinx/builders/applehelp.py +++ b/sphinx/builders/applehelp.py @@ -30,6 +30,7 @@ import subprocess if False: # For type annotation + from typing import Any # NOQA from sphinx.application import Sphinx # NOQA # Use plistlib.dump in 3.4 and above @@ -271,7 +272,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.setup_extension('sphinx.builders.html') app.add_builder(AppleHelpBuilder) diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py index 708cefcde..1140f854a 100644 --- a/sphinx/builders/changes.py +++ b/sphinx/builders/changes.py @@ -165,7 +165,7 @@ class ChangesBuilder(Builder): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(ChangesBuilder) return { diff --git a/sphinx/builders/devhelp.py b/sphinx/builders/devhelp.py index e615de39b..f1ed3a495 100644 --- a/sphinx/builders/devhelp.py +++ b/sphinx/builders/devhelp.py @@ -132,7 +132,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.setup_extension('sphinx.builders.html') app.add_builder(DevhelpBuilder) diff --git a/sphinx/builders/epub.py b/sphinx/builders/epub.py index d119c62db..5d4686af6 100644 --- a/sphinx/builders/epub.py +++ b/sphinx/builders/epub.py @@ -838,7 +838,7 @@ class EpubBuilder(StandaloneHTMLBuilder): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.setup_extension('sphinx.builders.html') app.add_builder(EpubBuilder) diff --git a/sphinx/builders/gettext.py b/sphinx/builders/gettext.py index ecf8cb1f3..ced63e8f5 100644 --- a/sphinx/builders/gettext.py +++ b/sphinx/builders/gettext.py @@ -277,7 +277,7 @@ class MessageCatalogBuilder(I18nBuilder): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(MessageCatalogBuilder) app.add_config_value('gettext_compact', True, 'gettext') diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 5e1608132..e64f4e5a4 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -1275,7 +1275,7 @@ def validate_config_values(app): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] # builders app.add_builder(StandaloneHTMLBuilder) app.add_builder(DirectoryHTMLBuilder) diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py 
index 53269da07..c8e1449f9 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -286,7 +286,7 @@ def validate_config_values(app): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(LaTeXBuilder) app.connect('builder-inited', validate_config_values) diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py index dbb60b344..0b95bcd6c 100644 --- a/sphinx/builders/linkcheck.py +++ b/sphinx/builders/linkcheck.py @@ -310,7 +310,7 @@ class CheckExternalLinksBuilder(Builder): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(CheckExternalLinksBuilder) app.add_config_value('linkcheck_ignore', [], None) diff --git a/sphinx/builders/manpage.py b/sphinx/builders/manpage.py index f688a38d9..e6f7c4104 100644 --- a/sphinx/builders/manpage.py +++ b/sphinx/builders/manpage.py @@ -103,7 +103,7 @@ class ManualPageBuilder(Builder): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(ManualPageBuilder) app.add_config_value('man_pages', diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py index d8f43ea45..c49f3d767 100644 --- a/sphinx/builders/qthelp.py +++ b/sphinx/builders/qthelp.py @@ -318,7 +318,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.setup_extension('sphinx.builders.html') app.add_builder(QtHelpBuilder) diff --git a/sphinx/builders/texinfo.py b/sphinx/builders/texinfo.py index eee4f931a..354575157 100644 --- a/sphinx/builders/texinfo.py +++ b/sphinx/builders/texinfo.py @@ -242,7 +242,7 @@ class TexinfoBuilder(Builder): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(TexinfoBuilder) app.add_config_value('texinfo_documents', diff --git a/sphinx/directives/__init__.py b/sphinx/directives/__init__.py index 6c1081624..58efd68a5 100644 --- a/sphinx/directives/__init__.py +++ b/sphinx/directives/__init__.py @@ -242,7 +242,7 @@ class DefaultDomain(Directive): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] directives.register_directive('default-role', DefaultRole) directives.register_directive('default-domain', DefaultDomain) directives.register_directive('describe', ObjectDescription) diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py index c6280b927..03936f4e8 100644 --- a/sphinx/directives/code.py +++ b/sphinx/directives/code.py @@ -389,7 +389,7 @@ class LiteralInclude(Directive): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] directives.register_directive('highlight', Highlight) directives.register_directive('highlightlang', Highlight) # old directives.register_directive('code-block', CodeBlock) diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py index 1bdef5ebc..3dcd3c60b 100644 --- a/sphinx/directives/other.py +++ b/sphinx/directives/other.py @@ -427,7 +427,7 @@ class Include(BaseInclude): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] directives.register_directive('toctree', TocTree) directives.register_directive('sectionauthor', Author) directives.register_directive('moduleauthor', Author) diff --git a/sphinx/domains/c.py b/sphinx/domains/c.py index a9655c661..22b221318 100644 --- a/sphinx/domains/c.py +++ b/sphinx/domains/c.py @@ -325,7 +325,7 @@ class CDomain(Domain): def setup(app): - # type: (Sphinx) -> None + # type: 
(Sphinx) -> Dict[unicode, Any] app.add_domain(CDomain) return { diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index a676296fb..9e702e4e7 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -4987,7 +4987,7 @@ class CPPDomain(Domain): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_domain(CPPDomain) app.add_config_value("cpp_index_common_prefix", [], 'env') app.add_config_value("cpp_id_attributes", [], 'env') diff --git a/sphinx/domains/javascript.py b/sphinx/domains/javascript.py index a2c10af94..6d7920411 100644 --- a/sphinx/domains/javascript.py +++ b/sphinx/domains/javascript.py @@ -20,7 +20,7 @@ from sphinx.util.docfields import Field, GroupedField, TypedField if False: # For type annotation - from typing import Iterator, Tuple # NOQA + from typing import Any, Iterator, Tuple # NOQA from docutils import nodes # NOQA from sphinx.application import Sphinx # NOQA from sphinx.builders import Builder # NOQA @@ -255,7 +255,7 @@ class JavaScriptDomain(Domain): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_domain(JavaScriptDomain) return { diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py index b6c2f4e99..562c0be3d 100644 --- a/sphinx/domains/python.py +++ b/sphinx/domains/python.py @@ -841,7 +841,7 @@ class PythonDomain(Domain): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_domain(PythonDomain) return { diff --git a/sphinx/domains/rst.py b/sphinx/domains/rst.py index b433e83c6..450b0faa2 100644 --- a/sphinx/domains/rst.py +++ b/sphinx/domains/rst.py @@ -22,7 +22,7 @@ from sphinx.util.nodes import make_refnode if False: # For type annotation - from typing import Iterator, Tuple # NOQA + from typing import Any, Iterator, Tuple # NOQA from docutils import nodes # NOQA from sphinx.application import Sphinx # NOQA from sphinx.builders import Builder # NOQA @@ -177,7 +177,7 @@ class ReSTDomain(Domain): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_domain(ReSTDomain) return { diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py index b980602f1..359690239 100644 --- a/sphinx/domains/std.py +++ b/sphinx/domains/std.py @@ -909,7 +909,7 @@ class StandardDomain(Domain): def setup(app): - # type: (Sphinx) -> None + # type: (Sphinx) -> Dict[unicode, Any] app.add_domain(StandardDomain) return { diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py index d3dfbe5e1..aed6aa1f7 100644 --- a/sphinx/ext/autosummary/generate.py +++ b/sphinx/ext/autosummary/generate.py @@ -166,7 +166,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', template = template_env.get_template('autosummary/base.rst') def get_members(obj, typ, include_public=[], imported=False): - # type: (Any, unicode, List[unicode]) -> Tuple[List[unicode], List[unicode]] + # type: (Any, unicode, List[unicode], bool) -> Tuple[List[unicode], List[unicode]] items = [] # type: List[unicode] for name in dir(obj): try: diff --git a/sphinx/util/compat.py b/sphinx/util/compat.py index 73b68f5a2..a9348ce75 100644 --- a/sphinx/util/compat.py +++ b/sphinx/util/compat.py @@ -47,7 +47,6 @@ def make_admonition(node_class, name, arguments, options, content, lineno, class _DeprecationWrapper(object): def __init__(self, mod, deprecated): - # type: (Any, Dict) -> None self._mod = mod self._deprecated = deprecated From 4187b606a4ed611239fe77b7b5d79f349c3bef62 Mon Sep 17 00:00:00 2001 From: 
Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 15 Dec 2016 20:42:08 +0900 Subject: [PATCH 035/190] Update type annotations --- sphinx/directives/other.py | 2 +- sphinx/domains/cpp.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py index 3dcd3c60b..06a3f745d 100644 --- a/sphinx/directives/other.py +++ b/sphinx/directives/other.py @@ -24,7 +24,7 @@ from sphinx.util.matching import patfilter if False: # For type annotation - from typing import Tuple # NOQA + from typing import Any, Tuple # NOQA from sphinx.application import Sphinx # NOQA diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index 9e702e4e7..55d6e8a43 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -3315,7 +3315,7 @@ class DefinitionParser(object): return self.match(re.compile(r'\b%s\b' % re.escape(word))) def skip_ws(self): - # type: (unicode) -> bool + # type: () -> bool return self.match(_whitespace_re) def skip_word_and_ws(self, word): From 4e50a82675b3ddbb57ec7d36b7a97f088dbd94cc Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 15 Dec 2016 22:30:15 +0900 Subject: [PATCH 036/190] Fix flake8 violation --- sphinx/ext/autosummary/generate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py index aed6aa1f7..28907167e 100644 --- a/sphinx/ext/autosummary/generate.py +++ b/sphinx/ext/autosummary/generate.py @@ -166,7 +166,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', template = template_env.get_template('autosummary/base.rst') def get_members(obj, typ, include_public=[], imported=False): - # type: (Any, unicode, List[unicode], bool) -> Tuple[List[unicode], List[unicode]] + # type: (Any, unicode, List[unicode], bool) -> Tuple[List[unicode], List[unicode]] # NOQA items = [] # type: List[unicode] for name in dir(obj): try: From d94e6a331b33b2fa11a7d8cb0afa1db2794bb8a5 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Fri, 16 Dec 2016 00:16:24 +0900 Subject: [PATCH 037/190] Move stemmer classes to sphinx.util.stemmer --- sphinx/search/en.py | 28 +--------- sphinx/search/zh.py | 28 +--------- sphinx/util/stemmer/__init__.py | 51 +++++++++++++++++++ sphinx/util/{stemmer.py => stemmer/porter.py} | 4 +- 4 files changed, 57 insertions(+), 54 deletions(-) create mode 100644 sphinx/util/stemmer/__init__.py rename sphinx/util/{stemmer.py => stemmer/porter.py} (99%) diff --git a/sphinx/search/en.py b/sphinx/search/en.py index 22d4e5acb..c6658ffdc 100644 --- a/sphinx/search/en.py +++ b/sphinx/search/en.py @@ -10,13 +10,7 @@ """ from sphinx.search import SearchLanguage - -try: - from Stemmer import Stemmer as PyStemmer - PYSTEMMER = True -except ImportError: - from sphinx.util.stemmer import PorterStemmer - PYSTEMMER = False +from sphinx.util.stemmer import get_stemmer english_stopwords = set(""" a and are as at @@ -225,25 +219,7 @@ class SearchEnglish(SearchLanguage): def init(self, options): # type: (Dict) -> None - if PYSTEMMER: - class Stemmer(object): - def __init__(self): - # type: () -> None - self.stemmer = PyStemmer('porter') - - def stem(self, word): - # type: (unicode) -> unicode - return self.stemmer.stemWord(word) - else: - class Stemmer(PorterStemmer): - """All those porter stemmer implementations look hideous; - make at least the stem method nicer. 
- """ - def stem(self, word): - # type: (unicode) -> unicode - return PorterStemmer.stem(self, word, 0, len(word) - 1) - - self.stemmer = Stemmer() + self.stemmer = get_stemmer() def stem(self, word): # type: (unicode) -> unicode diff --git a/sphinx/search/zh.py b/sphinx/search/zh.py index bd4787506..520dd6493 100644 --- a/sphinx/search/zh.py +++ b/sphinx/search/zh.py @@ -13,13 +13,7 @@ import os import re from sphinx.search import SearchLanguage - -try: - from Stemmer import Stemmer as PyStemmer - PYSTEMMER = True -except ImportError: - from sphinx.util.stemmer import PorterStemmer - PYSTEMMER = False +from sphinx.util.stemmer import get_stemmer try: import jieba @@ -244,25 +238,7 @@ class SearchChinese(SearchLanguage): if dict_path and os.path.isfile(dict_path): jieba.set_dictionary(dict_path) - if PYSTEMMER: - class Stemmer(object): - def __init__(self): - # type: () -> None - self.stemmer = PyStemmer('porter') - - def stem(self, word): - # type: (unicode) -> unicode - return self.stemmer.stemWord(word) - else: - class Stemmer(PorterStemmer): - """All those porter stemmer implementations look hideous; - make at least the stem method nicer. - """ - def stem(self, word): - # type: (unicode) -> unicode - return PorterStemmer.stem(self, word, 0, len(word) - 1) - - self.stemmer = Stemmer() + self.stemmer = get_stemmer() def split(self, input): # type: (unicode) -> List[unicode] diff --git a/sphinx/util/stemmer/__init__.py b/sphinx/util/stemmer/__init__.py new file mode 100644 index 000000000..ae9f76f1b --- /dev/null +++ b/sphinx/util/stemmer/__init__.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +""" + sphinx.util.stemmer + ~~~~~~~~~~~~~~~~~~~ + + Word stemming utilities for Sphinx. + + :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from sphinx.util.stemmer.porter import PorterStemmer + +try: + from Stemmer import Stemmer as _PyStemmer + PYSTEMMER = True +except ImportError: + PYSTEMMER = False + + +class BaseStemmer(object): + def stem(self, word): + # type: (unicode) -> unicode + raise NotImplemented + + +class PyStemmer(BaseStemmer): + def __init__(self): + # type: () -> None + self.stemmer = _PyStemmer('porter') + + def stem(self, word): + # type: (unicode) -> unicode + return self.stemmer.stemWord(word) + + +class StandardStemmer(BaseStemmer, PorterStemmer): + """All those porter stemmer implementations look hideous; + make at least the stem method nicer. 
+ """ + def stem(self, word): + # type: (unicode) -> unicode + return PorterStemmer.stem(self, word, 0, len(word) - 1) + + +def get_stemmer(): + # type: () -> BaseStemmer + if PYSTEMMER: + return PyStemmer() + else: + return StandardStemmer() diff --git a/sphinx/util/stemmer.py b/sphinx/util/stemmer/porter.py similarity index 99% rename from sphinx/util/stemmer.py rename to sphinx/util/stemmer/porter.py index 47fc41e87..7cff74b6c 100644 --- a/sphinx/util/stemmer.py +++ b/sphinx/util/stemmer/porter.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """ - sphinx.util.stemmer - ~~~~~~~~~~~~~~~~~~~ + sphinx.util.stemmer.porter + ~~~~~~~~~~~~~~~~~~~~~~~~~~ Porter Stemming Algorithm From b1e4036640b5d2290674e1a04f957f015d89f62d Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Fri, 16 Dec 2016 00:43:17 +0900 Subject: [PATCH 038/190] Update type annotation --- sphinx/util/stemmer/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sphinx/util/stemmer/__init__.py b/sphinx/util/stemmer/__init__.py index ae9f76f1b..f36924223 100644 --- a/sphinx/util/stemmer/__init__.py +++ b/sphinx/util/stemmer/__init__.py @@ -34,11 +34,11 @@ class PyStemmer(BaseStemmer): return self.stemmer.stemWord(word) -class StandardStemmer(BaseStemmer, PorterStemmer): +class StandardStemmer(BaseStemmer, PorterStemmer): # type: ignore """All those porter stemmer implementations look hideous; make at least the stem method nicer. """ - def stem(self, word): + def stem(self, word): # type: ignore # type: (unicode) -> unicode return PorterStemmer.stem(self, word, 0, len(word) - 1) From d8a420bd167f98d3a97f327d0c1c486b8e2607a4 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Sat, 17 Dec 2016 11:52:57 +0100 Subject: [PATCH 039/190] Reduce header levels in ISSUE_TEMPLATE.md --- .github/ISSUE_TEMPLATE.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 5caf7d6a4..4fbf6ff2e 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,26 +1,26 @@ Subject: <what happen when you do on which document project> -# Problem +### Problem - <Detail of problem> -## Procedure to reproduce the problem +#### Procedure to reproduce the problem ``` <Paste your command-line here which cause the problem> ``` -## Error logs / results +#### Error logs / results ``` <Paste your error log here> ``` - <public link of unexpected result if you have> -## Expected results +#### Expected results <Describe what to actually do> -# Reproducible project / your project +### Reproducible project / your project - <link to your project, or attach zipped small project sample> -# Environment info +### Environment info - OS: <Unix/Linux/Mac/Win/other with version> - Python version: - Sphinx version: From ffa6f5a8e97801e60a4e8a2adad05ffabb8ccdaf Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 17 Dec 2016 23:49:35 +0900 Subject: [PATCH 040/190] Refactor env.dependencies using defaultdict --- sphinx/environment/__init__.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index ecd8230d2..6ee3013ce 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -18,6 +18,7 @@ import codecs import fnmatch from os import path from glob import glob +from collections import defaultdict from six import iteritems, itervalues, class_types, next from six.moves import cPickle as pickle 
@@ -176,7 +177,7 @@ class BuildEnvironment(object): self.all_docs = {} # type: Dict[unicode, float] # docname -> mtime at the time of reading # contains all read docnames - self.dependencies = {} # type: Dict[unicode, Set[unicode]] + self.dependencies = defaultdict(set) # type: Dict[unicode, Set[unicode]] # docname -> set of dependent file # names, relative to documentation root self.included = set() # type: Set[unicode] @@ -462,7 +463,7 @@ class BuildEnvironment(object): self.config.language, self.config.gettext_compact) for filename in catalog_files: - self.dependencies.setdefault(docname, set()).add(filename) + self.dependencies[docname].add(filename) def get_outdated_files(self, config_changed): # type: (bool) -> Tuple[Set[unicode], Set[unicode], Set[unicode]] @@ -497,7 +498,7 @@ class BuildEnvironment(object): changed.add(docname) continue # finally, check the mtime of dependencies - for dep in self.dependencies.get(docname, ()): + for dep in self.dependencies[docname]: try: # this will do the right thing when dep is absolute too deppath = path.join(self.srcdir, dep) @@ -844,7 +845,7 @@ class BuildEnvironment(object): *filename* should be absolute or relative to the source directory. """ - self.dependencies.setdefault(self.docname, set()).add(filename) + self.dependencies[self.docname].add(filename) def note_included(self, filename): # type: (unicode) -> None @@ -887,7 +888,7 @@ class BuildEnvironment(object): dep = dep.decode(fs_encoding) relpath = relative_path(frompath, path.normpath(path.join(cwd, dep))) - self.dependencies.setdefault(docname, set()).add(relpath) + self.dependencies[docname].add(relpath) def process_downloads(self, docname, doctree): # type: (unicode, nodes.Node) -> None @@ -895,7 +896,7 @@ class BuildEnvironment(object): for node in doctree.traverse(addnodes.download_reference): targetname = node['reftarget'] rel_filename, filename = self.relfn2path(targetname, docname) - self.dependencies.setdefault(docname, set()).add(rel_filename) + self.dependencies[docname].add(rel_filename) if not os.access(filename, os.R_OK): self.warn_node('download file not readable: %s' % filename, node) @@ -962,7 +963,7 @@ class BuildEnvironment(object): # map image paths to unique image names (so that they can be put # into a single directory) for imgpath in itervalues(candidates): - self.dependencies.setdefault(docname, set()).add(imgpath) + self.dependencies[docname].add(imgpath) if not os.access(path.join(self.srcdir, imgpath), os.R_OK): self.warn_node('image file not readable: %s' % imgpath, node) From 9c6f2a4969ce2125f334deb17f88ef112c849c80 Mon Sep 17 00:00:00 2001 From: shimizukawa <shimizukawa@gmail.com> Date: Sun, 18 Dec 2016 01:18:05 +0900 Subject: [PATCH 041/190] refs #3222: add PULL_REQUEST_TEMPLATE.md --- .github/PULL_REQUEST_TEMPLATE.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..073a57795 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,18 @@ +Subject: <short purpose of this pull request> + +### Feature or Bugfix +<!-- please choose --> +- Feature +- Bugfix + +### Purpose +- <long purpose of this pull request> +- <Environment if this PR depends on> + +### Detail +- <feature1 or bug1> +- <feature2 or bug2> + +### Relates +- <URL or Ticket> + From 4ece70109361ed8f753063a9d924731285c1d2c8 Mon Sep 17 00:00:00 2001 From: shimizukawa <shimizukawa@gmail.com> Date: Sun, 18 Dec 2016 
09:45:24 +0900 Subject: [PATCH 042/190] refs #2336: Add ``imported_members`` option to ``sphinx-autogen`` command to document imported members. --- CHANGES | 2 ++ sphinx/ext/autosummary/generate.py | 21 +++++++++++++-------- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index 5bae0ac56..086b6f131 100644 --- a/CHANGES +++ b/CHANGES @@ -13,6 +13,8 @@ Features added -------------- * #3136: Add ``:name:`` option to the directives in ``sphinx.ext.graphviz`` +* #2336: Add ``imported_members`` option to ``sphinx-autogen`` command to document + imported members. Bugs fixed ---------- diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py index 28907167e..70cbee71e 100644 --- a/sphinx/ext/autosummary/generate.py +++ b/sphinx/ext/autosummary/generate.py @@ -70,6 +70,9 @@ def main(argv=sys.argv): p.add_option("-t", "--templates", action="store", type="string", dest="templates", default=None, help="Custom template directory (default: %default)") + p.add_option("-i", "--imported-members", action="store_true", + dest="imported_members", default=False, + help="Document imported members (default: %default)") options, args = p.parse_args(argv[1:]) if len(args) < 1: @@ -77,7 +80,8 @@ def main(argv=sys.argv): generate_autosummary_docs(args, options.output_dir, "." + options.suffix, - template_dir=options.templates) + template_dir=options.templates, + imported_members=options.imported_members) def _simple_info(msg): @@ -94,8 +98,9 @@ def _simple_warn(msg): def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', warn=_simple_warn, info=_simple_info, - base_path=None, builder=None, template_dir=None): - # type: (List[unicode], unicode, unicode, Callable, Callable, unicode, Builder, unicode) -> None # NOQA + base_path=None, builder=None, template_dir=None, + imported_members=False): + # type: (List[unicode], unicode, unicode, Callable, Callable, unicode, Builder, unicode, bool) -> None # NOQA showed_sources = list(sorted(sources)) if len(showed_sources) > 20: @@ -186,17 +191,17 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', if doc.objtype == 'module': ns['members'] = dir(obj) ns['functions'], ns['all_functions'] = \ - get_members(obj, 'function') + get_members(obj, 'function', imported=imported_members) ns['classes'], ns['all_classes'] = \ - get_members(obj, 'class') + get_members(obj, 'class', imported=imported_members) ns['exceptions'], ns['all_exceptions'] = \ - get_members(obj, 'exception') + get_members(obj, 'exception', imported=imported_members) elif doc.objtype == 'class': ns['members'] = dir(obj) ns['methods'], ns['all_methods'] = \ - get_members(obj, 'method', ['__init__']) + get_members(obj, 'method', ['__init__'], imported=imported_members) ns['attributes'], ns['all_attributes'] = \ - get_members(obj, 'attribute') + get_members(obj, 'attribute', imported=imported_members) parts = name.split('.') if doc.objtype in ('method', 'attribute'): From f95b978b580b275520c693d0be960a5d66070c04 Mon Sep 17 00:00:00 2001 From: shimizukawa <shimizukawa@gmail.com> Date: Sun, 18 Dec 2016 09:45:39 +0900 Subject: [PATCH 043/190] fix nits for typing --- sphinx/ext/autosummary/generate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py index 70cbee71e..cf57d3ea2 100644 --- a/sphinx/ext/autosummary/generate.py +++ b/sphinx/ext/autosummary/generate.py @@ -51,7 +51,7 @@ add_documenter(InstanceAttributeDocumenter) if False: # For 
type annotation - from typing import Any, Callable, Tuple # NOQA + from typing import Any, Callable, Tuple, List # NOQA from sphinx import addnodes # NOQA from sphinx.builders import Builder # NOQA from sphinx.environment import BuildEnvironment # NOQA From 673418e7701bc291679de5f41480cff62dec77ce Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 18 Dec 2016 11:04:29 +0900 Subject: [PATCH 044/190] Increment ENV_VERSION --- sphinx/environment/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 6ee3013ce..a952146fe 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -76,7 +76,7 @@ default_settings = { # or changed to properly invalidate pickle files. # # NOTE: increase base version by 2 to have distinct numbers for Py2 and 3 -ENV_VERSION = 50 + (sys.version_info[0] - 2) +ENV_VERSION = 51 + (sys.version_info[0] - 2) dummy_reporter = Reporter('', 4, 4) From e9abc5182f55c110f4856a1b620e4ca0a701fdbe Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 25 Dec 2016 14:50:38 +0900 Subject: [PATCH 045/190] Add type annotation --- sphinx/builders/html.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index e64f4e5a4..f4b043b06 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -763,7 +763,7 @@ class StandaloneHTMLBuilder(Builder): self.indexer.feed(pagename, filename, title, doctree) except TypeError: # fallback for old search-adapters - self.indexer.feed(pagename, title, doctree) + self.indexer.feed(pagename, title, doctree) # type: ignore def _get_local_toctree(self, docname, collapse=True, **kwds): # type: (unicode, bool, Any) -> unicode From ef60dbfce09286b20b7385333d63a60321784e68 Mon Sep 17 00:00:00 2001 From: Rob Ruana <rob@robruana.com> Date: Sun, 25 Dec 2016 14:36:52 -0800 Subject: [PATCH 046/190] Fixes #3276: Removes inappropriate hyphenation from field-name elements --- sphinx/themes/basic/static/basic.css_t | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sphinx/themes/basic/static/basic.css_t b/sphinx/themes/basic/static/basic.css_t index d70003d42..c6a304f14 100644 --- a/sphinx/themes/basic/static/basic.css_t +++ b/sphinx/themes/basic/static/basic.css_t @@ -398,6 +398,13 @@ table.field-list td, table.field-list th { margin: 0; } +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + /* -- other body styles ----------------------------------------------------- */ ol.arabic { From da148eceb863770ea5e1f9bf5b122e3be0b8784a Mon Sep 17 00:00:00 2001 From: Jakub Wilk <jwilk@jwilk.net> Date: Tue, 27 Dec 2016 22:01:48 +0100 Subject: [PATCH 047/190] Update obsolete comment about Mercurial Sphinx has been maintained in Git since 2015.
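For context, the comment touched by this patch describes version-stamping logic along the lines of the sketch below. This is only an illustration of the idea, not the code in sphinx/__init__.py; the function name, the ``git rev-parse`` invocation, and the error handling are assumptions made for the example.

    import subprocess

    def stamp_display_version(version, repo_dir):
        # Only development versions (marked with a trailing '+') get a commit
        # hash appended; released versions are shown as-is.
        if not version.endswith('+'):
            return version
        try:
            # Ask the git checkout for its abbreviated commit hash.
            out = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],
                                          cwd=repo_dir)
            return version + '/' + out.decode('ascii').strip()
        except (OSError, subprocess.CalledProcessError):
            # Not a git checkout (e.g. installed from an sdist): keep the plain version.
            return version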
--- sphinx/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/__init__.py b/sphinx/__init__.py index fb47a88ae..ad9493ecd 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -42,7 +42,7 @@ package_dir = path.abspath(path.dirname(__file__)) __display_version__ = __version__ # used for command line version if __version__.endswith('+'): - # try to find out the changeset hash if checked out from hg, and append + # try to find out the commit hash if checked out from git, and append # it to __version__ (since we use this value from setup.py, it gets # automatically propagated to an installed copy as well) __display_version__ = __version__ From 528de30ed3092401b27fab19be15470a8bf65c64 Mon Sep 17 00:00:00 2001 From: Eric Wieser <wieser.eric@gmail.com> Date: Sat, 31 Dec 2016 10:03:53 +0000 Subject: [PATCH 048/190] Fix typos in sphinx.domains.cpp.AST class names Assuming this is a public API, the old names need to stick around --- sphinx/domains/cpp.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index 55d6e8a43..e9c274eda 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -1418,7 +1418,7 @@ class ASTTrailingTypeSpecName(ASTBase): self.nestedName.describe_signature(signode, mode, env, symbol=symbol) -class ASTFunctinoParameter(ASTBase): +class ASTFunctionParameter(ASTBase): def __init__(self, arg, ellipsis=False): # type: (Any, bool) -> None self.arg = arg @@ -1453,6 +1453,8 @@ class ASTFunctinoParameter(ASTBase): else: self.arg.describe_signature(signode, mode, env, symbol=symbol) +# backwards-compatible typo +ASTFunctinoParameter = ASTFunctionParameter class ASTParametersQualifiers(ASTBase): def __init__(self, args, volatile, const, refQual, exceptionSpec, override, @@ -2186,7 +2188,7 @@ class ASTDeclaratorParen(ASTBase): self.next.describe_signature(signode, "noneIsName", env, symbol) -class ASTDecleratorNameParamQual(ASTBase): +class ASTDeclaratorNameParamQual(ASTBase): def __init__(self, declId, arrayOps, paramQual): # type: (Any, List[Any], Any) -> None self.declId = declId @@ -2286,6 +2288,9 @@ class ASTDecleratorNameParamQual(ASTBase): if self.paramQual: self.paramQual.describe_signature(signode, mode, env, symbol) +# backwards-compatible typo +ASTDecleratorNameParamQual = ASTDeclaratorNameParamQual + class ASTInitializer(ASTBase): def __init__(self, value): @@ -3644,7 +3649,7 @@ class DefinitionParser(object): while 1: self.skip_ws() if self.skip_string('...'): - args.append(ASTFunctinoParameter(None, True)) + args.append(ASTFunctionParameter(None, True)) self.skip_ws() if not self.skip_string(')'): self.fail('Expected ")" after "..." in ' @@ -3654,7 +3659,7 @@ class DefinitionParser(object): # even in function pointers and similar. arg = self._parse_type_with_init(outer=None, named='single') # TODO: parse default parameters # TODO: didn't we just do that? 
- args.append(ASTFunctinoParameter(arg)) + args.append(ASTFunctionParameter(arg)) self.skip_ws() if self.skip_string(','): @@ -3824,7 +3829,7 @@ class DefinitionParser(object): return ASTDeclSpecs(outer, leftSpecs, rightSpecs, trailing) def _parse_declarator_name_param_qual(self, named, paramMode, typed): - # type: (Union[bool, unicode], unicode, bool) -> ASTDecleratorNameParamQual + # type: (Union[bool, unicode], unicode, bool) -> ASTDeclaratorNameParamQual # now we should parse the name, and then suffixes if named == 'maybe': pos = self.pos @@ -3860,7 +3865,7 @@ class DefinitionParser(object): else: break paramQual = self._parse_parameters_and_qualifiers(paramMode) - return ASTDecleratorNameParamQual(declId=declId, arrayOps=arrayOps, + return ASTDeclaratorNameParamQual(declId=declId, arrayOps=arrayOps, paramQual=paramQual) def _parse_declerator(self, named, paramMode, typed=True): @@ -3924,7 +3929,7 @@ class DefinitionParser(object): if paramMode == "operatorCast": # TODO: we should be able to parse cast operators which return # function pointers. For now, just hax it and ignore. - return ASTDecleratorNameParamQual(declId=None, arrayOps=[], + return ASTDeclaratorNameParamQual(declId=None, arrayOps=[], paramQual=None) # maybe this is the beginning of params and quals,try that first, # otherwise assume it's noptr->declarator > ( ptr-declarator ) From 07f282361812feda888b9beafd428bf97767fcba Mon Sep 17 00:00:00 2001 From: Eric Wieser <wieser.eric@gmail.com> Date: Sat, 31 Dec 2016 10:10:10 +0000 Subject: [PATCH 049/190] Fix name of _parse_declerator [sic] --- sphinx/domains/cpp.py | 53 +++++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index e9c274eda..f72488ff7 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -95,9 +95,9 @@ if False: attribute-specifier-seq[opt] decl-specifier-seq[opt] init-declarator-list[opt] ; # Drop the semi-colon. For now: drop the attributes (TODO). - # Use at most 1 init-declerator. - -> decl-specifier-seq init-declerator - -> decl-specifier-seq declerator initializer + # Use at most 1 init-declarator. + -> decl-specifier-seq init-declarator + -> decl-specifier-seq declarator initializer decl-specifier -> storage-class-specifier -> @@ -158,22 +158,22 @@ if False: | template-argument-list "," template-argument "..."[opt] template-argument -> constant-expression - | type-specifier-seq abstract-declerator + | type-specifier-seq abstract-declarator | id-expression - declerator -> - ptr-declerator + declarator -> + ptr-declarator | noptr-declarator parameters-and-qualifiers trailing-return-type (TODO: for now we don't support trailing-eturn-type) - ptr-declerator -> - noptr-declerator + ptr-declarator -> + noptr-declarator | ptr-operator ptr-declarator - noptr-declerator -> + noptr-declarator -> declarator-id attribute-specifier-seq[opt] -> "..."[opt] id-expression | rest-of-trailing - | noptr-declerator parameters-and-qualifiers + | noptr-declarator parameters-and-qualifiers | noptr-declarator "[" constant-expression[opt] "]" attribute-specifier-seq[opt] | "(" ptr-declarator ")" @@ -235,20 +235,20 @@ if False: # Drop the attributes -> decl-specifier-seq abstract-declarator[opt] grammar, typedef-like: no initilizer - decl-specifier-seq declerator + decl-specifier-seq declarator Can start with a templateDeclPrefix. 
member_object: - goal: as a type_object which must have a declerator, and optionally + goal: as a type_object which must have a declarator, and optionally with a initializer grammar: - decl-specifier-seq declerator initializer + decl-specifier-seq declarator initializer Can start with a templateDeclPrefix. function_object: goal: a function declaration, TODO: what about templates? for now: skip grammar: no initializer - decl-specifier-seq declerator + decl-specifier-seq declarator Can start with a templateDeclPrefix. class_object: @@ -3868,7 +3868,7 @@ class DefinitionParser(object): return ASTDeclaratorNameParamQual(declId=declId, arrayOps=arrayOps, paramQual=paramQual) - def _parse_declerator(self, named, paramMode, typed=True): + def _parse_declarator(self, named, paramMode, typed=True): # type: (Union[bool, unicode], unicode, bool) -> Any # 'typed' here means 'parse return type stuff' if paramMode not in ('type', 'function', 'operatorCast'): @@ -3890,14 +3890,14 @@ class DefinitionParser(object): if const: continue break - next = self._parse_declerator(named, paramMode, typed) + next = self._parse_declarator(named, paramMode, typed) return ASTDeclaratorPtr(next=next, volatile=volatile, const=const) # TODO: shouldn't we parse an R-value ref here first? if typed and self.skip_string("&"): - next = self._parse_declerator(named, paramMode, typed) + next = self._parse_declarator(named, paramMode, typed) return ASTDeclaratorRef(next=next) if typed and self.skip_string("..."): - next = self._parse_declerator(named, paramMode, False) + next = self._parse_declarator(named, paramMode, False) return ASTDeclaratorParamPack(next=next) if typed: # pointer to member pos = self.pos @@ -3923,7 +3923,7 @@ class DefinitionParser(object): if const: continue break - next = self._parse_declerator(named, paramMode, typed) + next = self._parse_declarator(named, paramMode, typed) return ASTDeclaratorMemPtr(name, const, volatile, next=next) if typed and self.current_char == '(': # note: peeking, not skipping if paramMode == "operatorCast": @@ -3948,10 +3948,10 @@ class DefinitionParser(object): # TODO: hmm, if there is a name, it must be in inner, right? # TODO: hmm, if there must be parameters, they must b # inside, right? 
- inner = self._parse_declerator(named, paramMode, typed) + inner = self._parse_declarator(named, paramMode, typed) if not self.skip_string(')'): self.fail("Expected ')' in \"( ptr-declarator )\"") - next = self._parse_declerator(named=False, + next = self._parse_declarator(named=False, paramMode="type", typed=typed) return ASTDeclaratorParen(inner=inner, next=next) @@ -3969,6 +3969,9 @@ class DefinitionParser(object): header = "Error in declarator or parameters and qualifiers" raise self._make_multi_error(prevErrors, header) + # backwards-compatible typo + _parse_declerator = _parse_declarator + def _parse_initializer(self, outer=None): # type: (unicode) -> ASTInitializer self.skip_ws() @@ -4011,7 +4014,7 @@ class DefinitionParser(object): # first try without the type try: declSpecs = self._parse_decl_specs(outer=outer, typed=False) - decl = self._parse_declerator(named=True, paramMode=outer, + decl = self._parse_declarator(named=True, paramMode=outer, typed=False) self.assert_end() except DefinitionError as exUntyped: @@ -4025,7 +4028,7 @@ class DefinitionParser(object): self.pos = startPos try: declSpecs = self._parse_decl_specs(outer=outer) - decl = self._parse_declerator(named=True, paramMode=outer) + decl = self._parse_declarator(named=True, paramMode=outer) except DefinitionError as exTyped: self.pos = startPos if outer == 'type': @@ -4056,7 +4059,7 @@ class DefinitionParser(object): self.pos = startPos typed = True declSpecs = self._parse_decl_specs(outer=outer, typed=typed) - decl = self._parse_declerator(named=True, paramMode=outer, + decl = self._parse_declarator(named=True, paramMode=outer, typed=typed) else: paramMode = 'type' @@ -4068,7 +4071,7 @@ class DefinitionParser(object): elif outer == 'templateParam': named = 'single' declSpecs = self._parse_decl_specs(outer=outer) - decl = self._parse_declerator(named=named, paramMode=paramMode) + decl = self._parse_declarator(named=named, paramMode=paramMode) return ASTType(declSpecs, decl) def _parse_type_with_init(self, named, outer): From 7e8545a642c4dccce09f5e469a3678cf9427b442 Mon Sep 17 00:00:00 2001 From: Eric Wieser <wieser.eric@gmail.com> Date: Sat, 31 Dec 2016 10:47:09 +0000 Subject: [PATCH 050/190] PEP8 compliance --- sphinx/domains/cpp.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index f72488ff7..78b1e15dd 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -1453,9 +1453,11 @@ class ASTFunctionParameter(ASTBase): else: self.arg.describe_signature(signode, mode, env, symbol=symbol) + # backwards-compatible typo ASTFunctinoParameter = ASTFunctionParameter + class ASTParametersQualifiers(ASTBase): def __init__(self, args, volatile, const, refQual, exceptionSpec, override, final, initializer): @@ -2288,6 +2290,7 @@ class ASTDeclaratorNameParamQual(ASTBase): if self.paramQual: self.paramQual.describe_signature(signode, mode, env, symbol) + # backwards-compatible typo ASTDecleratorNameParamQual = ASTDeclaratorNameParamQual From 09e3f8adfd4ae316a1ea74c9582b90babaefd683 Mon Sep 17 00:00:00 2001 From: Eric Wieser <wieser.eric@gmail.com> Date: Sat, 31 Dec 2016 10:52:16 +0000 Subject: [PATCH 051/190] Remove backwards-compatible typos Decreed in #3297 not to be a public API --- sphinx/domains/cpp.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index 78b1e15dd..1ced8aba3 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -1454,10 +1454,6 @@ class ASTFunctionParameter(ASTBase): 
self.arg.describe_signature(signode, mode, env, symbol=symbol) -# backwards-compatible typo -ASTFunctinoParameter = ASTFunctionParameter - - class ASTParametersQualifiers(ASTBase): def __init__(self, args, volatile, const, refQual, exceptionSpec, override, final, initializer): @@ -2291,10 +2287,6 @@ class ASTDeclaratorNameParamQual(ASTBase): self.paramQual.describe_signature(signode, mode, env, symbol) -# backwards-compatible typo -ASTDecleratorNameParamQual = ASTDeclaratorNameParamQual - - class ASTInitializer(ASTBase): def __init__(self, value): # type: (unicode) -> None @@ -3972,9 +3964,6 @@ class DefinitionParser(object): header = "Error in declarator or parameters and qualifiers" raise self._make_multi_error(prevErrors, header) - # backwards-compatible typo - _parse_declerator = _parse_declarator - def _parse_initializer(self, outer=None): # type: (unicode) -> ASTInitializer self.skip_ws() From 51a5cde381024468cece33e907dba879a5c9ce7c Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Mon, 2 Jan 2017 12:35:49 +0900 Subject: [PATCH 052/190] Refactor sphinx.util.pycompat (reorder definitions) --- sphinx/util/pycompat.py | 78 +++++++++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 30 deletions(-) diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py index f25e14023..185772cce 100644 --- a/sphinx/util/pycompat.py +++ b/sphinx/util/pycompat.py @@ -30,20 +30,43 @@ NoneType = type(None) # ------------------------------------------------------------------------------ # Python 2/3 compatibility +# prefix for Unicode strings if PY3: - # Python 3 - # prefix for Unicode strings u = '' - from io import TextIOWrapper +else: + u = 'u' - # safely encode a string for printing to the terminal + +# TextIOWrapper +if PY3: + from io import TextIOWrapper +else: + def TextIOWrapper(stream, encoding): + # type: (file, str) -> unicode + return codecs.lookup(encoding or 'ascii')[2](stream) + + +# sys_encoding: some kind of default system encoding; should be used with +# a lenient error handler +if PY3: + sys_encoding = sys.getdefaultencoding() +else: + sys_encoding = __import__('locale').getpreferredencoding() + + +# terminal_safe(): safely encode a string for printing to the terminal +if PY3: def terminal_safe(s): # type: (unicode) -> unicode return s.encode('ascii', 'backslashreplace').decode('ascii') - # some kind of default system encoding; should be used with a lenient - # error handler - sys_encoding = sys.getdefaultencoding() +else: + def terminal_safe(s): + # type: (unicode) -> unicode + return s.encode('ascii', 'backslashreplace') + +# convert_with_2to3(): +if PY3: # support for running 2to3 over config files def convert_with_2to3(filepath): # type: (unicode) -> unicode @@ -60,37 +83,27 @@ if PY3: # try to match ParseError details with SyntaxError details raise SyntaxError(err.msg, (filepath, lineno, offset, err.value)) return text_type(tree) - from html import escape as htmlescape # noqa: >= Python 3.2 +else: + # no need to refactor on 2.x versions + convert_with_2to3 = None # type: ignore + +# htmlescape() +if PY3: + from html import escape as htmlescape +else: + from cgi import escape as htmlescape # NOQA + + +# UnicodeMixin +if PY3: class UnicodeMixin(object): """Mixin class to handle defining the proper __str__/__unicode__ methods in Python 2 or 3.""" def __str__(self): return self.__unicode__() - - from textwrap import indent - else: - # Python 2 - u = 'u' - # no need to refactor on 2.x versions - convert_with_2to3 = None # type: ignore - - def 
TextIOWrapper(stream, encoding): - # type: (file, str) -> unicode - return codecs.lookup(encoding or 'ascii')[2](stream) - - # safely encode a string for printing to the terminal - def terminal_safe(s): - # type: (unicode) -> unicode - return s.encode('ascii', 'backslashreplace') - # some kind of default system encoding; should be used with a lenient - # error handler - sys_encoding = __import__('locale').getpreferredencoding() - # use Python 3 name - from cgi import escape as htmlescape # noqa: F401 - class UnicodeMixin(object): """Mixin class to handle defining the proper __str__/__unicode__ methods in Python 2 or 3.""" @@ -98,6 +111,11 @@ else: def __str__(self): return self.__unicode__().encode('utf8') + +# indent() +if PY3: + from textwrap import indent +else: # backport from python3 def indent(text, prefix, predicate=None): # type: (unicode, unicode, Callable) -> unicode From 6d4e6454093953943e79d4db6efeb17390870e62 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 20 Dec 2016 23:13:41 +0900 Subject: [PATCH 053/190] Add sphinx.util.logging --- sphinx/application.py | 38 ++++----- sphinx/util/logging.py | 183 +++++++++++++++++++++++++++++++++++++++++ tests/test_intl.py | 4 +- 3 files changed, 203 insertions(+), 22 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 398a4a124..8ec170fb2 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -15,6 +15,7 @@ from __future__ import print_function import os import sys import types +import warnings import posixpath import traceback from os import path @@ -30,20 +31,21 @@ from docutils.parsers.rst import convert_directive_function, \ import sphinx from sphinx import package_dir, locale from sphinx.config import Config -from sphinx.errors import SphinxError, SphinxWarning, ExtensionError, \ - VersionRequirementError, ConfigError +from sphinx.errors import SphinxError, ExtensionError, VersionRequirementError, \ + ConfigError from sphinx.domains import ObjType from sphinx.domains.std import GenericObject, Target, StandardDomain +from sphinx.deprecation import RemovedInSphinx17Warning from sphinx.environment import BuildEnvironment from sphinx.io import SphinxStandaloneReader from sphinx.roles import XRefRole from sphinx.util import pycompat # noqa: F401 from sphinx.util import import_object +from sphinx.util import logging from sphinx.util.tags import Tags from sphinx.util.osutil import ENOENT -from sphinx.util.logging import is_suppressed_warning from sphinx.util.console import ( # type: ignore - bold, lightgray, darkgray, darkred, darkgreen, term_width_line + bold, lightgray, darkgray, darkgreen, term_width_line ) from sphinx.util.i18n import find_catalog_source_files @@ -109,6 +111,8 @@ ENV_PICKLE_FILENAME = 'environment.pickle' # Values are Sphinx version that merge the extension. 
EXTENSION_BLACKLIST = {"sphinxjp.themecore": "1.2"} # type: Dict[unicode, unicode] +logger = logging.getLogger(__name__) + class Sphinx(object): @@ -151,6 +155,7 @@ class Sphinx(object): self._warning = warning self._warncount = 0 self.warningiserror = warningiserror + logging.setup(self, self._status, self._warning) self._events = events.copy() self._translators = {} # type: Dict[unicode, nodes.GenericNodeVisitor] @@ -385,8 +390,8 @@ class Sphinx(object): wfile.flush() self.messagelog.append(message) - def warn(self, message, location=None, prefix='WARNING: ', - type=None, subtype=None, colorfunc=darkred): + def warn(self, message, location=None, prefix=None, + type=None, subtype=None, colorfunc=None): # type: (unicode, unicode, unicode, unicode, unicode, Callable) -> None """Emit a warning. @@ -403,21 +408,14 @@ class Sphinx(object): :meth:`.BuildEnvironment.warn` since that will collect all warnings during parsing for later output. """ - if is_suppressed_warning(type, subtype, self.config.suppress_warnings): - return + if prefix: + warnings.warn('prefix option of warn() is now deprecated.', + RemovedInSphinx17Warning) + if colorfunc: + warnings.warn('colorfunc option of warn() is now deprecated.', + RemovedInSphinx17Warning) - if isinstance(location, tuple): - docname, lineno = location - if docname: - location = '%s:%s' % (self.env.doc2path(docname), lineno or '') - else: - location = None - warntext = location and '%s: %s%s\n' % (location, prefix, message) or \ - '%s%s\n' % (prefix, message) - if self.warningiserror: - raise SphinxWarning(warntext) - self._warncount += 1 - self._log(colorfunc(warntext), self._warning, True) + logger.warning(message, type=type, subtype=subtype, location=location) def info(self, message='', nonl=False): # type: (unicode, bool) -> None diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index ef91b728b..de06bfcf4 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -8,6 +8,114 @@ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ +from __future__ import absolute_import + +import logging +import logging.handlers +from six import string_types +from contextlib import contextmanager +from docutils.utils import get_source_line + +from sphinx.errors import SphinxWarning +from sphinx.util.console import darkred # type: ignore + + +def getLogger(name): + """Get logger wrapped by SphinxLoggerAdapter.""" + return SphinxLoggerAdapter(logging.getLogger(name), {}) + + +class SphinxLogRecord(logging.LogRecord): + """Log record class supporting location""" + def getMessage(self): + message = super(SphinxLogRecord, self).getMessage() + if isinstance(message, string_types): + location = getattr(self, 'location', None) + if location: + message = '%s: WARNING: %s' % (location, message) + elif 'WARNING:' not in message: + message = 'WARNING: %s' % message + + return darkred(message) + else: + return message + + +class SphinxLoggerAdapter(logging.LoggerAdapter): + """LoggerAdapter allowing ``type`` and ``subtype`` keywords.""" + + def warn(self, message, location=None, **kwargs): + """Emit a warning. + + :param message: a message of warning + :param location: a tuple of (docname, lineno) or a string describing the location + """ + if location: + kwargs['location'] = location + + self.warning(message, **kwargs) + + def warn_node(self, message, node, **kwargs): + """Emit a warning for specific node. 
+ + :param message: a message of warning + :param node: a node related with the warning + """ + kwargs['location'] = "%s:%s" % get_source_line(node) + self.warning(message, **kwargs) + + def process(self, msg, kwargs): + extra = kwargs.setdefault('extra', {}) + if 'type' in kwargs: + extra['type'] = kwargs.pop('type') + if 'subtype' in kwargs: + extra['subtype'] = kwargs.pop('subtype') + if 'location' in kwargs: + extra['location'] = kwargs.pop('location') + + return msg, kwargs + + +class MemoryHandler(logging.handlers.BufferingHandler): + """Handler buffering all logs.""" + + def __init__(self): + super(MemoryHandler, self).__init__(-1) + + def shouldFlush(self, record): + return False # never flush + + def flushTo(self, logger): + self.acquire() + try: + for record in self.buffer: + logger.handle(record) + self.buffer = [] # type: ignore + finally: + self.release() + + +@contextmanager +def pending_logging(): + """contextmanager to pend logging temporary.""" + logger = logging.getLogger() + memhandler = MemoryHandler() + + try: + handlers = [] + for handler in logger.handlers[:]: + logger.removeHandler(handler) + handlers.append(handler) + + logger.addHandler(memhandler) + yield + finally: + logger.removeHandler(memhandler) + + for handler in handlers: + logger.addHandler(handler) + + memhandler.flushTo(logger) def is_suppressed_warning(type, subtype, suppress_warnings): @@ -27,3 +135,78 @@ def is_suppressed_warning(type, subtype, suppress_warnings): return True return False + + +class WarningSuppressor(logging.Filter): + """Filter logs by `suppress_warnings`.""" + + def __init__(self, app): + self.app = app + super(WarningSuppressor, self).__init__() + + def filter(self, record): + type = getattr(record, 'type', None) + subtype = getattr(record, 'subtype', None) + + if is_suppressed_warning(type, subtype, self.app.config.suppress_warnings): + return False + else: + self.app._warncount += 1 + return True + + +class WarningIsErrorFilter(logging.Filter): + """Raise exception if warning emitted.""" + + def __init__(self, app): + self.app = app + super(WarningIsErrorFilter, self).__init__() + + def filter(self, record): + if self.app.warningiserror: + raise SphinxWarning(record.msg % record.args) + else: + return True + + +class LogRecordTranslator(logging.Filter): + """Converts a log record to one Sphinx expects + + * Make a instance of SphinxLogRecord + * docname to path if location given + """ + def __init__(self, app): + self.app = app + super(LogRecordTranslator, self).__init__() + + def filter(self, record): + if isinstance(record, logging.LogRecord): + record.__class__ = SphinxLogRecord # force subclassing to handle location + + location = getattr(record, 'location', None) + if isinstance(location, tuple): + docname, lineno = location + if docname and lineno: + record.location = '%s:%s' % (self.app.env.doc2path(docname), lineno) + elif docname: + record.location = '%s' % (self.app.env.doc2path(docname)) + else: + record.location = None + + return True + + +def setup(app, status, warning): + """Setup root logger for Sphinx""" + logger = logging.getLogger() + + # clear all handlers + for handler in logger.handlers[:]: + logger.removeHandler(handler) + + warning_handler = logging.StreamHandler(warning) + warning_handler.addFilter(WarningSuppressor(app)) + warning_handler.addFilter(WarningIsErrorFilter(app)) + warning_handler.addFilter(LogRecordTranslator(app)) + warning_handler.setLevel(logging.WARNING) + logger.addHandler(warning_handler) diff --git a/tests/test_intl.py 
b/tests/test_intl.py index 21d6f763b..fae7b06a1 100644 --- a/tests/test_intl.py +++ b/tests/test_intl.py @@ -23,7 +23,7 @@ from six import string_types from util import tempdir, rootdir, path, gen_with_app, with_app, SkipTest, \ assert_re_search, assert_not_re_search, assert_in, assert_not_in, \ - assert_startswith, assert_node, repr_as, etree_parse + assert_startswith, assert_node, repr_as, etree_parse, strip_escseq root = tempdir / 'test-intl' @@ -931,4 +931,4 @@ def test_image_glob_intl_using_figure_language_filename(app, status, warning): def getwarning(warnings): - return repr_as(warnings.getvalue().replace(os.sep, '/'), '<warnings>') + return repr_as(strip_escseq(warnings.getvalue().replace(os.sep, '/')), '<warnings>') From 85dcd7baa85a04abe2fd571085911cc027b1f7ea Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 20 Dec 2016 23:21:30 +0900 Subject: [PATCH 054/190] Use sphinx.util.logging instead app.warn() --- sphinx/application.py | 64 +++++++++++++++++------------------ sphinx/builders/__init__.py | 34 ++++++++----------- sphinx/builders/changes.py | 3 +- sphinx/builders/html.py | 8 ++--- sphinx/config.py | 37 ++++++++++---------- sphinx/highlighting.py | 24 ++++++------- sphinx/theming.py | 18 +++++----- sphinx/transforms/__init__.py | 3 +- sphinx/util/i18n.py | 19 ++++++----- sphinx/writers/html.py | 7 ++-- sphinx/writers/latex.py | 10 +++--- tests/test_build_html.py | 4 +-- tests/test_config.py | 21 ++++++------ tests/test_highlighting.py | 14 ++++---- 14 files changed, 130 insertions(+), 136 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 8ec170fb2..95e1ff17d 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -177,11 +177,11 @@ class Sphinx(object): self.tags = Tags(tags) self.config = Config(confdir, CONFIG_FILENAME, confoverrides or {}, self.tags) - self.config.check_unicode(self.warn) + self.config.check_unicode() # defer checking types until i18n has been initialized # initialize some limited config variables before loading extensions - self.config.pre_init_values(self.warn) + self.config.pre_init_values() # check the Sphinx version if requested if self.config.needs_sphinx and self.config.needs_sphinx > sphinx.__display_version__: @@ -227,7 +227,7 @@ class Sphinx(object): ) # now that we know all config values, collect them from conf.py - self.config.init_values(self.warn) + self.config.init_values() # check extension versions if requested if self.config.needs_extensions: @@ -251,7 +251,7 @@ class Sphinx(object): # set up translation infrastructure self._init_i18n() # check all configuration values for permissible types - self.config.check_types(self.warn) + self.config.check_types() # set up source_parsers self._init_source_parsers() # set up the build environment @@ -528,9 +528,9 @@ class Sphinx(object): if extension in self._extensions: return if extension in EXTENSION_BLACKLIST: - self.warn('the extension %r was already merged with Sphinx since version %s; ' - 'this extension is ignored.' % ( - extension, EXTENSION_BLACKLIST[extension])) + logger.warning('the extension %r was already merged with Sphinx since version %s; ' + 'this extension is ignored.', + extension, EXTENSION_BLACKLIST[extension]) return self._setting_up_extension.append(extension) try: @@ -540,8 +540,8 @@ class Sphinx(object): raise ExtensionError('Could not import extension %s' % extension, err) if not hasattr(mod, 'setup'): - self.warn('extension %r has no setup() function; is it really ' - 'a Sphinx extension module?' 
% extension) + logger.warning('extension %r has no setup() function; is it really ' + 'a Sphinx extension module?', extension) ext_meta = None else: try: @@ -561,9 +561,9 @@ class Sphinx(object): if not ext_meta.get('version'): ext_meta['version'] = 'unknown version' except Exception: - self.warn('extension %r returned an unsupported object from ' - 'its setup() function; it should return None or a ' - 'metadata dictionary' % extension) + logger.warning('extension %r returned an unsupported object from ' + 'its setup() function; it should return None or a ' + 'metadata dictionary', extension) ext_meta = {'version': 'unknown version'} self._extensions[extension] = mod self._extension_metadata[extension] = ext_meta @@ -668,10 +668,10 @@ class Sphinx(object): self.debug('[app] adding node: %r', (node, kwds)) if not kwds.pop('override', False) and \ hasattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__): - self.warn('while setting up extension %s: node class %r is ' - 'already registered, its visitors will be overridden' % - (self._setting_up_extension, node.__name__), - type='app', subtype='add_node') + logger.warning('while setting up extension %s: node class %r is ' + 'already registered, its visitors will be overridden', + self._setting_up_extension, node.__name__, + type='app', subtype='add_node') nodes._add_node_class_names([node.__name__]) for key, val in iteritems(kwds): try: @@ -722,10 +722,10 @@ class Sphinx(object): self.debug('[app] adding directive: %r', (name, obj, content, arguments, options)) if name in directives._directives: - self.warn('while setting up extension %s: directive %r is ' - 'already registered, it will be overridden' % - (self._setting_up_extension[-1], name), - type='app', subtype='add_directive') + logger.warning('while setting up extension %s: directive %r is ' + 'already registered, it will be overridden', + self._setting_up_extension[-1], name, + type='app', subtype='add_directive') directives.register_directive( name, self._directive_helper(obj, content, arguments, **options)) @@ -733,10 +733,10 @@ class Sphinx(object): # type: (unicode, Any) -> None self.debug('[app] adding role: %r', (name, role)) if name in roles._roles: - self.warn('while setting up extension %s: role %r is ' - 'already registered, it will be overridden' % - (self._setting_up_extension[-1], name), - type='app', subtype='add_role') + logger.warning('while setting up extension %s: role %r is ' + 'already registered, it will be overridden', + self._setting_up_extension[-1], name, + type='app', subtype='add_role') roles.register_local_role(name, role) def add_generic_role(self, name, nodeclass): @@ -745,10 +745,10 @@ class Sphinx(object): # register_canonical_role self.debug('[app] adding generic role: %r', (name, nodeclass)) if name in roles._roles: - self.warn('while setting up extension %s: role %r is ' - 'already registered, it will be overridden' % - (self._setting_up_extension[-1], name), - type='app', subtype='add_generic_role') + logger.warning('while setting up extension %s: role %r is ' + 'already registered, it will be overridden', + self._setting_up_extension[-1], name, + type='app', subtype='add_generic_role') role = roles.GenericRole(name, nodeclass) roles.register_local_role(name, role) @@ -891,10 +891,10 @@ class Sphinx(object): # type: (unicode, Parser) -> None self.debug('[app] adding search source_parser: %r, %r', suffix, parser) if suffix in self._additional_source_parsers: - self.warn('while setting up extension %s: source_parser for %r is ' - 'already registered, 
it will be overridden' % - (self._setting_up_extension[-1], suffix), - type='app', subtype='add_source_parser') + logger.warning('while setting up extension %s: source_parser for %r is ' + 'already registered, it will be overridden', + self._setting_up_extension[-1], suffix, + type='app', subtype='add_source_parser') self._additional_source_parsers[suffix] = parser diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 78ce7d89e..b8baf7792 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -19,7 +19,7 @@ except ImportError: from docutils import nodes -from sphinx.util import i18n, path_stabilize +from sphinx.util import i18n, path_stabilize, logging from sphinx.util.osutil import SEP, relative_uri from sphinx.util.i18n import find_catalog from sphinx.util.console import bold, darkgreen # type: ignore @@ -284,13 +284,9 @@ class Builder(object): self.info(bold('building [%s]' % self.name) + ': ' + summary) # while reading, collect all warnings from docutils - warnings = [] - self.env.set_warnfunc(lambda *args, **kwargs: warnings.append((args, kwargs))) - updated_docnames = set(self.env.update(self.config, self.srcdir, - self.doctreedir, self.app)) - self.env.set_warnfunc(self.warn) - for warning, kwargs in warnings: - self.warn(*warning, **kwargs) + with logging.pending_logging(): + updated_docnames = set(self.env.update(self.config, self.srcdir, + self.doctreedir, self.app)) doccount = len(updated_docnames) self.info(bold('looking for now-outdated files... '), nonl=1) @@ -376,25 +372,23 @@ class Builder(object): self.info('done') warnings = [] # type: List[Tuple[Tuple, Dict]] - self.env.set_warnfunc(lambda *args, **kwargs: warnings.append((args, kwargs))) if self.parallel_ok: # number of subprocesses is parallel-1 because the main process # is busy loading doctrees and doing write_doc_serialized() + warnings = [] self._write_parallel(sorted(docnames), warnings, nproc=self.app.parallel - 1) else: - self._write_serial(sorted(docnames), warnings) - self.env.set_warnfunc(self.warn) + self._write_serial(sorted(docnames)) - def _write_serial(self, docnames, warnings): - # type: (Sequence[unicode], List[Tuple[Tuple, Dict]]) -> None - for docname in self.app.status_iterator( - docnames, 'writing output... ', darkgreen, len(docnames)): - doctree = self.env.get_and_resolve_doctree(docname, self) - self.write_doc_serialized(docname, doctree) - self.write_doc(docname, doctree) - for warning, kwargs in warnings: - self.warn(*warning, **kwargs) + def _write_serial(self, docnames): + # type: (Sequence[unicode]) -> None + with logging.pending_logging(): + for docname in self.app.status_iterator( + docnames, 'writing output... 
', darkgreen, len(docnames)): + doctree = self.env.get_and_resolve_doctree(docname, self) + self.write_doc_serialized(docname, doctree) + self.write_doc(docname, doctree) def _write_parallel(self, docnames, warnings, nproc): # type: (Iterable[unicode], List[Tuple[Tuple, Dict]], int) -> None diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py index 1140f854a..de9e95bf1 100644 --- a/sphinx/builders/changes.py +++ b/sphinx/builders/changes.py @@ -38,8 +38,7 @@ class ChangesBuilder(Builder): def init(self): # type: () -> None self.create_template_bridge() - Theme.init_themes(self.confdir, self.config.html_theme_path, - warn=self.warn) + Theme.init_themes(self.confdir, self.config.html_theme_path) self.theme = Theme('default') self.templates.init(self, self.theme) diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index f4b043b06..31ee0a371 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -159,10 +159,9 @@ class StandaloneHTMLBuilder(Builder): def init_templates(self): # type: () -> None - Theme.init_themes(self.confdir, self.config.html_theme_path, - warn=self.warn) + Theme.init_themes(self.confdir, self.config.html_theme_path) themename, themeoptions = self.get_theme_config() - self.theme = Theme(themename, warn=self.warn) + self.theme = Theme(themename) self.theme_options = themeoptions.copy() self.create_template_bridge() self.templates.init(self, self.theme) @@ -314,8 +313,7 @@ class StandaloneHTMLBuilder(Builder): lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = format_date(lufmt or _('%b %d, %Y'), - language=self.config.language, - warn=self.warn) + language=self.config.language) else: self.last_updated = None diff --git a/sphinx/config.py b/sphinx/config.py index 5872ce8bb..7c163e1e3 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -16,6 +16,7 @@ from six import PY2, PY3, iteritems, string_types, binary_type, text_type, integ from sphinx.errors import ConfigError from sphinx.locale import l_ +from sphinx.util import logging from sphinx.util.i18n import format_date from sphinx.util.osutil import cd from sphinx.util.pycompat import execfile_, NoneType @@ -25,6 +26,8 @@ if False: from typing import Any, Callable, Tuple # NOQA from sphinx.util.tags import Tags # NOQA +logger = logging.getLogger(__name__) + nonascii_re = re.compile(br'[\x80-\xff]') copyright_year_re = re.compile(r'^((\d{4}-)?)(\d{4})(?=[ ,])') @@ -166,8 +169,8 @@ class Config(object): config[k] = copyright_year_re.sub('\g<1>%s' % format_date('%Y'), # type: ignore # NOQA config[k]) - def check_types(self, warn): - # type: (Callable) -> None + def check_types(self): + # type: () -> None # check all values for deviation from the default value's type, since # that can result in TypeErrors all over the place # NB. 
since config values might use l_() we have to wait with calling @@ -186,7 +189,7 @@ class Config(object): current = self[name] if isinstance(permitted, ENUM): if not permitted.match(current): - warn(CONFIG_ENUM_WARNING.format( + logger.warning(CONFIG_ENUM_WARNING.format( name=name, current=current, candidates=permitted.candidates)) else: if type(current) is type(default): @@ -201,22 +204,22 @@ class Config(object): continue # at least we share a non-trivial base class if permitted: - warn(CONFIG_PERMITTED_TYPE_WARNING.format( + logger.warning(CONFIG_PERMITTED_TYPE_WARNING.format( name=name, current=type(current), permitted=str([cls.__name__ for cls in permitted]))) else: - warn(CONFIG_TYPE_WARNING.format( + logger.warning(CONFIG_TYPE_WARNING.format( name=name, current=type(current), default=type(default))) - def check_unicode(self, warn): - # type: (Callable) -> None + def check_unicode(self): + # type: () -> None # check all string values for non-ASCII characters in bytestrings, # since that can result in UnicodeErrors all over the place for name, value in iteritems(self._raw_config): if isinstance(value, binary_type) and nonascii_re.search(value): # type: ignore - warn('the config value %r is set to a string with non-ASCII ' - 'characters; this can lead to Unicode errors occurring. ' - 'Please use Unicode strings, e.g. %r.' % (name, u'Content')) + logger.warning('the config value %r is set to a string with non-ASCII ' + 'characters; this can lead to Unicode errors occurring. ' + 'Please use Unicode strings, e.g. %r.' % (name, u'Content')) def convert_overrides(self, name, value): # type: (unicode, Any) -> Any @@ -244,8 +247,8 @@ class Config(object): else: return value - def pre_init_values(self, warn): - # type: (Callable) -> None + def pre_init_values(self): + # type: () -> None """Initialize some limited config variables before loading extensions""" variables = ['needs_sphinx', 'suppress_warnings', 'html_translator_class'] for name in variables: @@ -255,10 +258,10 @@ class Config(object): elif name in self._raw_config: self.__dict__[name] = self._raw_config[name] except ValueError as exc: - warn(exc) + logger.warning("%s" % exc) - def init_values(self, warn): - # type: (Callable) -> None + def init_values(self): + # type: () -> None config = self._raw_config for valname, value in iteritems(self.overrides): try: @@ -267,14 +270,14 @@ class Config(object): config.setdefault(realvalname, {})[key] = value # type: ignore continue elif valname not in self.values: - warn('unknown config value %r in override, ignoring' % valname) + logger.warning('unknown config value %r in override, ignoring' % valname) continue if isinstance(value, string_types): config[valname] = self.convert_overrides(valname, value) else: config[valname] = value except ValueError as exc: - warn(exc) + logger.warning("%s" % exc) for name in config: if name in self.values: self.__dict__[name] = config[name] diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py index 94f562159..493ecb7a7 100644 --- a/sphinx/highlighting.py +++ b/sphinx/highlighting.py @@ -11,6 +11,7 @@ from six import text_type +from sphinx.util import logging from sphinx.util.pycompat import htmlescape from sphinx.util.texescape import tex_hl_escape_map_new from sphinx.ext import doctest @@ -26,6 +27,8 @@ from pygments.styles import get_style_by_name from pygments.util import ClassNotFound from sphinx.pygments_styles import SphinxStyle, NoneStyle +logger = logging.getLogger(__name__) + lexers = dict( none = TextLexer(stripnl=False), python = 
PythonLexer(stripnl=False), @@ -92,7 +95,7 @@ class PygmentsBridge(object): return '\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n' + \ source + '\\end{Verbatim}\n' - def highlight_block(self, source, lang, opts=None, warn=None, force=False, **kwargs): + def highlight_block(self, source, lang, opts=None, location=None, force=False, **kwargs): if not isinstance(source, text_type): source = source.decode() @@ -120,11 +123,9 @@ class PygmentsBridge(object): try: lexer = lexers[lang] = get_lexer_by_name(lang, **(opts or {})) except ClassNotFound: - if warn: - warn('Pygments lexer name %r is not known' % lang) - lexer = lexers['none'] - else: - raise + logger.warning('Pygments lexer name %r is not known', lang, + location=location) + lexer = lexers['none'] else: lexer.add_filter('raiseonerror') @@ -137,17 +138,16 @@ class PygmentsBridge(object): formatter = self.get_formatter(**kwargs) try: hlsource = highlight(source, lexer, formatter) - except ErrorToken as exc: + except ErrorToken: # this is most probably not the selected language, # so let it pass unhighlighted if lang == 'default': pass # automatic highlighting failed. - elif warn: - warn('Could not lex literal_block as "%s". ' - 'Highlighting skipped.' % lang, - type='misc', subtype='highlighting_failure') else: - raise exc + logger.warning('Could not lex literal_block as "%s". ' + 'Highlighting skipped.', lang, + type='misc', subtype='highlighting_failure', + location=location) hlsource = highlight(source, lexers['none'], formatter) if self.dest == 'html': return hlsource diff --git a/sphinx/theming.py b/sphinx/theming.py index 4e05652cd..ec7867b3b 100644 --- a/sphinx/theming.py +++ b/sphinx/theming.py @@ -26,6 +26,9 @@ except ImportError: from sphinx import package_dir from sphinx.errors import ThemeError +from sphinx.util import logging + +logger = logging.getLogger(__name__) if False: # For type annotation @@ -43,8 +46,8 @@ class Theme(object): themepath = [] # type: List[unicode] @classmethod - def init_themes(cls, confdir, theme_path, warn=None): - # type: (unicode, unicode, Callable) -> None + def init_themes(cls, confdir, theme_path): + # type: (unicode, unicode) -> None """Search all theme paths for available themes.""" cls.themepath = list(theme_path) cls.themepath.append(path.join(package_dir, 'themes')) @@ -62,9 +65,8 @@ class Theme(object): tname = theme[:-4] tinfo = zfile except Exception: - if warn: - warn('file %r on theme path is not a valid ' - 'zipfile or contains no theme' % theme) + logger.warning('file %r on theme path is not a valid ' + 'zipfile or contains no theme', theme) continue else: if not path.isfile(path.join(themedir, theme, THEMECONF)): @@ -105,8 +107,8 @@ class Theme(object): cls.themes[name] = (path.join(themedir, name), None) return - def __init__(self, name, warn=None): - # type: (unicode, Callable) -> None + def __init__(self, name): + # type: (unicode) -> None if name not in self.themes: self.load_extra_theme(name) if name not in self.themes: @@ -162,7 +164,7 @@ class Theme(object): raise ThemeError('no theme named %r found, inherited by %r' % (inherit, name)) else: - self.base = Theme(inherit, warn=warn) + self.base = Theme(inherit) def get_confstr(self, section, name, default=NODEFAULT): # type: (unicode, unicode, Any) -> Any diff --git a/sphinx/transforms/__init__.py b/sphinx/transforms/__init__.py index efd2d546c..a72541666 100644 --- a/sphinx/transforms/__init__.py +++ b/sphinx/transforms/__init__.py @@ -34,7 +34,6 @@ class DefaultSubstitutions(Transform): def apply(self): # type: () -> None - 
env = self.document.settings.env config = self.document.settings.env.config # only handle those not otherwise defined in the document to_handle = default_substitutions - set(self.document.substitution_defs) @@ -45,7 +44,7 @@ class DefaultSubstitutions(Transform): if refname == 'today' and not text: # special handling: can also specify a strftime format text = format_date(config.today_fmt or _('%b %d, %Y'), - language=config.language, warn=env.warn) + language=config.language) ref.replace_self(nodes.Text(text, text)) diff --git a/sphinx/util/i18n.py b/sphinx/util/i18n.py index 1a5e51e28..42eb477a4 100644 --- a/sphinx/util/i18n.py +++ b/sphinx/util/i18n.py @@ -22,9 +22,12 @@ from babel.messages.pofile import read_po from babel.messages.mofile import write_mo from sphinx.errors import SphinxError +from sphinx.util import logging from sphinx.util.osutil import SEP, walk from sphinx.deprecation import RemovedInSphinx16Warning +logger = logging.getLogger(__name__) + if False: # For type annotation from typing import Callable # NOQA @@ -171,8 +174,8 @@ date_format_mappings = { } -def babel_format_date(date, format, locale, warn=None, formatter=babel.dates.format_date): - # type: (datetime, unicode, unicode, Callable, Callable) -> unicode +def babel_format_date(date, format, locale, formatter=babel.dates.format_date): + # type: (datetime, unicode, unicode, Callable) -> unicode if locale is None: locale = 'en' @@ -187,15 +190,13 @@ def babel_format_date(date, format, locale, warn=None, formatter=babel.dates.for # fallback to English return formatter(date, format, locale='en') except AttributeError: - if warn: - warn('Invalid date format. Quote the string by single quote ' - 'if you want to output it directly: %s' % format) - + logger.warning('Invalid date format. 
Quote the string by single quote ' + 'if you want to output it directly: %s', format) return format -def format_date(format, date=None, language=None, warn=None): - # type: (str, datetime, unicode, Callable) -> unicode +def format_date(format, date=None, language=None): + # type: (str, datetime, unicode) -> unicode if format is None: format = 'medium' @@ -213,7 +214,7 @@ def format_date(format, date=None, language=None, warn=None): warnings.warn('LDML format support will be dropped at Sphinx-1.6', RemovedInSphinx16Warning) - return babel_format_date(date, format, locale=language, warn=warn, + return babel_format_date(date, format, locale=language, formatter=babel.dates.format_datetime) else: # consider the format as ustrftime's and try to convert it to babel's diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py index ebffa2f28..b2d6587e5 100644 --- a/sphinx/writers/html.py +++ b/sphinx/writers/html.py @@ -364,11 +364,10 @@ class HTMLTranslator(BaseTranslator): else: opts = {} - def warner(msg, **kwargs): - self.builder.warn(msg, (self.builder.current_docname, node.line), **kwargs) highlighted = self.highlighter.highlight_block( - node.rawsource, lang, opts=opts, warn=warner, linenos=linenos, - **highlight_args) + node.rawsource, lang, opts=opts, linenos=linenos, + location=(self.builder.current_docname, node.line), **highlight_args + ) starttag = self.starttag(node, 'div', suffix='', CLASS='highlight-%s' % lang) self.body.append(starttag + highlighted + '</div>\n') diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 43c530851..ab5ce9307 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -2155,12 +2155,10 @@ class LaTeXTranslator(nodes.NodeVisitor): else: opts = {} - def warner(msg, **kwargs): - # type: (unicode) -> None - self.builder.warn(msg, (self.curfilestack[-1], node.line), **kwargs) - hlcode = self.highlighter.highlight_block(code, lang, opts=opts, - warn=warner, linenos=linenos, - **highlight_args) + hlcode = self.highlighter.highlight_block( + code, lang, opts=opts, linenos=linenos, + location=(self.curfilestack[-1], node.line), **highlight_args + ) # workaround for Unicode issue hlcode = hlcode.replace(u'€', u'@texteuro[]') if self.in_footnote: diff --git a/tests/test_build_html.py b/tests/test_build_html.py index d8aff88ab..ba28cbc30 100644 --- a/tests/test_build_html.py +++ b/tests/test_build_html.py @@ -40,10 +40,10 @@ with "\\?": b?'here: >>>(\\\\|/)xbb<<<' """ HTML_WARNINGS = ENV_WARNINGS + """\ -%(root)s/index.rst:\\d+: WARNING: no matching candidate for image URI u'foo.\\*' -%(root)s/index.rst:\\d+: WARNING: Could not lex literal_block as "c". Highlighting skipped. %(root)s/index.rst:\\d+: WARNING: unknown option: &option %(root)s/index.rst:\\d+: WARNING: citation not found: missing +%(root)s/index.rst:\\d+: WARNING: no matching candidate for image URI u'foo.\\*' +%(root)s/index.rst:\\d+: WARNING: Could not lex literal_block as "c". Highlighting skipped. 
""" if PY3: diff --git a/tests/test_config.py b/tests/test_config.py index 1b3c94957..a076636d6 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -87,7 +87,8 @@ def test_extension_values(app, status, warning): @with_tempdir -def test_errors_warnings(dir): +@mock.patch("sphinx.config.logger") +def test_errors_warnings(dir, logger): # test the error for syntax errors in the config file (dir / 'conf.py').write_text(u'project = \n', encoding='ascii') raises_msg(ConfigError, 'conf.py', Config, dir, 'conf.py', {}, None) @@ -97,8 +98,9 @@ def test_errors_warnings(dir): u'# -*- coding: utf-8\n\nproject = u"Jägermeister"\n', encoding='utf-8') cfg = Config(dir, 'conf.py', {}, None) - cfg.init_values(lambda warning: 1/0) + cfg.init_values() assert cfg.project == u'Jägermeister' + assert logger.called is False # test the warning for bytestrings with non-ascii content # bytestrings with non-ascii content are a syntax error in python3 so we @@ -108,13 +110,10 @@ def test_errors_warnings(dir): (dir / 'conf.py').write_text( u'# -*- coding: latin-1\nproject = "fooä"\n', encoding='latin-1') cfg = Config(dir, 'conf.py', {}, None) - warned = [False] - def warn(msg): - warned[0] = True - - cfg.check_unicode(warn) - assert warned[0] + assert logger.warning.called is False + cfg.check_unicode() + assert logger.warning.called is True @with_tempdir @@ -152,14 +151,16 @@ def test_needs_sphinx(): @with_tempdir -def test_config_eol(tmpdir): +@mock.patch("sphinx.config.logger") +def test_config_eol(tmpdir, logger): # test config file's eol patterns: LF, CRLF configfile = tmpdir / 'conf.py' for eol in (b'\n', b'\r\n'): configfile.write_bytes(b'project = "spam"' + eol) cfg = Config(tmpdir, 'conf.py', {}, None) - cfg.init_values(lambda warning: 1/0) + cfg.init_values() assert cfg.project == u'spam' + assert logger.called is False @with_app(confoverrides={'master_doc': 123, diff --git a/tests/test_highlighting.py b/tests/test_highlighting.py index 328abdf31..46cf95d1a 100644 --- a/tests/test_highlighting.py +++ b/tests/test_highlighting.py @@ -9,9 +9,9 @@ :license: BSD, see LICENSE for details. """ +import mock from pygments.lexer import RegexLexer from pygments.token import Text, Name -from pygments.filters import ErrorToken from pygments.formatters.html import HtmlFormatter from sphinx.highlighting import PygmentsBridge @@ -89,7 +89,8 @@ def test_trim_doctest_flags(): PygmentsBridge.html_formatter = HtmlFormatter -def test_default_highlight(): +@mock.patch('sphinx.highlighting.logger') +def test_default_highlight(logger): bridge = PygmentsBridge('html') # default: highlights as python3 @@ -107,8 +108,7 @@ def test_default_highlight(): '<span class="s2">"Hello sphinx world"</span>\n</pre></div>\n') # python3: raises error if highlighting failed - try: - ret = bridge.highlight_block('reST ``like`` text', 'python3') - assert False, "highlight_block() does not raise any exceptions" - except ErrorToken: - pass # raise parsing error + ret = bridge.highlight_block('reST ``like`` text', 'python3') + logger.warning.assert_called_with('Could not lex literal_block as "%s". 
' + 'Highlighting skipped.', 'python3', + type='misc', subtype='highlighting_failure', location=None) From fb227f24cba672b44b4b71d11ccef0ac891e2dd3 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Mon, 5 Sep 2016 18:27:32 +0900 Subject: [PATCH 055/190] docutils bridge --- sphinx/io.py | 9 +++++++++ sphinx/util/docutils.py | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/sphinx/io.py b/sphinx/io.py index c6fea570e..e29420d97 100644 --- a/sphinx/io.py +++ b/sphinx/io.py @@ -24,6 +24,7 @@ from sphinx.transforms.i18n import ( PreserveTranslatableMessages, Locale, RemoveTranslatableInline, ) from sphinx.util import import_object, split_docinfo +from sphinx.util.docutils import LoggingReporter if False: # For type annotation @@ -73,6 +74,14 @@ class SphinxBaseReader(standalone.Reader): # type: () -> List[Transform] return standalone.Reader.get_transforms(self) + self.transforms + def new_document(self): + document = standalone.Reader.new_document(self) + reporter = document.reporter + document.reporter = LoggingReporter(reporter.source, reporter.report_level, + reporter.halt_level, reporter.debug_flag, + reporter.error_handler) + return document + class SphinxStandaloneReader(SphinxBaseReader): """ diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py index 0704f553e..084579815 100644 --- a/sphinx/util/docutils.py +++ b/sphinx/util/docutils.py @@ -10,12 +10,20 @@ """ from __future__ import absolute_import +import re from copy import copy from contextlib import contextmanager import docutils +from docutils.utils import Reporter from docutils.parsers.rst import directives, roles +from sphinx.util import logging + +logger = logging.getLogger(__name__) +report_re = re.compile('^(.+?:\d+): \((DEBUG|INFO|WARNING|ERROR|SEVERE)/(\d+)?\) (.+?)\n?$') + + if False: # For type annotation from typing import Any, Callable, Iterator, Tuple # NOQA @@ -113,3 +121,36 @@ class sphinx_domains(object): return self.lookup_domain_element('role', name) except ElementLookupError: return self.role_func(name, lang_module, lineno, reporter) + + +class WarningStream(object): + level_mapping = { + 'DEBUG': logger.debug, + 'INFO': logger.info, + 'WARNING': logger.warning, + 'ERROR': logger.error, + 'SEVERE': logger.critical, + } + + def write(self, text): + matched = report_re.search(text) + if not matched: + logger.warning(text.rstrip("\r\n")) + else: + location, type, level, message = matched.groups() + if type in self.level_mapping: + logger_method = self.level_mapping.get(type) + logger_method(message, location=location) + else: + logger.warning(text.rstrip("\r\n")) + + +class LoggingReporter(Reporter): + def __init__(self, source, report_level, halt_level, + debug=False, error_handler='backslashreplace'): + stream = WarningStream() + Reporter.__init__(self, source, report_level, halt_level, + stream, debug, error_handler=error_handler) + + def set_conditions(self, category, report_level, halt_level, debug=False): + Reporter.set_conditions(self, category, report_level, halt_level, debug=debug) From 2507b4dd099897933384fedf9c59df67c7bff274 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 21 Dec 2016 01:09:38 +0900 Subject: [PATCH 056/190] Drop SphinxLoggerAdapter.warn() --- sphinx/util/logging.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index de06bfcf4..9c629ff5d 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -44,17 +44,6 @@ 
class SphinxLogRecord(logging.LogRecord): class SphinxLoggerAdapter(logging.LoggerAdapter): """LoggerAdapter allowing ``type`` and ``subtype`` keywords.""" - def warn(self, message, location=None, **kwargs): - """Emit a warning. - - :param message: a message of warning - :param location: a tuple of (docname, lineno) or a string describing the location - """ - if location: - kwargs['location'] = location - - self.warning(message, **kwargs) - def warn_node(self, message, node, **kwargs): """Emit a warning for specific node. From 3698fe2b0f593c39fcf324a92905358cd03201de Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 21 Dec 2016 15:01:50 +0900 Subject: [PATCH 057/190] Add testcase for sphinx.util.logging --- sphinx/util/logging.py | 13 ++++- tests/test_util_logging.py | 112 +++++++++++++++++++++++++++++++++++++ 2 files changed, 123 insertions(+), 2 deletions(-) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index 9c629ff5d..29157e2f3 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -50,7 +50,14 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): :param message: a message of warning :param node: a node related with the warning """ - kwargs['location'] = "%s:%s" % get_source_line(node) + (source, line) = get_source_line(node) + if source and line: + kwargs['location'] = "%s:%s" % (source, line) + elif source: + kwargs['location'] = "%s:" % source + elif line: + kwargs['location'] = "<unknown>:%s" % line + self.warning(message, **kwargs) def process(self, msg, kwargs): @@ -178,9 +185,11 @@ class LogRecordTranslator(logging.Filter): if docname and lineno: record.location = '%s:%s' % (self.app.env.doc2path(docname), lineno) elif docname: - record.location = '%s' % (self.app.env.doc2path(docname)) + record.location = '%s' % self.app.env.doc2path(docname) else: record.location = None + elif location and ':' not in location: + record.location = '%s' % self.app.env.doc2path(location) return True diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index d88d3cc6d..bcc316c10 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -10,8 +10,14 @@ """ from __future__ import print_function +from docutils import nodes + +from sphinx.errors import SphinxWarning +from sphinx.util import logging from sphinx.util.logging import is_suppressed_warning +from util import with_app, raises, strip_escseq + def test_is_suppressed_warning(): suppress_warnings = ["ref", "files.*", "rest.duplicated_labels"] @@ -24,3 +30,109 @@ def test_is_suppressed_warning(): assert is_suppressed_warning("files", "stylesheet", suppress_warnings) is True assert is_suppressed_warning("rest", "syntax", suppress_warnings) is False assert is_suppressed_warning("rest", "duplicated_labels", suppress_warnings) is True + + +@with_app() +def test_suppress_warnings(app, status, warning): + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + app.config.suppress_warnings = [] + warning.truncate(0) + logger.warning('message1', type='test', subtype='logging') + logger.warning('message2', type='test', subtype='crash') + logger.warning('message3', type='actual', subtype='logging') + assert 'message1' in warning.getvalue() + assert 'message2' in warning.getvalue() + assert 'message3' in warning.getvalue() + assert app._warncount == 3 + + app.config.suppress_warnings = ['test'] + warning.truncate(0) + logger.warning('message1', type='test', subtype='logging') + logger.warning('message2', type='test', subtype='crash') + 
logger.warning('message3', type='actual', subtype='logging') + assert 'message1' not in warning.getvalue() + assert 'message2' not in warning.getvalue() + assert 'message3' in warning.getvalue() + assert app._warncount == 4 + + app.config.suppress_warnings = ['test.logging'] + warning.truncate(0) + logger.warning('message1', type='test', subtype='logging') + logger.warning('message2', type='test', subtype='crash') + logger.warning('message3', type='actual', subtype='logging') + assert 'message1' not in warning.getvalue() + assert 'message2' in warning.getvalue() + assert 'message3' in warning.getvalue() + assert app._warncount == 6 + + +@with_app() +def test_warningiserror(app, status, warning): + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + # if False, warning is not error + app.warningiserror = False + logger.warning('message') + + # if True, warning raises SphinxWarning exception + app.warningiserror = True + raises(SphinxWarning, logger.warning, 'message') + + +@with_app() +def test_warning_location(app, status, warning): + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + logger.warning('message1', location='index') + assert 'index.txt: WARNING: message1' in warning.getvalue() + + logger.warning('message2', location=('index', 10)) + assert 'index.txt:10: WARNING: message2' in warning.getvalue() + + logger.warning('message3', location=None) + assert '\x1b[31mWARNING: message3' in warning.getvalue() # \x1b[31m = darkred + + +@with_app() +def test_warn_node(app, status, warning): + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + node = nodes.Node() + node.source, node.line = ('index.txt', 10) + logger.warn_node('message1', node) + assert 'index.txt:10: WARNING: message1' in warning.getvalue() + + node.source, node.line = ('index.txt', None) + logger.warn_node('message2', node) + assert 'index.txt:: WARNING: message2' in warning.getvalue() + + node.source, node.line = (None, 10) + logger.warn_node('message3', node) + assert '<unknown>:10: WARNING: message3' in warning.getvalue() + + node.source, node.line = (None, None) + logger.warn_node('message4', node) + assert '\x1b[31mWARNING: message4' in warning.getvalue() # \x1b[31m = darkred + + +@with_app() +def test_pending_logging(app, status, warning): + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + logger.warning('message1') + with logging.pending_logging(): + # not logged yet (bufferred) in here + logger.warning('message2') + logger.warning('message3') + assert 'WARNING: message1' in warning.getvalue() + assert 'WARNING: message2' not in warning.getvalue() + assert 'WARNING: message3' not in warning.getvalue() + + # actually logged as ordered + assert 'WARNING: message2\nWARNING: message3' in strip_escseq(warning.getvalue()) From 284ace16bf4ae5ffc6ca501f14da486450761a47 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 21 Dec 2016 18:36:02 +0900 Subject: [PATCH 058/190] Now sphinx.util.logging supports info and other logs --- sphinx/util/logging.py | 75 ++++++++++++++++++++++++++++++++++---- tests/test_util_logging.py | 38 +++++++++++++++++++ 2 files changed, 105 insertions(+), 8 deletions(-) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index 29157e2f3..5dde151e7 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -12,8 +12,9 @@ from __future__ import absolute_import import logging import logging.handlers -from six import string_types from contextlib 
import contextmanager + +from six import PY2, StringIO, string_types from docutils.utils import get_source_line from sphinx.errors import SphinxWarning @@ -25,10 +26,10 @@ def getLogger(name): return SphinxLoggerAdapter(logging.getLogger(name), {}) -class SphinxLogRecord(logging.LogRecord): +class SphinxWarningLogRecord(logging.LogRecord): """Log record class supporting location""" def getMessage(self): - message = super(SphinxLogRecord, self).getMessage() + message = super(SphinxWarningLogRecord, self).getMessage() if isinstance(message, string_types): location = getattr(self, 'location', None) if location: @@ -68,10 +69,53 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): extra['subtype'] = kwargs.pop('subtype') if 'location' in kwargs: extra['location'] = kwargs.pop('location') + if 'nonl' in kwargs: + extra['nonl'] = kwargs.pop('nonl') return msg, kwargs +class NewLineStreamHandlerPY2(logging.StreamHandler): + """StreamHandler which switches line terminator by record.nonl flag.""" + + def emit(self, record): + try: + self.acquire() + stream = self.stream + if getattr(record, 'nonl', False): + # remove return code forcely when nonl=True + self.stream = StringIO() + super(NewLineStreamHandlerPY2, self).emit(record) + stream.write(self.stream.getvalue()[:-1]) + stream.flush() + else: + super(NewLineStreamHandlerPY2, self).emit(record) + finally: + self.stream = stream + self.release() + + +class NewLineStreamHandlerPY3(logging.StreamHandler): + """StreamHandler which switches line terminator by record.nonl flag.""" + + def emit(self, record): + try: + self.acquire() + if getattr(record, 'nonl', False): + # skip appending terminator when nonl=True + self.terminator = '' + super(NewLineStreamHandlerPY3, self).emit(record) + finally: + self.terminator = '\n' + self.release() + + +if PY2: + NewLineStreamHandler = NewLineStreamHandlerPY2 +else: + NewLineStreamHandler = NewLineStreamHandlerPY3 + + class MemoryHandler(logging.handlers.BufferingHandler): """Handler buffering all logs.""" @@ -114,6 +158,16 @@ def pending_logging(): memhandler.flushTo(logger) +class InfoFilter(logging.Filter): + """Filter error and warning messages.""" + + def filter(self, record): + if record.levelno < logging.WARNING: + return True + else: + return False + + def is_suppressed_warning(type, subtype, suppress_warnings): """Check the warning is suppressed or not.""" if type is None: @@ -165,19 +219,19 @@ class WarningIsErrorFilter(logging.Filter): return True -class LogRecordTranslator(logging.Filter): +class WarningLogRecordTranslator(logging.Filter): """Converts a log record to one Sphinx expects - * Make a instance of SphinxLogRecord + * Make a instance of SphinxWarningLogRecord * docname to path if location given """ def __init__(self, app): self.app = app - super(LogRecordTranslator, self).__init__() + super(WarningLogRecordTranslator, self).__init__() def filter(self, record): if isinstance(record, logging.LogRecord): - record.__class__ = SphinxLogRecord # force subclassing to handle location + record.__class__ = SphinxWarningLogRecord # force subclassing to handle location location = getattr(record, 'location', None) if isinstance(location, tuple): @@ -197,14 +251,19 @@ class LogRecordTranslator(logging.Filter): def setup(app, status, warning): """Setup root logger for Sphinx""" logger = logging.getLogger() + logger.setLevel(logging.NOTSET) # clear all handlers for handler in logger.handlers[:]: logger.removeHandler(handler) + info_handler = NewLineStreamHandler(status) + 
info_handler.addFilter(InfoFilter()) + warning_handler = logging.StreamHandler(warning) warning_handler.addFilter(WarningSuppressor(app)) warning_handler.addFilter(WarningIsErrorFilter(app)) - warning_handler.addFilter(LogRecordTranslator(app)) + warning_handler.addFilter(WarningLogRecordTranslator(app)) warning_handler.setLevel(logging.WARNING) + logger.addHandler(info_handler) logger.addHandler(warning_handler) diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index bcc316c10..b7ce1ef10 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -19,6 +19,42 @@ from sphinx.util.logging import is_suppressed_warning from util import with_app, raises, strip_escseq +@with_app() +def test_info_and_warning(app, status, warning): + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + logger.debug('message1') + logger.info('message2') + logger.warning('message3') + logger.critical('message4') + logger.error('message5') + + assert 'message1' in status.getvalue() + assert 'message2' in status.getvalue() + assert 'message3' not in status.getvalue() + assert 'message4' not in status.getvalue() + assert 'message5' not in status.getvalue() + + assert 'message1' not in warning.getvalue() + assert 'message2' not in warning.getvalue() + assert 'message3' in warning.getvalue() + assert 'message4' in warning.getvalue() + assert 'message5' in warning.getvalue() + + +@with_app() +def test_nonl_info_log(app, status, warning): + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + logger.info('message1', nonl=True) + logger.info('message2') + logger.info('message3') + + assert 'message1message2\nmessage3' in status.getvalue() + + def test_is_suppressed_warning(): suppress_warnings = ["ref", "files.*", "rest.duplicated_labels"] @@ -37,6 +73,8 @@ def test_suppress_warnings(app, status, warning): logging.setup(app, status, warning) logger = logging.getLogger(__name__) + app._warncount = 0 # force reset + app.config.suppress_warnings = [] warning.truncate(0) logger.warning('message1', type='test', subtype='logging') From f23a4c6c92901e2505d581b30fc8cf04ca4d95ce Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 21 Dec 2016 23:52:22 +0900 Subject: [PATCH 059/190] Add type annotation to sphinx.util.logging --- sphinx/util/logging.py | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index 5dde151e7..73d9d093b 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -20,15 +20,25 @@ from docutils.utils import get_source_line from sphinx.errors import SphinxWarning from sphinx.util.console import darkred # type: ignore +if False: + # For type annotation + from typing import Any, Generator, IO, Tuple # NOQA + from docutils import nodes # NOQA + from sphinx.application import Sphinx # NOQA + def getLogger(name): + # type: (str) -> SphinxLoggerAdapter """Get logger wrapped by SphinxLoggerAdapter.""" return SphinxLoggerAdapter(logging.getLogger(name), {}) class SphinxWarningLogRecord(logging.LogRecord): """Log record class supporting location""" + location = None # type: Any + def getMessage(self): + # type: () -> str message = super(SphinxWarningLogRecord, self).getMessage() if isinstance(message, string_types): location = getattr(self, 'location', None) @@ -46,6 +56,7 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): """LoggerAdapter allowing ``type`` and ``subtype`` keywords.""" def 
warn_node(self, message, node, **kwargs): + # type: (unicode, nodes.Node, Any) -> None """Emit a warning for specific node. :param message: a message of warning @@ -61,7 +72,8 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): self.warning(message, **kwargs) - def process(self, msg, kwargs): + def process(self, msg, kwargs): # type: ignore + # type: (unicode, Dict) -> Tuple[unicode, Dict] extra = kwargs.setdefault('extra', {}) if 'type' in kwargs: extra['type'] = kwargs.pop('type') @@ -79,9 +91,10 @@ class NewLineStreamHandlerPY2(logging.StreamHandler): """StreamHandler which switches line terminator by record.nonl flag.""" def emit(self, record): + # type: (logging.LogRecord) -> None try: self.acquire() - stream = self.stream + stream = self.stream # type: ignore if getattr(record, 'nonl', False): # remove return code forcely when nonl=True self.stream = StringIO() @@ -99,6 +112,7 @@ class NewLineStreamHandlerPY3(logging.StreamHandler): """StreamHandler which switches line terminator by record.nonl flag.""" def emit(self, record): + # type: (logging.LogRecord) -> None try: self.acquire() if getattr(record, 'nonl', False): @@ -123,20 +137,23 @@ class MemoryHandler(logging.handlers.BufferingHandler): super(MemoryHandler, self).__init__(-1) def shouldFlush(self, record): + # type: (logging.LogRecord) -> bool return False # never flush def flushTo(self, logger): + # type: (logging.Logger) -> None self.acquire() try: for record in self.buffer: logger.handle(record) - self.buffer = [] # type: ignore + self.buffer = [] # type: List[logging.LogRecord] finally: self.release() @contextmanager def pending_logging(): + # type: () -> Generator """contextmanager to pend logging temporary.""" logger = logging.getLogger() memhandler = MemoryHandler() @@ -162,6 +179,7 @@ class InfoFilter(logging.Filter): """Filter error and warning messages.""" def filter(self, record): + # type: (logging.LogRecord) -> bool if record.levelno < logging.WARNING: return True else: @@ -169,6 +187,7 @@ class InfoFilter(logging.Filter): def is_suppressed_warning(type, subtype, suppress_warnings): + # type: (unicode, unicode, List[unicode]) -> bool """Check the warning is suppressed or not.""" if type is None: return False @@ -191,10 +210,12 @@ class WarningSuppressor(logging.Filter): """Filter logs by `suppress_warnings`.""" def __init__(self, app): + # type: (Sphinx) -> None self.app = app super(WarningSuppressor, self).__init__() def filter(self, record): + # type: (logging.LogRecord) -> bool type = getattr(record, 'type', None) subtype = getattr(record, 'subtype', None) @@ -209,10 +230,12 @@ class WarningIsErrorFilter(logging.Filter): """Raise exception if warning emitted.""" def __init__(self, app): + # type: (Sphinx) -> None self.app = app super(WarningIsErrorFilter, self).__init__() def filter(self, record): + # type: (logging.LogRecord) -> bool if self.app.warningiserror: raise SphinxWarning(record.msg % record.args) else: @@ -226,10 +249,12 @@ class WarningLogRecordTranslator(logging.Filter): * docname to path if location given """ def __init__(self, app): + # type: (Sphinx) -> None self.app = app super(WarningLogRecordTranslator, self).__init__() - def filter(self, record): + def filter(self, record): # type: ignore + # type: (SphinxWarningLogRecord) -> bool if isinstance(record, logging.LogRecord): record.__class__ = SphinxWarningLogRecord # force subclassing to handle location @@ -249,6 +274,7 @@ class WarningLogRecordTranslator(logging.Filter): def setup(app, status, warning): + # type: (Sphinx, IO, IO) -> None 
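Taken together, the adapter and the filters above form the logging entry point that extension code is expected to use; a minimal usage sketch (``my_extension`` and the messages are hypothetical):

    from sphinx.util import logging

    logger = logging.getLogger('my_extension')

    logger.info('reading templates... ', nonl=True)  # no newline; next info() continues the line
    logger.info('done')
    logger.warning('unknown option value: %r', 'foo',
                   type='my_extension', subtype='config',
                   location=('index', 12))
    # the warning above can be silenced with suppress_warnings = ['my_extension.config']
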
"""Setup root logger for Sphinx""" logger = logging.getLogger() logger.setLevel(logging.NOTSET) From 7358512f118b0a3c4e65c957ca08aa4fd17bd875 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 22 Dec 2016 01:20:17 +0900 Subject: [PATCH 060/190] logging.info() supports verbosity filter by app.verbosity --- sphinx/util/logging.py | 21 +++++++++++++ tests/test_util_logging.py | 64 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index 73d9d093b..9ebd661a4 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -13,6 +13,7 @@ from __future__ import absolute_import import logging import logging.handlers from contextlib import contextmanager +from collections import defaultdict from six import PY2, StringIO, string_types from docutils.utils import get_source_line @@ -27,6 +28,17 @@ if False: from sphinx.application import Sphinx # NOQA +VERBOSE = 15 +DEBUG2 = 5 +VERBOSITY_MAP = defaultdict(lambda: 0) # type: Dict[int, int] +VERBOSITY_MAP.update({ + 0: logging.INFO, + 1: VERBOSE, + 2: logging.DEBUG, + 3: DEBUG2, +}) + + def getLogger(name): # type: (str) -> SphinxLoggerAdapter """Get logger wrapped by SphinxLoggerAdapter.""" @@ -72,6 +84,14 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): self.warning(message, **kwargs) + def verbose(self, msg, *args, **kwargs): + # type: (unicode, Any, Any) -> None + self.log(VERBOSE, msg, *args, **kwargs) + + def debug2(self, msg, *args, **kwargs): + # type: (unicode, Any, Any) -> None + self.log(DEBUG2, msg, *args, **kwargs) + def process(self, msg, kwargs): # type: ignore # type: (unicode, Dict) -> Tuple[unicode, Dict] extra = kwargs.setdefault('extra', {}) @@ -285,6 +305,7 @@ def setup(app, status, warning): info_handler = NewLineStreamHandler(status) info_handler.addFilter(InfoFilter()) + info_handler.setLevel(VERBOSITY_MAP.get(app.verbosity)) warning_handler = logging.StreamHandler(warning) warning_handler.addFilter(WarningSuppressor(app)) diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index b7ce1ef10..6a4d0f315 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -21,6 +21,7 @@ from util import with_app, raises, strip_escseq @with_app() def test_info_and_warning(app, status, warning): + app.verbosity = 3 logging.setup(app, status, warning) logger = logging.getLogger(__name__) @@ -43,6 +44,69 @@ def test_info_and_warning(app, status, warning): assert 'message5' in warning.getvalue() +@with_app() +def test_verbosity_filter(app, status, warning): + # verbosity = 0: INFO + app.verbosity = 0 + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + logger.info('message1') + logger.verbose('message2') + logger.debug('message3') + logger.debug2('message4') + + assert 'message1' in status.getvalue() + assert 'message2' not in status.getvalue() + assert 'message3' not in status.getvalue() + assert 'message4' not in status.getvalue() + + # verbosity = 1: VERBOSE + app.verbosity = 1 + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + logger.info('message1') + logger.verbose('message2') + logger.debug('message3') + logger.debug2('message4') + + assert 'message1' in status.getvalue() + assert 'message2' in status.getvalue() + assert 'message3' not in status.getvalue() + assert 'message4' not in status.getvalue() + + # verbosity = 2: DEBUG + app.verbosity = 2 + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + 
logger.info('message1') + logger.verbose('message2') + logger.debug('message3') + logger.debug2('message4') + + assert 'message1' in status.getvalue() + assert 'message2' in status.getvalue() + assert 'message3' in status.getvalue() + assert 'message4' not in status.getvalue() + + # verbosity = 3: DEBUG2 + app.verbosity = 3 + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + logger.info('message1') + logger.verbose('message2') + logger.debug('message3') + logger.debug2('message4') + + assert 'message1' in status.getvalue() + assert 'message2' in status.getvalue() + assert 'message3' in status.getvalue() + assert 'message4' in status.getvalue() + + @with_app() def test_nonl_info_log(app, status, warning): logging.setup(app, status, warning) From 70d6a560f2e23acbfaa85d1eda7471cfca15f77d Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 22 Dec 2016 02:19:57 +0900 Subject: [PATCH 061/190] sphinx.util.logging supports colorized log --- sphinx/util/logging.py | 44 ++++++++++++++++++++++++++++---------- tests/test_util_logging.py | 31 +++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 11 deletions(-) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index 9ebd661a4..10043ccd4 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -15,11 +15,11 @@ import logging.handlers from contextlib import contextmanager from collections import defaultdict -from six import PY2, StringIO, string_types +from six import PY2, StringIO from docutils.utils import get_source_line from sphinx.errors import SphinxWarning -from sphinx.util.console import darkred # type: ignore +from sphinx.util.console import colorize if False: # For type annotation @@ -30,6 +30,7 @@ if False: VERBOSE = 15 DEBUG2 = 5 + VERBOSITY_MAP = defaultdict(lambda: 0) # type: Dict[int, int] VERBOSITY_MAP.update({ 0: logging.INFO, @@ -38,6 +39,13 @@ VERBOSITY_MAP.update({ 3: DEBUG2, }) +COLOR_MAP = defaultdict(lambda text: text) # type: Dict[int, unicode] +COLOR_MAP.update({ + logging.WARNING: 'darkred', + logging.DEBUG: 'darkgray', + DEBUG2: 'lightgray', +}) + def getLogger(name): # type: (str) -> SphinxLoggerAdapter @@ -52,16 +60,13 @@ class SphinxWarningLogRecord(logging.LogRecord): def getMessage(self): # type: () -> str message = super(SphinxWarningLogRecord, self).getMessage() - if isinstance(message, string_types): - location = getattr(self, 'location', None) - if location: - message = '%s: WARNING: %s' % (location, message) - elif 'WARNING:' not in message: - message = 'WARNING: %s' % message + location = getattr(self, 'location', None) + if location: + message = '%s: WARNING: %s' % (location, message) + elif 'WARNING:' not in message: + message = 'WARNING: %s' % message - return darkred(message) - else: - return message + return message class SphinxLoggerAdapter(logging.LoggerAdapter): @@ -103,6 +108,8 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): extra['location'] = kwargs.pop('location') if 'nonl' in kwargs: extra['nonl'] = kwargs.pop('nonl') + if 'color' in kwargs: + extra['color'] = kwargs.pop('color') return msg, kwargs @@ -293,6 +300,19 @@ class WarningLogRecordTranslator(logging.Filter): return True +class ColorizeFormatter(logging.Formatter): + def format(self, record): + message = super(ColorizeFormatter, self).format(record) + color = getattr(record, 'color', None) + if color is None: + color = COLOR_MAP.get(record.levelno) + + if color: + return colorize(color, message) + else: + return message + + def setup(app, status, 
warning): # type: (Sphinx, IO, IO) -> None """Setup root logger for Sphinx""" @@ -306,11 +326,13 @@ def setup(app, status, warning): info_handler = NewLineStreamHandler(status) info_handler.addFilter(InfoFilter()) info_handler.setLevel(VERBOSITY_MAP.get(app.verbosity)) + info_handler.setFormatter(ColorizeFormatter()) warning_handler = logging.StreamHandler(warning) warning_handler.addFilter(WarningSuppressor(app)) warning_handler.addFilter(WarningIsErrorFilter(app)) warning_handler.addFilter(WarningLogRecordTranslator(app)) warning_handler.setLevel(logging.WARNING) + warning_handler.setFormatter(ColorizeFormatter()) logger.addHandler(info_handler) logger.addHandler(warning_handler) diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index 6a4d0f315..7a7ce3b21 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -14,6 +14,7 @@ from docutils import nodes from sphinx.errors import SphinxWarning from sphinx.util import logging +from sphinx.util.console import colorize from sphinx.util.logging import is_suppressed_warning from util import with_app, raises, strip_escseq @@ -238,3 +239,33 @@ def test_pending_logging(app, status, warning): # actually logged as ordered assert 'WARNING: message2\nWARNING: message3' in strip_escseq(warning.getvalue()) + + +@with_app() +def test_colored_logs(app, status, warning): + app.verbosity = 3 + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + # default colors + logger.debug2('message1') + logger.debug('message2') + logger.verbose('message3') + logger.info('message4') + logger.warning('message5') + logger.critical('message6') + logger.error('message7') + + assert colorize('lightgray', 'message1') in status.getvalue() + assert colorize('darkgray', 'message2') in status.getvalue() + assert 'message3\n' in status.getvalue() # not colored + assert 'message4\n' in status.getvalue() # not colored + assert colorize('darkred', 'WARNING: message5') in warning.getvalue() + assert 'WARNING: message6\n' in warning.getvalue() # not colored + assert 'WARNING: message7\n' in warning.getvalue() # not colored + + # color specification + logger.debug('message8', color='white') + logger.info('message9', color='red') + assert colorize('white', 'message8') in status.getvalue() + assert colorize('red', 'message9') in status.getvalue() From b43523fcbecda58ffd1ed31d29d0c9363a42be86 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 22 Dec 2016 13:41:56 +0900 Subject: [PATCH 062/190] Use sphinx.util.logging instead app.info(), verbose(), debug() and debug2() --- sphinx/application.py | 183 +++++++++++------------------ sphinx/builders/__init__.py | 31 ++--- sphinx/builders/applehelp.py | 32 ++--- sphinx/builders/changes.py | 10 +- sphinx/builders/devhelp.py | 6 +- sphinx/builders/epub.py | 14 ++- sphinx/builders/epub3.py | 6 +- sphinx/builders/gettext.py | 9 +- sphinx/builders/html.py | 58 ++++----- sphinx/builders/htmlhelp.py | 14 ++- sphinx/builders/latex.py | 33 +++--- sphinx/builders/linkcheck.py | 22 ++-- sphinx/builders/manpage.py | 12 +- sphinx/builders/qthelp.py | 9 +- sphinx/builders/texinfo.py | 27 +++-- sphinx/environment/__init__.py | 10 +- sphinx/ext/autodoc.py | 28 +++-- sphinx/ext/autosummary/__init__.py | 7 +- sphinx/ext/doctest.py | 10 +- sphinx/ext/intersphinx.py | 7 +- sphinx/ext/viewcode.py | 12 +- sphinx/transforms/__init__.py | 6 +- sphinx/util/__init__.py | 13 ++ 23 files changed, 292 insertions(+), 267 deletions(-) diff --git a/sphinx/application.py 
b/sphinx/application.py index 95e1ff17d..181ef1d95 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -45,7 +45,7 @@ from sphinx.util import logging from sphinx.util.tags import Tags from sphinx.util.osutil import ENOENT from sphinx.util.console import ( # type: ignore - bold, lightgray, darkgray, darkgreen, term_width_line + bold, darkgreen, term_width_line ) from sphinx.util.i18n import find_catalog_source_files @@ -164,13 +164,13 @@ class Sphinx(object): self.messagelog = deque(maxlen=10) # type: deque # say hello to the world - self.info(bold('Running Sphinx v%s' % sphinx.__display_version__)) + logger.info(bold('Running Sphinx v%s' % sphinx.__display_version__)) # status code for command-line application self.statuscode = 0 if not path.isdir(outdir): - self.info('making output directory...') + logger.info('making output directory...') os.makedirs(outdir) # read config @@ -267,8 +267,8 @@ class Sphinx(object): the configuration. """ if self.config.language is not None: - self.info(bold('loading translations [%s]... ' % - self.config.language), nonl=True) + logger.info(bold('loading translations [%s]... ' % self.config.language), + nonl=True) user_locale_dirs = [ path.join(self.srcdir, x) for x in self.config.locale_dirs] # compile mo files if sphinx.po file in user locale directories are updated @@ -283,9 +283,9 @@ class Sphinx(object): if self.config.language is not None: if has_translation or self.config.language == 'en': # "en" never needs to be translated - self.info('done') + logger.info('done') else: - self.info('not available for built-in messages') + logger.info('not available for built-in messages') def _init_source_parsers(self): # type: () -> None @@ -305,7 +305,7 @@ class Sphinx(object): self.env.domains[domain] = self.domains[domain](self.env) else: try: - self.info(bold('loading pickled environment... '), nonl=True) + logger.info(bold('loading pickled environment... '), nonl=True) self.env = BuildEnvironment.frompickle( self.srcdir, self.config, path.join(self.doctreedir, ENV_PICKLE_FILENAME)) self.env.set_warnfunc(self.warn) @@ -314,12 +314,12 @@ class Sphinx(object): for domain in self.domains.keys(): # this can raise if the data version doesn't fit self.env.domains[domain] = self.domains[domain](self.env) - self.info('done') + logger.info('done') except Exception as err: if isinstance(err, IOError) and err.errno == ENOENT: - self.info('not yet created') + logger.info('not yet created') else: - self.info('failed: %s' % err) + logger.info('failed: %s' % err) self._init_env(freshenv=True) def _init_builder(self, buildername): @@ -357,11 +357,11 @@ class Sphinx(object): status = (self.statuscode == 0 and 'succeeded' or 'finished with problems') if self._warncount: - self.info(bold('build %s, %s warning%s.' % - (status, self._warncount, - self._warncount != 1 and 's' or ''))) + logger.info(bold('build %s, %s warning%s.' % + (status, self._warncount, + self._warncount != 1 and 's' or ''))) else: - self.info(bold('build %s.' % status)) + logger.info(bold('build %s.' 
% status)) except Exception as err: # delete the saved env to force a fresh build next time envfile = path.join(self.doctreedir, ENV_PICKLE_FILENAME) @@ -374,22 +374,6 @@ class Sphinx(object): self.builder.cleanup() # ---- logging handling ---------------------------------------------------- - - def _log(self, message, wfile, nonl=False): - # type: (unicode, IO, bool) -> None - try: - wfile.write(message) - except UnicodeEncodeError: - encoding = getattr(wfile, 'encoding', 'ascii') or 'ascii' - # wfile.write accept only str, not bytes.So, we encode and replace - # non-encodable characters, then decode them. - wfile.write(message.encode(encoding, 'replace').decode(encoding)) - if not nonl: - wfile.write('\n') - if hasattr(wfile, 'flush'): - wfile.flush() - self.messagelog.append(message) - def warn(self, message, location=None, prefix=None, type=None, subtype=None, colorfunc=None): # type: (unicode, unicode, unicode, unicode, unicode, Callable) -> None @@ -424,58 +408,25 @@ class Sphinx(object): If *nonl* is true, don't emit a newline at the end (which implies that more info output will follow soon.) """ - self._log(message, self._status, nonl) + logger.info(message, nonl=nonl) def verbose(self, message, *args, **kwargs): # type: (unicode, Any, Any) -> None - """Emit a verbose informational message. - - The message will only be emitted for verbosity levels >= 1 (i.e. at - least one ``-v`` option was given). - - The message can contain %-style interpolation placeholders, which is - formatted with either the ``*args`` or ``**kwargs`` when output. - """ - if self.verbosity < 1: - return - if args or kwargs: - message = message % (args or kwargs) - self._log(message, self._status) + """Emit a verbose informational message.""" + logger.verbose(message, *args, **kwargs) def debug(self, message, *args, **kwargs): # type: (unicode, Any, Any) -> None - """Emit a debug-level informational message. - - The message will only be emitted for verbosity levels >= 2 (i.e. at - least two ``-v`` options were given). - - The message can contain %-style interpolation placeholders, which is - formatted with either the ``*args`` or ``**kwargs`` when output. - """ - if self.verbosity < 2: - return - if args or kwargs: - message = message % (args or kwargs) - self._log(darkgray(message), self._status) + """Emit a debug-level informational message.""" + logger.debug(message, *args, **kwargs) def debug2(self, message, *args, **kwargs): # type: (unicode, Any, Any) -> None - """Emit a lowlevel debug-level informational message. - - The message will only be emitted for verbosity level 3 (i.e. three - ``-v`` options were given). - - The message can contain %-style interpolation placeholders, which is - formatted with either the ``*args`` or ``**kwargs`` when output. 
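The explicit ``self.verbosity`` checks that ``verbose()``, ``debug()`` and ``debug2()`` drop in this hunk are not lost: ``sphinx.util.logging.setup()`` sets the status handler's level from ``app.verbosity``, so the ``-v`` semantics stay the same. Roughly, using the constants defined earlier in this series:

    import logging

    VERBOSE, DEBUG2 = 15, 5
    VERBOSITY_MAP = {0: logging.INFO, 1: VERBOSE, 2: logging.DEBUG, 3: DEBUG2}

    # with a single -v (verbosity == 1) the handler level is VERBOSE, so
    # logger.verbose() records pass while debug() and debug2() are filtered out
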
- """ - if self.verbosity < 3: - return - if args or kwargs: - message = message % (args or kwargs) - self._log(lightgray(message), self._status) + """Emit a lowlevel debug-level informational message.""" + logger.debug2(message, *args, **kwargs) def _display_chunk(chunk): - # type: (Any) -> unicode + # type: (Union[List, Tuple, unicode]) -> unicode if isinstance(chunk, (list, tuple)): if len(chunk) == 1: return text_type(chunk[0]) @@ -484,21 +435,21 @@ class Sphinx(object): def old_status_iterator(self, iterable, summary, colorfunc=darkgreen, stringify_func=_display_chunk): - # type: (Iterable, unicode, Callable, Callable) -> Iterator + # type: (Iterable, unicode, Callable, Callable[[Any], unicode]) -> Iterator l = 0 for item in iterable: if l == 0: - self.info(bold(summary), nonl=True) + logger.info(bold(summary), nonl=True) l = 1 - self.info(colorfunc(stringify_func(item)) + ' ', nonl=True) + logger.info(colorfunc(stringify_func(item)) + ' ', nonl=True) yield item if l == 1: - self.info() + logger.info('') # new version with progress info def status_iterator(self, iterable, summary, colorfunc=darkgreen, length=0, stringify_func=_display_chunk): - # type: (Iterable, unicode, Callable, int, Callable) -> Iterable + # type: (Iterable, unicode, Callable, int, Callable[[Any], unicode]) -> Iterable if length == 0: for item in self.old_status_iterator(iterable, summary, colorfunc, stringify_func): @@ -514,17 +465,17 @@ class Sphinx(object): s += '\n' else: s = term_width_line(s) - self.info(s, nonl=True) + logger.info(s, nonl=True) yield item if l > 0: - self.info() + logger.info('') # ---- general extensibility interface ------------------------------------- def setup_extension(self, extension): # type: (unicode) -> None """Import and setup a Sphinx extension module. 
No-op if called twice.""" - self.debug('[app] setting up extension: %r', extension) + logger.debug('[app] setting up extension: %r', extension) if extension in self._extensions: return if extension in EXTENSION_BLACKLIST: @@ -536,7 +487,7 @@ class Sphinx(object): try: mod = __import__(extension, None, None, ['setup']) except ImportError as err: - self.verbose('Original exception:\n' + traceback.format_exc()) + logger.verbose('Original exception:\n' + traceback.format_exc()) raise ExtensionError('Could not import extension %s' % extension, err) if not hasattr(mod, 'setup'): @@ -596,20 +547,20 @@ class Sphinx(object): else: self._listeners[event][listener_id] = callback self.next_listener_id += 1 - self.debug('[app] connecting event %r: %r [id=%s]', - event, callback, listener_id) + logger.debug('[app] connecting event %r: %r [id=%s]', + event, callback, listener_id) return listener_id def disconnect(self, listener_id): # type: (int) -> None - self.debug('[app] disconnecting event: [id=%s]', listener_id) + logger.debug('[app] disconnecting event: [id=%s]', listener_id) for event in itervalues(self._listeners): event.pop(listener_id, None) def emit(self, event, *args): # type: (unicode, Any) -> List try: - self.debug2('[app] emitting event: %r%s', event, repr(args)[:100]) + logger.debug2('[app] emitting event: %r%s', event, repr(args)[:100]) except Exception: # not every object likes to be repr()'d (think # random stuff coming via autodoc) @@ -631,7 +582,7 @@ class Sphinx(object): def add_builder(self, builder): # type: (Type[Builder]) -> None - self.debug('[app] adding builder: %r', builder) + logger.debug('[app] adding builder: %r', builder) if not hasattr(builder, 'name'): raise ExtensionError('Builder class %s has no "name" attribute' % builder) @@ -643,8 +594,8 @@ class Sphinx(object): def add_config_value(self, name, default, rebuild, types=()): # type: (unicode, Any, Union[bool, unicode], Any) -> None - self.debug('[app] adding config value: %r', - (name, default, rebuild) + ((types,) if types else ())) # type: ignore + logger.debug('[app] adding config value: %r', + (name, default, rebuild) + ((types,) if types else ())) # type: ignore if name in self.config.values: raise ExtensionError('Config value %r already present' % name) if rebuild in (False, True): @@ -653,19 +604,19 @@ class Sphinx(object): def add_event(self, name): # type: (unicode) -> None - self.debug('[app] adding event: %r', name) + logger.debug('[app] adding event: %r', name) if name in self._events: raise ExtensionError('Event %r already present' % name) self._events[name] = '' def set_translator(self, name, translator_class): # type: (unicode, Any) -> None - self.info(bold('A Translator for the %s builder is changed.' % name)) + logger.info(bold('A Translator for the %s builder is changed.' 
% name)) self._translators[name] = translator_class def add_node(self, node, **kwds): # type: (nodes.Node, Any) -> None - self.debug('[app] adding node: %r', (node, kwds)) + logger.debug('[app] adding node: %r', (node, kwds)) if not kwds.pop('override', False) and \ hasattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__): logger.warning('while setting up extension %s: node class %r is ' @@ -719,8 +670,8 @@ class Sphinx(object): def add_directive(self, name, obj, content=None, arguments=None, **options): # type: (unicode, Any, unicode, Any, Any) -> None - self.debug('[app] adding directive: %r', - (name, obj, content, arguments, options)) + logger.debug('[app] adding directive: %r', + (name, obj, content, arguments, options)) if name in directives._directives: logger.warning('while setting up extension %s: directive %r is ' 'already registered, it will be overridden', @@ -731,7 +682,7 @@ class Sphinx(object): def add_role(self, name, role): # type: (unicode, Any) -> None - self.debug('[app] adding role: %r', (name, role)) + logger.debug('[app] adding role: %r', (name, role)) if name in roles._roles: logger.warning('while setting up extension %s: role %r is ' 'already registered, it will be overridden', @@ -743,7 +694,7 @@ class Sphinx(object): # type: (unicode, Any) -> None # don't use roles.register_generic_role because it uses # register_canonical_role - self.debug('[app] adding generic role: %r', (name, nodeclass)) + logger.debug('[app] adding generic role: %r', (name, nodeclass)) if name in roles._roles: logger.warning('while setting up extension %s: role %r is ' 'already registered, it will be overridden', @@ -754,14 +705,14 @@ class Sphinx(object): def add_domain(self, domain): # type: (Type[Domain]) -> None - self.debug('[app] adding domain: %r', domain) + logger.debug('[app] adding domain: %r', domain) if domain.name in self.domains: raise ExtensionError('domain %s already registered' % domain.name) self.domains[domain.name] = domain def override_domain(self, domain): # type: (Type[Domain]) -> None - self.debug('[app] overriding domain: %r', domain) + logger.debug('[app] overriding domain: %r', domain) if domain.name not in self.domains: raise ExtensionError('domain %s not yet registered' % domain.name) if not issubclass(domain, self.domains[domain.name]): @@ -772,8 +723,8 @@ class Sphinx(object): def add_directive_to_domain(self, domain, name, obj, content=None, arguments=None, **options): # type: (unicode, unicode, Any, unicode, Any, Any) -> None - self.debug('[app] adding directive to domain: %r', - (domain, name, obj, content, arguments, options)) + logger.debug('[app] adding directive to domain: %r', + (domain, name, obj, content, arguments, options)) if domain not in self.domains: raise ExtensionError('domain %s not yet registered' % domain) self.domains[domain].directives[name] = \ @@ -781,14 +732,14 @@ class Sphinx(object): def add_role_to_domain(self, domain, name, role): # type: (unicode, unicode, Any) -> None - self.debug('[app] adding role to domain: %r', (domain, name, role)) + logger.debug('[app] adding role to domain: %r', (domain, name, role)) if domain not in self.domains: raise ExtensionError('domain %s not yet registered' % domain) self.domains[domain].roles[name] = role def add_index_to_domain(self, domain, index): # type: (unicode, unicode) -> None - self.debug('[app] adding index to domain: %r', (domain, index)) + logger.debug('[app] adding index to domain: %r', (domain, index)) if domain not in self.domains: raise ExtensionError('domain %s not yet 
registered' % domain) self.domains[domain].indices.append(index) @@ -797,9 +748,9 @@ class Sphinx(object): parse_node=None, ref_nodeclass=None, objname='', doc_field_types=[]): # type: (unicode, unicode, unicode, Callable, nodes.Node, unicode, List) -> None - self.debug('[app] adding object type: %r', - (directivename, rolename, indextemplate, parse_node, - ref_nodeclass, objname, doc_field_types)) + logger.debug('[app] adding object type: %r', + (directivename, rolename, indextemplate, parse_node, + ref_nodeclass, objname, doc_field_types)) StandardDomain.object_types[directivename] = \ ObjType(objname or directivename, rolename) # create a subclass of GenericObject as the new directive @@ -817,9 +768,9 @@ class Sphinx(object): def add_crossref_type(self, directivename, rolename, indextemplate='', ref_nodeclass=None, objname=''): # type: (unicode, unicode, unicode, nodes.Node, unicode) -> None - self.debug('[app] adding crossref type: %r', - (directivename, rolename, indextemplate, ref_nodeclass, - objname)) + logger.debug('[app] adding crossref type: %r', + (directivename, rolename, indextemplate, ref_nodeclass, + objname)) StandardDomain.object_types[directivename] = \ ObjType(objname or directivename, rolename) # create a subclass of Target as the new directive @@ -831,12 +782,12 @@ class Sphinx(object): def add_transform(self, transform): # type: (Transform) -> None - self.debug('[app] adding transform: %r', transform) + logger.debug('[app] adding transform: %r', transform) SphinxStandaloneReader.transforms.append(transform) def add_javascript(self, filename): # type: (unicode) -> None - self.debug('[app] adding javascript: %r', filename) + logger.debug('[app] adding javascript: %r', filename) from sphinx.builders.html import StandaloneHTMLBuilder if '://' in filename: StandaloneHTMLBuilder.script_files.append(filename) @@ -846,7 +797,7 @@ class Sphinx(object): def add_stylesheet(self, filename): # type: (unicode) -> None - self.debug('[app] adding stylesheet: %r', filename) + logger.debug('[app] adding stylesheet: %r', filename) from sphinx.builders.html import StandaloneHTMLBuilder if '://' in filename: StandaloneHTMLBuilder.css_files.append(filename) @@ -856,12 +807,12 @@ class Sphinx(object): def add_latex_package(self, packagename, options=None): # type: (unicode, unicode) -> None - self.debug('[app] adding latex package: %r', packagename) + logger.debug('[app] adding latex package: %r', packagename) self.builder.usepackages.append((packagename, options)) def add_lexer(self, alias, lexer): # type: (unicode, Any) -> None - self.debug('[app] adding lexer: %r', (alias, lexer)) + logger.debug('[app] adding lexer: %r', (alias, lexer)) from sphinx.highlighting import lexers if lexers is None: return @@ -869,27 +820,27 @@ class Sphinx(object): def add_autodocumenter(self, cls): # type: (Any) -> None - self.debug('[app] adding autodocumenter: %r', cls) + logger.debug('[app] adding autodocumenter: %r', cls) from sphinx.ext import autodoc autodoc.add_documenter(cls) self.add_directive('auto' + cls.objtype, autodoc.AutoDirective) def add_autodoc_attrgetter(self, type, getter): # type: (Any, Callable) -> None - self.debug('[app] adding autodoc attrgetter: %r', (type, getter)) + logger.debug('[app] adding autodoc attrgetter: %r', (type, getter)) from sphinx.ext import autodoc autodoc.AutoDirective._special_attrgetters[type] = getter def add_search_language(self, cls): # type: (Any) -> None - self.debug('[app] adding search language: %r', cls) + logger.debug('[app] adding search language: %r', 
cls) from sphinx.search import languages, SearchLanguage assert issubclass(cls, SearchLanguage) languages[cls.lang] = cls def add_source_parser(self, suffix, parser): # type: (unicode, Parser) -> None - self.debug('[app] adding search source_parser: %r, %r', suffix, parser) + logger.debug('[app] adding search source_parser: %r, %r', suffix, parser) if suffix in self._additional_source_parsers: logger.warning('while setting up extension %s: source_parser for %r is ' 'already registered, it will be overridden', diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index b8baf7792..9dc757cd4 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -40,6 +40,9 @@ if False: from sphinx.util.tags import Tags # NOQA +logger = logging.getLogger(__name__) + + class Builder(object): """ Builds target formats from the reST sources. @@ -180,7 +183,7 @@ class Builder(object): def cat2relpath(cat): return path.relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP) - self.info(bold('building [mo]: ') + message) + logger.info(bold('building [mo]: ') + message) for catalog in self.app.status_iterator( catalogs, 'writing output... ', darkgreen, len(catalogs), cat2relpath): @@ -281,7 +284,7 @@ class Builder(object): First updates the environment, and then calls :meth:`write`. """ if summary: - self.info(bold('building [%s]' % self.name) + ': ' + summary) + logger.info(bold('building [%s]' % self.name) + ': ' + summary) # while reading, collect all warnings from docutils with logging.pending_logging(): @@ -289,29 +292,29 @@ class Builder(object): self.doctreedir, self.app)) doccount = len(updated_docnames) - self.info(bold('looking for now-outdated files... '), nonl=1) + logger.info(bold('looking for now-outdated files... '), nonl=1) for docname in self.env.check_dependents(updated_docnames): updated_docnames.add(docname) outdated = len(updated_docnames) - doccount if outdated: - self.info('%d found' % outdated) + logger.info('%d found' % outdated) else: - self.info('none found') + logger.info('none found') if updated_docnames: # save the environment from sphinx.application import ENV_PICKLE_FILENAME - self.info(bold('pickling environment... '), nonl=True) + logger.info(bold('pickling environment... '), nonl=True) self.env.topickle(path.join(self.doctreedir, ENV_PICKLE_FILENAME)) - self.info('done') + logger.info('done') # global actions - self.info(bold('checking consistency... '), nonl=True) + logger.info(bold('checking consistency... '), nonl=True) self.env.check_consistency() - self.info('done') + logger.info('done') else: if method == 'update' and not docnames: - self.info(bold('no targets are out of date.')) + logger.info(bold('no targets are out of date.')) return # filter "docnames" (list of outdated files) by the updated @@ -358,7 +361,7 @@ class Builder(object): docnames = set(build_docnames) | set(updated_docnames) else: docnames = set(build_docnames) - self.app.debug('docnames to write: %s', ', '.join(sorted(docnames))) + logger.debug('docnames to write: %s', ', '.join(sorted(docnames))) # add all toctree-containing files that may have changed for docname in list(docnames): @@ -367,9 +370,9 @@ class Builder(object): docnames.add(tocdocname) docnames.add(self.config.master_doc) - self.info(bold('preparing documents... '), nonl=True) + logger.info(bold('preparing documents... 
'), nonl=True) self.prepare_writing(docnames) - self.info('done') + logger.info('done') warnings = [] # type: List[Tuple[Tuple, Dict]] if self.parallel_ok: @@ -425,7 +428,7 @@ class Builder(object): tasks.add_task(write_process, arg, add_warnings) # make sure all threads have finished - self.info(bold('waiting for workers...')) + logger.info(bold('waiting for workers...')) tasks.join() for warning, kwargs in warnings: diff --git a/sphinx/builders/applehelp.py b/sphinx/builders/applehelp.py index 3c2782802..b0c5c66b1 100644 --- a/sphinx/builders/applehelp.py +++ b/sphinx/builders/applehelp.py @@ -18,6 +18,7 @@ import shlex from sphinx.builders.html import StandaloneHTMLBuilder from sphinx.config import string_classes +from sphinx.util import logging from sphinx.util.osutil import copyfile, ensuredir, make_filename from sphinx.util.console import bold # type: ignore from sphinx.util.fileutil import copy_asset @@ -33,6 +34,9 @@ if False: from typing import Any # NOQA from sphinx.application import Sphinx # NOQA + +logger = logging.getLogger(__name__) + # Use plistlib.dump in 3.4 and above try: write_plist = plistlib.dump # type: ignore @@ -118,13 +122,13 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): target_dir = self.outdir if path.isdir(source_dir): - self.info(bold('copying localized files... '), nonl=True) + logger.info(bold('copying localized files... '), nonl=True) excluded = Matcher(self.config.exclude_patterns + ['**/.*']) copy_asset(source_dir, target_dir, excluded, context=self.globalcontext, renderer=self.templates) - self.info('done') + logger.info('done') def build_helpbook(self): # type: () -> None @@ -165,20 +169,20 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): if self.config.applehelp_remote_url is not None: info_plist['HPDBookRemoteURL'] = self.config.applehelp_remote_url - self.info(bold('writing Info.plist... '), nonl=True) + logger.info(bold('writing Info.plist... '), nonl=True) with open(path.join(contents_dir, 'Info.plist'), 'wb') as f: write_plist(info_plist, f) - self.info('done') + logger.info('done') # Copy the icon, if one is supplied if self.config.applehelp_icon: - self.info(bold('copying icon... '), nonl=True) + logger.info(bold('copying icon... '), nonl=True) try: copyfile(path.join(self.srcdir, self.config.applehelp_icon), path.join(resources_dir, info_plist['HPDBookIconPath'])) - self.info('done') + logger.info('done') except Exception as err: self.warn('cannot copy icon file %r: %s' % (path.join(self.srcdir, self.config.applehelp_icon), @@ -186,16 +190,16 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): del info_plist['HPDBookIconPath'] # Build the access page - self.info(bold('building access page...'), nonl=True) + logger.info(bold('building access page...'), nonl=True) with codecs.open(path.join(language_dir, '_access.html'), 'w') as f: f.write(access_page_template % { 'toc': htmlescape(toc, quote=True), 'title': htmlescape(self.config.applehelp_title) }) - self.info('done') + logger.info('done') # Generate the help index - self.info(bold('generating help index... '), nonl=True) + logger.info(bold('generating help index... 
'), nonl=True) args = [ self.config.applehelp_indexer_path, @@ -217,7 +221,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): args += ['-l', self.config.applehelp_locale] if self.config.applehelp_disable_external_tools: - self.info('skipping') + logger.info('skipping') self.warn('you will need to index this help book with:\n %s' % (' '.join([pipes.quote(arg) for arg in args]))) @@ -232,13 +236,13 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): if p.returncode != 0: raise AppleHelpIndexerFailed(output) else: - self.info('done') + logger.info('done') except OSError: raise AppleHelpIndexerFailed('Command not found: %s' % args[0]) # If we've been asked to, sign the bundle if self.config.applehelp_codesign_identity: - self.info(bold('signing help book... '), nonl=True) + logger.info(bold('signing help book... '), nonl=True) args = [ self.config.applehelp_codesign_path, @@ -251,7 +255,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): args.append(self.bundle_path) if self.config.applehelp_disable_external_tools: - self.info('skipping') + logger.info('skipping') self.warn('you will need to sign this help book with:\n %s' % (' '.join([pipes.quote(arg) for arg in args]))) @@ -266,7 +270,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): if p.returncode != 0: raise AppleHelpCodeSigningFailed(output) else: - self.info('done') + logger.info('done') except OSError: raise AppleHelpCodeSigningFailed('Command not found: %s' % args[0]) diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py index de9e95bf1..d1b908e3d 100644 --- a/sphinx/builders/changes.py +++ b/sphinx/builders/changes.py @@ -18,6 +18,7 @@ from sphinx import package_dir from sphinx.locale import _ from sphinx.theming import Theme from sphinx.builders import Builder +from sphinx.util import logging from sphinx.util.osutil import ensuredir, os_path from sphinx.util.console import bold # type: ignore from sphinx.util.fileutil import copy_asset_file @@ -29,6 +30,9 @@ if False: from sphinx.application import Sphinx # NOQA +logger = logging.getLogger(__name__) + + class ChangesBuilder(Builder): """ Write a summary with all versionadded/changed directives. @@ -59,9 +63,9 @@ class ChangesBuilder(Builder): apichanges = [] # type: List[Tuple[unicode, unicode, int]] otherchanges = {} # type: Dict[Tuple[unicode, unicode], List[Tuple[unicode, unicode, int]]] # NOQA if version not in self.env.versionchanges: - self.info(bold('no changes in version %s.' % version)) + logger.info(bold('no changes in version %s.' 
% version)) return - self.info(bold('writing summary file...')) + logger.info(bold('writing summary file...')) for type, docname, lineno, module, descname, content in \ self.env.versionchanges[version]: if isinstance(descname, tuple): @@ -125,7 +129,7 @@ class ChangesBuilder(Builder): break return line - self.info(bold('copying source files...')) + logger.info(bold('copying source files...')) for docname in self.env.all_docs: with codecs.open(self.env.doc2path(docname), 'r', # type: ignore self.env.config.source_encoding) as f: diff --git a/sphinx/builders/devhelp.py b/sphinx/builders/devhelp.py index f1ed3a495..af8bcfeed 100644 --- a/sphinx/builders/devhelp.py +++ b/sphinx/builders/devhelp.py @@ -19,6 +19,7 @@ from os import path from docutils import nodes from sphinx import addnodes +from sphinx.util import logging from sphinx.util.osutil import make_filename from sphinx.builders.html import StandaloneHTMLBuilder @@ -33,6 +34,9 @@ if False: from sphinx.application import Sphinx # NOQA +logger = logging.getLogger(__name__) + + class DevhelpBuilder(StandaloneHTMLBuilder): """ Builder that also outputs GNOME Devhelp file. @@ -60,7 +64,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder): def build_devhelp(self, outdir, outname): # type: (unicode, unicode) -> None - self.info('dumping devhelp index...') + logger.info('dumping devhelp index...') # Basic info root = etree.Element('book', diff --git a/sphinx/builders/epub.py b/sphinx/builders/epub.py index 5d4686af6..97c736d9f 100644 --- a/sphinx/builders/epub.py +++ b/sphinx/builders/epub.py @@ -29,6 +29,7 @@ from docutils import nodes from sphinx import addnodes from sphinx.builders.html import StandaloneHTMLBuilder +from sphinx.util import logging from sphinx.util.osutil import ensuredir, copyfile, make_filename, EEXIST from sphinx.util.smartypants import sphinx_smarty_pants as ssp from sphinx.util.console import brown # type: ignore @@ -39,6 +40,9 @@ if False: from sphinx.application import Sphinx # NOQA +logger = logging.getLogger(__name__) + + # (Fragment) templates from which the metainfo files content.opf, toc.ncx, # mimetype, and META-INF/container.xml are created. # This template section also defines strings that are embedded in the html @@ -547,14 +551,14 @@ class EpubBuilder(StandaloneHTMLBuilder): def build_mimetype(self, outdir, outname): # type: (unicode, unicode) -> None """Write the metainfo file mimetype.""" - self.info('writing %s file...' % outname) + logger.info('writing %s file...' % outname) with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: # type: ignore f.write(self.mimetype_template) def build_container(self, outdir, outname): # type: (unicode, unicode) -> None """Write the metainfo file META-INF/cointainer.xml.""" - self.info('writing %s file...' % outname) + logger.info('writing %s file...' % outname) fn = path.join(outdir, outname) try: os.mkdir(path.dirname(fn)) @@ -589,7 +593,7 @@ class EpubBuilder(StandaloneHTMLBuilder): """Write the metainfo file content.opf It contains bibliographic data, a file list and the spine (the reading order). """ - self.info('writing %s file...' % outname) + logger.info('writing %s file...' % outname) # files if not outdir.endswith(os.sep): @@ -800,7 +804,7 @@ class EpubBuilder(StandaloneHTMLBuilder): def build_toc(self, outdir, outname): # type: (unicode, unicode) -> None """Write the metainfo file toc.ncx.""" - self.info('writing %s file...' % outname) + logger.info('writing %s file...' 
% outname) if self.config.epub_tocscope == 'default': doctree = self.env.get_and_resolve_doctree(self.config.master_doc, @@ -824,7 +828,7 @@ class EpubBuilder(StandaloneHTMLBuilder): It is a zip file with the mimetype file stored uncompressed as the first entry. """ - self.info('writing %s file...' % outname) + logger.info('writing %s file...' % outname) projectfiles = ['META-INF/container.xml', 'content.opf', 'toc.ncx'] # type: List[unicode] # NOQA projectfiles.extend(self.files) epub = zipfile.ZipFile(path.join(outdir, outname), 'w', # type: ignore diff --git a/sphinx/builders/epub3.py b/sphinx/builders/epub3.py index 5e0663a08..55434a499 100644 --- a/sphinx/builders/epub3.py +++ b/sphinx/builders/epub3.py @@ -16,6 +16,10 @@ from datetime import datetime from sphinx.config import string_classes from sphinx.builders.epub import EpubBuilder +from sphinx.util import logging + + +logger = logging.getLogger(__name__) # (Fragment) templates from which the metainfo files content.opf, toc.ncx, @@ -235,7 +239,7 @@ class Epub3Builder(EpubBuilder): def build_navigation_doc(self, outdir, outname): """Write the metainfo file nav.xhtml.""" - self.info('writing %s file...' % outname) + logger.info('writing %s file...' % outname) if self.config.epub_tocscope == 'default': doctree = self.env.get_and_resolve_doctree( diff --git a/sphinx/builders/gettext.py b/sphinx/builders/gettext.py index ced63e8f5..a38c9eca4 100644 --- a/sphinx/builders/gettext.py +++ b/sphinx/builders/gettext.py @@ -21,7 +21,7 @@ from uuid import uuid4 from six import iteritems from sphinx.builders import Builder -from sphinx.util import split_index_msg +from sphinx.util import split_index_msg, logging from sphinx.util.tags import Tags from sphinx.util.nodes import extract_messages, traverse_translatable_index from sphinx.util.osutil import safe_relpath, ensuredir, canon_path @@ -36,6 +36,9 @@ if False: from sphinx.util.i18n import CatalogInfo # NOQA from sphinx.application import Sphinx # NOQA + +logger = logging.getLogger(__name__) + POHEADER = r""" # SOME DESCRIPTIVE TITLE. 
# Copyright (C) %(copyright)s @@ -216,8 +219,8 @@ class MessageCatalogBuilder(I18nBuilder): def _extract_from_template(self): # type: () -> None files = self._collect_templates() - self.info(bold('building [%s]: ' % self.name), nonl=1) - self.info('targets for %d template files' % len(files)) + logger.info(bold('building [%s]: ' % self.name), nonl=1) + logger.info('targets for %d template files' % len(files)) extract_translations = self.templates.environment.extract_translations diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 31ee0a371..b4f4d7fd3 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -28,7 +28,7 @@ from docutils.frontend import OptionParser from docutils.readers.doctree import Reader as DoctreeReader from sphinx import package_dir, __display_version__ -from sphinx.util import jsonimpl +from sphinx.util import jsonimpl, logging from sphinx.util.i18n import format_date from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \ movefile, copyfile @@ -57,6 +57,8 @@ INVENTORY_FILENAME = 'objects.inv' #: the filename for the "last build" file (for serializing builders) LAST_BUILD_FILENAME = 'last_build' +logger = logging.getLogger(__name__) + def get_stable_hash(obj): # type: (Any) -> unicode @@ -502,7 +504,7 @@ class StandaloneHTMLBuilder(Builder): def gen_indices(self): # type: () -> None - self.info(bold('generating indices...'), nonl=1) + logger.info(bold('generating indices...'), nonl=1) # the global general index if self.use_index: @@ -511,7 +513,7 @@ class StandaloneHTMLBuilder(Builder): # the global domain-specific indices self.write_domain_indices() - self.info() + logger.info('') def gen_additional_pages(self): # type: () -> None @@ -520,25 +522,25 @@ class StandaloneHTMLBuilder(Builder): for pagename, context, template in pagelist: self.handle_page(pagename, context, template) - self.info(bold('writing additional pages...'), nonl=1) + logger.info(bold('writing additional pages...'), nonl=1) # additional pages from conf.py for pagename, template in self.config.html_additional_pages.items(): - self.info(' '+pagename, nonl=1) + logger.info(' '+pagename, nonl=1) self.handle_page(pagename, {}, template) # the search page if self.search: - self.info(' search', nonl=1) + logger.info(' search', nonl=1) self.handle_page('search', {}, 'search.html') # the opensearch xml file if self.config.html_use_opensearch and self.search: - self.info(' opensearch', nonl=1) + logger.info(' opensearch', nonl=1) fn = path.join(self.outdir, '_static', 'opensearch.xml') self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) - self.info() + logger.info('') def write_genindex(self): # type: () -> None @@ -555,7 +557,7 @@ class StandaloneHTMLBuilder(Builder): genindexcounts = indexcounts, split_index = self.config.html_split_index, ) - self.info(' genindex', nonl=1) + logger.info(' genindex', nonl=1) if self.config.html_split_index: self.handle_page('genindex', genindexcontext, @@ -578,7 +580,7 @@ class StandaloneHTMLBuilder(Builder): content = content, collapse_index = collapse, ) - self.info(' ' + indexname, nonl=1) + logger.info(' ' + indexname, nonl=1) self.handle_page(indexname, indexcontext, 'domainindex.html') def copy_image_files(self): @@ -618,7 +620,7 @@ class StandaloneHTMLBuilder(Builder): def copy_static_files(self): # type: () -> None # copy static files - self.info(bold('copying static files... '), nonl=True) + logger.info(bold('copying static files... 
'), nonl=True) ensuredir(path.join(self.outdir, '_static')) # first, create pygments style file with open(path.join(self.outdir, '_static', 'pygments.css'), 'w') as f: @@ -674,12 +676,12 @@ class StandaloneHTMLBuilder(Builder): elif not path.isfile(icontarget): copyfile(path.join(self.confdir, self.config.html_favicon), icontarget) - self.info('done') + logger.info('done') def copy_extra_files(self): # type: () -> None # copy html_extra_path files - self.info(bold('copying extra files... '), nonl=True) + logger.info(bold('copying extra files... '), nonl=True) excluded = Matcher(self.config.exclude_patterns) for extra_path in self.config.html_extra_path: @@ -689,7 +691,7 @@ class StandaloneHTMLBuilder(Builder): continue copy_asset(entry, self.outdir, excluded) - self.info('done') + logger.info('done') def write_buildinfo(self): # type: () -> None @@ -890,7 +892,7 @@ class StandaloneHTMLBuilder(Builder): def dump_inventory(self): # type: () -> None - self.info(bold('dumping object inventory... '), nonl=True) + logger.info(bold('dumping object inventory... '), nonl=True) with open(path.join(self.outdir, INVENTORY_FILENAME), 'wb') as f: f.write((u'# Sphinx inventory version 2\n' u'# Project: %s\n' @@ -913,11 +915,11 @@ class StandaloneHTMLBuilder(Builder): (u'%s %s:%s %s %s %s\n' % (name, domainname, type, prio, uri, dispname)).encode('utf-8'))) f.write(compressor.flush()) - self.info('done') + logger.info('done') def dump_search_index(self): # type: () -> None - self.info( + logger.info( bold('dumping search index in %s ... ' % self.indexer.label()), nonl=True) self.indexer.prune(self.env.all_docs) @@ -931,7 +933,7 @@ class StandaloneHTMLBuilder(Builder): with f: self.indexer.dump(f, self.indexer_format) # type: ignore movefile(searchindexfn + '.tmp', searchindexfn) - self.info('done') + logger.info('done') class DirectoryHTMLBuilder(StandaloneHTMLBuilder): @@ -1097,36 +1099,36 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): # type: (Any) -> None docnames = self.env.all_docs - self.info(bold('preparing documents... '), nonl=True) + logger.info(bold('preparing documents... '), nonl=True) self.prepare_writing(docnames) - self.info('done') + logger.info('done') - self.info(bold('assembling single document... '), nonl=True) + logger.info(bold('assembling single document... '), nonl=True) doctree = self.assemble_doctree() self.env.toc_secnumbers = self.assemble_toc_secnumbers() self.env.toc_fignumbers = self.assemble_toc_fignumbers() - self.info() - self.info(bold('writing... '), nonl=True) + logger.info('') + logger.info(bold('writing... 
'), nonl=True) self.write_doc_serialized(self.config.master_doc, doctree) self.write_doc(self.config.master_doc, doctree) - self.info('done') + logger.info('done') def finish(self): # type: () -> None # no indices or search pages are supported - self.info(bold('writing additional files...'), nonl=1) + logger.info(bold('writing additional files...'), nonl=1) # additional pages from conf.py for pagename, template in self.config.html_additional_pages.items(): - self.info(' '+pagename, nonl=1) + logger.info(' '+pagename, nonl=1) self.handle_page(pagename, {}, template) if self.config.html_use_opensearch: - self.info(' opensearch', nonl=1) + logger.info(' opensearch', nonl=1) fn = path.join(self.outdir, '_static', 'opensearch.xml') self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) - self.info() + logger.info('') self.copy_image_files() self.copy_download_files() diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py index 79268ab74..5f94460c8 100644 --- a/sphinx/builders/htmlhelp.py +++ b/sphinx/builders/htmlhelp.py @@ -18,11 +18,15 @@ from os import path from docutils import nodes from sphinx import addnodes -from sphinx.util.osutil import make_filename from sphinx.builders.html import StandaloneHTMLBuilder +from sphinx.util import logging +from sphinx.util.osutil import make_filename from sphinx.util.pycompat import htmlescape +logger = logging.getLogger(__name__) + + # Project file (*.hhp) template. 'outname' is the file basename (like # the pythlp in pythlp.hhp); 'version' is the doc version number (like # the 2.2 in Python 2.2). @@ -207,12 +211,12 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): StandaloneHTMLBuilder.write_doc(self, docname, doctree) def build_hhx(self, outdir, outname): - self.info('dumping stopword list...') + logger.info('dumping stopword list...') with self.open_file(outdir, outname+'.stp') as f: for word in sorted(stopwords): print(word, file=f) - self.info('writing project file...') + logger.info('writing project file...') with self.open_file(outdir, outname+'.hhp') as f: f.write(project_template % { 'outname': outname, @@ -233,7 +237,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): print(path.join(root, fn)[olen:].replace(os.sep, '\\'), file=f) - self.info('writing TOC file...') + logger.info('writing TOC file...') with self.open_file(outdir, outname+'.hhc') as f: f.write(contents_header) # special books @@ -273,7 +277,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): write_toc(node) f.write(contents_footer) - self.info('writing index file...') + logger.info('writing index file...') index = self.env.create_index(self) with self.open_file(outdir, outname+'.hhk') as f: f.write('<UL>\n') diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index 01be08cbd..15a127e7b 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -20,7 +20,7 @@ from docutils.utils import new_document from docutils.frontend import OptionParser from sphinx import package_dir, addnodes, highlighting -from sphinx.util import texescape +from sphinx.util import texescape, logging from sphinx.config import string_classes, ENUM from sphinx.errors import SphinxError from sphinx.locale import _ @@ -38,6 +38,9 @@ if False: from sphinx.application import Sphinx # NOQA +logger = logging.getLogger(__name__) + + class LaTeXBuilder(Builder): """ Builds LaTeX output to create PDF. 
@@ -119,7 +122,7 @@ class LaTeXBuilder(Builder): destination = FileOutput( destination_path=path.join(self.outdir, targetname), encoding='utf-8') - self.info("processing " + targetname + "... ", nonl=1) + logger.info("processing " + targetname + "... ", nonl=1) toctrees = self.env.get_doctree(docname).traverse(addnodes.toctree) if toctrees: if toctrees[0].get('maxdepth') > 0: @@ -133,7 +136,7 @@ class LaTeXBuilder(Builder): appendices=((docclass != 'howto') and self.config.latex_appendices or [])) doctree['tocdepth'] = tocdepth self.post_process_images(doctree) - self.info("writing... ", nonl=1) + logger.info("writing... ", nonl=1) doctree.settings = docsettings doctree.settings.author = author doctree.settings.title = title @@ -141,7 +144,7 @@ class LaTeXBuilder(Builder): doctree.settings.docname = docname doctree.settings.docclass = docclass docwriter.write(doctree, destination) - self.info("done") + logger.info("done") def get_contentsname(self, indexfile): # type: (unicode) -> unicode @@ -157,7 +160,7 @@ class LaTeXBuilder(Builder): def assemble_doctree(self, indexfile, toctree_only, appendices): # type: (unicode, bool, List[unicode]) -> nodes.Node self.docnames = set([indexfile] + appendices) - self.info(darkgreen(indexfile) + " ", nonl=1) + logger.info(darkgreen(indexfile) + " ", nonl=1) tree = self.env.get_doctree(indexfile) tree['docname'] = indexfile if toctree_only: @@ -178,8 +181,8 @@ class LaTeXBuilder(Builder): appendix = self.env.get_doctree(docname) appendix['docname'] = docname largetree.append(appendix) - self.info() - self.info("resolving references...") + logger.info('') + logger.info("resolving references...") self.env.resolve_references(largetree, indexfile, self) # resolve :ref:s to distant tex files -- we can't add a cross-reference, # but append the document name @@ -202,16 +205,16 @@ class LaTeXBuilder(Builder): # type: () -> None # copy image files if self.images: - self.info(bold('copying images...'), nonl=1) + logger.info(bold('copying images...'), nonl=1) for src, dest in iteritems(self.images): - self.info(' '+src, nonl=1) + logger.info(' '+src, nonl=1) copy_asset_file(path.join(self.srcdir, src), path.join(self.outdir, dest)) - self.info() + logger.info('') # copy TeX support files from texinputs context = {'latex_engine': self.config.latex_engine} - self.info(bold('copying TeX support files...')) + logger.info(bold('copying TeX support files...')) staticdirname = path.join(package_dir, 'texinputs') for filename in os.listdir(staticdirname): if not filename.startswith('.'): @@ -220,11 +223,11 @@ class LaTeXBuilder(Builder): # copy additional files if self.config.latex_additional_files: - self.info(bold('copying additional files...'), nonl=1) + logger.info(bold('copying additional files...'), nonl=1) for filename in self.config.latex_additional_files: - self.info(' '+filename, nonl=1) + logger.info(' '+filename, nonl=1) copy_asset_file(path.join(self.confdir, filename), self.outdir) - self.info() + logger.info('') # the logo is handled differently if self.config.latex_logo: @@ -232,7 +235,7 @@ class LaTeXBuilder(Builder): raise SphinxError('logo file %r does not exist' % self.config.latex_logo) else: copy_asset_file(path.join(self.confdir, self.config.latex_logo), self.outdir) - self.info('done') + logger.info('done') def validate_config_values(app): diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py index 0b95bcd6c..6a3c03e35 100644 --- a/sphinx/builders/linkcheck.py +++ b/sphinx/builders/linkcheck.py @@ -19,7 +19,6 @@ from 
requests.exceptions import HTTPError from six.moves import queue # type: ignore from six.moves.urllib.parse import unquote from six.moves.html_parser import HTMLParser - from docutils import nodes # 2015-06-25 barry@python.org. This exception was deprecated in Python 3.3 and @@ -33,7 +32,7 @@ except ImportError: pass from sphinx.builders import Builder -from sphinx.util import encode_uri, requests +from sphinx.util import encode_uri, requests, logging from sphinx.util.console import ( # type: ignore purple, red, darkgreen, darkgray, darkred, turquoise ) @@ -46,6 +45,9 @@ if False: from sphinx.util.requests.requests import Response # NOQA +logger = logging.getLogger(__name__) + + class AnchorCheckParser(HTMLParser): """Specialized HTML parser that looks for a specific anchor.""" @@ -231,24 +233,24 @@ class CheckExternalLinksBuilder(Builder): if status == 'working' and info == 'old': return if lineno: - self.info('(line %4d) ' % lineno, nonl=1) + logger.info('(line %4d) ' % lineno, nonl=1) if status == 'ignored': if info: - self.info(darkgray('-ignored- ') + uri + ': ' + info) + logger.info(darkgray('-ignored- ') + uri + ': ' + info) else: - self.info(darkgray('-ignored- ') + uri) + logger.info(darkgray('-ignored- ') + uri) elif status == 'local': - self.info(darkgray('-local- ') + uri) + logger.info(darkgray('-local- ') + uri) self.write_entry('local', docname, lineno, uri) elif status == 'working': - self.info(darkgreen('ok ') + uri + info) + logger.info(darkgreen('ok ') + uri + info) elif status == 'broken': self.write_entry('broken', docname, lineno, uri + ': ' + info) if self.app.quiet or self.app.warningiserror: self.warn('broken link: %s (%s)' % (uri, info), '%s:%s' % (self.env.doc2path(docname), lineno)) else: - self.info(red('broken ') + uri + red(' - ' + info)) + logger.info(red('broken ') + uri + red(' - ' + info)) elif status == 'redirected': text, color = { 301: ('permanently', darkred), @@ -259,7 +261,7 @@ class CheckExternalLinksBuilder(Builder): }[code] self.write_entry('redirected ' + text, docname, lineno, uri + ' to ' + info) - self.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info)) + logger.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info)) def get_target_uri(self, docname, typ=None): # type: (unicode, unicode) -> unicode @@ -275,7 +277,7 @@ class CheckExternalLinksBuilder(Builder): def write_doc(self, docname, doctree): # type: (unicode, nodes.Node) -> None - self.info() + logger.info('') n = 0 for node in doctree.traverse(nodes.reference): if 'refuri' not in node: diff --git a/sphinx/builders/manpage.py b/sphinx/builders/manpage.py index e6f7c4104..cd8684c79 100644 --- a/sphinx/builders/manpage.py +++ b/sphinx/builders/manpage.py @@ -19,6 +19,7 @@ from docutils.frontend import OptionParser from sphinx import addnodes from sphinx.builders import Builder from sphinx.environment import NoUri +from sphinx.util import logging from sphinx.util.nodes import inline_all_toctrees from sphinx.util.osutil import make_filename from sphinx.util.console import bold, darkgreen # type: ignore @@ -30,6 +31,9 @@ if False: from sphinx.application import Sphinx # NOQA +logger = logging.getLogger(__name__) + + class ManualPageBuilder(Builder): """ Builds groff output in manual page format. @@ -62,7 +66,7 @@ class ManualPageBuilder(Builder): components=(docwriter,), read_config_files=True).get_default_values() - self.info(bold('writing... '), nonl=True) + logger.info(bold('writing... 
'), nonl=True) for info in self.config.man_pages: docname, name, description, authors, section = info @@ -73,7 +77,7 @@ class ManualPageBuilder(Builder): authors = [] targetname = '%s.%s' % (name, section) - self.info(darkgreen(targetname) + ' { ', nonl=True) + logger.info(darkgreen(targetname) + ' { ', nonl=True) destination = FileOutput( destination_path=path.join(self.outdir, targetname), encoding='utf-8') @@ -82,7 +86,7 @@ class ManualPageBuilder(Builder): docnames = set() # type: Set[unicode] largetree = inline_all_toctrees(self, docnames, docname, tree, darkgreen, [docname]) - self.info('} ', nonl=True) + logger.info('} ', nonl=True) self.env.resolve_references(largetree, docname, self) # remove pending_xref nodes for pendingnode in largetree.traverse(addnodes.pending_xref): @@ -95,7 +99,7 @@ class ManualPageBuilder(Builder): largetree.settings.section = section docwriter.write(largetree, destination) - self.info() + logger.info('') def finish(self): # type: () -> None diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py index c49f3d767..6ed9c6b7e 100644 --- a/sphinx/builders/qthelp.py +++ b/sphinx/builders/qthelp.py @@ -21,7 +21,7 @@ from docutils import nodes from sphinx import addnodes from sphinx.builders.html import StandaloneHTMLBuilder -from sphinx.util import force_decode +from sphinx.util import force_decode, logging from sphinx.util.osutil import make_filename from sphinx.util.pycompat import htmlescape @@ -31,6 +31,9 @@ if False: from sphinx.application import Sphinx # NOQA +logger = logging.getLogger(__name__) + + _idpattern = re.compile( r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$') @@ -138,7 +141,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): def build_qhp(self, outdir, outname): # type: (unicode, unicode) -> None - self.info('writing project file...') + logger.info('writing project file...') # sections tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self, @@ -216,7 +219,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): nspace, 'doc', self.get_target_uri(self.config.master_doc)) startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html') - self.info('writing collection project file...') + logger.info('writing collection project file...') with codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8') as f: # type: ignore # NOQA f.write(collection_template % { # type: ignore 'outname': htmlescape(outname), diff --git a/sphinx/builders/texinfo.py b/sphinx/builders/texinfo.py index 354575157..e46320456 100644 --- a/sphinx/builders/texinfo.py +++ b/sphinx/builders/texinfo.py @@ -22,6 +22,7 @@ from sphinx import addnodes from sphinx.locale import _ from sphinx.builders import Builder from sphinx.environment import NoUri +from sphinx.util import logging from sphinx.util.nodes import inline_all_toctrees from sphinx.util.osutil import SEP, copyfile, make_filename from sphinx.util.console import bold, darkgreen # type: ignore @@ -33,6 +34,8 @@ if False: from typing import Any, Iterable, Tuple, Union # NOQA +logger = logging.getLogger(__name__) + TEXINFO_MAKEFILE = '''\ # Makefile for Sphinx Texinfo output @@ -152,11 +155,11 @@ class TexinfoBuilder(Builder): destination = FileOutput( destination_path=path.join(self.outdir, targetname), encoding='utf-8') - self.info("processing " + targetname + "... ", nonl=1) + logger.info("processing " + targetname + "... ", nonl=1) doctree = self.assemble_doctree( docname, toctree_only, appendices=(self.config.texinfo_appendices or [])) - self.info("writing... 
", nonl=1) + logger.info("writing... ", nonl=1) self.post_process_images(doctree) docwriter = TexinfoWriter(self) settings = OptionParser( @@ -173,12 +176,12 @@ class TexinfoBuilder(Builder): settings.docname = docname doctree.settings = settings docwriter.write(doctree, destination) - self.info("done") + logger.info("done") def assemble_doctree(self, indexfile, toctree_only, appendices): # type: (unicode, bool, List[unicode]) -> nodes.Node self.docnames = set([indexfile] + appendices) - self.info(darkgreen(indexfile) + " ", nonl=1) + logger.info(darkgreen(indexfile) + " ", nonl=1) tree = self.env.get_doctree(indexfile) tree['docname'] = indexfile if toctree_only: @@ -199,8 +202,8 @@ class TexinfoBuilder(Builder): appendix = self.env.get_doctree(docname) appendix['docname'] = docname largetree.append(appendix) - self.info() - self.info("resolving references...") + logger.info('') + logger.info("resolving references...") self.env.resolve_references(largetree, indexfile, self) # TODO: add support for external :ref:s for pendingnode in largetree.traverse(addnodes.pending_xref): @@ -222,23 +225,23 @@ class TexinfoBuilder(Builder): # type: () -> None # copy image files if self.images: - self.info(bold('copying images...'), nonl=1) + logger.info(bold('copying images...'), nonl=1) for src, dest in iteritems(self.images): - self.info(' '+src, nonl=1) + logger.info(' '+src, nonl=1) copyfile(path.join(self.srcdir, src), path.join(self.outdir, dest)) - self.info() + logger.info('') - self.info(bold('copying Texinfo support files... '), nonl=True) + logger.info(bold('copying Texinfo support files... '), nonl=True) # copy Makefile fn = path.join(self.outdir, 'Makefile') - self.info(fn, nonl=1) + logger.info(fn, nonl=1) try: with open(fn, 'w') as mkfile: mkfile.write(TEXINFO_MAKEFILE) except (IOError, OSError) as err: self.warn("error writing file %s: %s" % (fn, err)) - self.info(' done') + logger.info(' done') def setup(app): diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index a952146fe..663442503 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -33,7 +33,7 @@ from docutils.frontend import OptionParser from sphinx import addnodes from sphinx.io import SphinxStandaloneReader, SphinxDummyWriter, SphinxFileInput -from sphinx.util import get_matching_docs, docname_join, FilenameUniqDict +from sphinx.util import get_matching_docs, docname_join, FilenameUniqDict, logging from sphinx.util.nodes import clean_astext, WarningStream, is_translatable, \ process_only_nodes from sphinx.util.osutil import SEP, getcwd, fs_encoding, ensuredir @@ -60,6 +60,8 @@ if False: from sphinx.domains import Domain # NOQA from sphinx.environment.managers import EnvironmentManager # NOQA +logger = logging.getLogger(__name__) + default_settings = { 'embed_stylesheet': False, 'cloak_email_addresses': True, @@ -553,7 +555,7 @@ class BuildEnvironment(object): # this cache also needs to be updated every time self._nitpick_ignore = set(self.config.nitpick_ignore) - app.info(bold('updating environment: '), nonl=True) + logger.info(bold('updating environment: '), nonl=True) added, changed, removed = self.get_outdated_files(config_changed) @@ -569,7 +571,7 @@ class BuildEnvironment(object): msg += '%s added, %s changed, %s removed' % (len(added), len(changed), len(removed)) - app.info(msg) + logger.info(msg) self.app = app @@ -664,7 +666,7 @@ class BuildEnvironment(object): tasks.add_task(read_process, chunk, merge) # make sure all threads have finished - 
app.info(bold('waiting for workers...')) + logger.info(bold('waiting for workers...')) tasks.join() for warning, kwargs in warnings: diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 4251c7169..3e7f84b8e 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -31,6 +31,7 @@ from sphinx.util import rpartition, force_decode from sphinx.locale import _ from sphinx.pycode import ModuleAnalyzer, PycodeError from sphinx.application import ExtensionError +from sphinx.util import logging from sphinx.util.nodes import nested_parse_with_titles from sphinx.util.inspect import getargspec, isdescriptor, safe_getmembers, \ safe_getattr, object_description, is_builtin_class_method, \ @@ -52,6 +53,8 @@ try: except ImportError: typing = None +logger = logging.getLogger(__name__) + # This type isn't exposed directly in any modules, but can be found # here in most Python versions MethodDescriptorType = type(type.__subclasses__) @@ -581,26 +584,25 @@ class Documenter(object): Returns True if successful, False if an error occurred. """ - dbg = self.env.app.debug if self.objpath: - dbg('[autodoc] from %s import %s', - self.modname, '.'.join(self.objpath)) + logger.debug('[autodoc] from %s import %s', + self.modname, '.'.join(self.objpath)) try: - dbg('[autodoc] import %s', self.modname) + logger.debug('[autodoc] import %s', self.modname) for modname in self.env.config.autodoc_mock_imports: - dbg('[autodoc] adding a mock module %s!', modname) + logger.debug('[autodoc] adding a mock module %s!', modname) mock_import(modname) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=ImportWarning) __import__(self.modname) parent = None obj = self.module = sys.modules[self.modname] - dbg('[autodoc] => %r', obj) + logger.debug('[autodoc] => %r', obj) for part in self.objpath: parent = obj - dbg('[autodoc] getattr(_, %r)', part) + logger.debug('[autodoc] getattr(_, %r)', part) obj = self.get_attr(obj, part) - dbg('[autodoc] => %r', obj) + logger.debug('[autodoc] => %r', obj) self.object_name = part self.parent = parent self.object = obj @@ -622,7 +624,7 @@ class Documenter(object): traceback.format_exc() if PY2: errmsg = errmsg.decode('utf-8') # type: ignore - dbg(errmsg) + logger.debug(errmsg) self.directive.warn(errmsg) self.env.note_reread() return False @@ -1024,7 +1026,7 @@ class Documenter(object): # be cached anyway) self.analyzer.find_attr_docs() except PycodeError as err: - self.env.app.debug('[autodoc] module analyzer failed: %s', err) + logger.debug('[autodoc] module analyzer failed: %s', err) # no source file -- e.g. 
for builtin and C modules self.analyzer = None # at least add the module.__file__ as a dependency @@ -1730,8 +1732,8 @@ class AutoDirective(Directive): source, lineno = self.reporter.get_source_and_line(self.lineno) except AttributeError: source = lineno = None - self.env.app.debug('[autodoc] %s:%s: input:\n%s', - source, lineno, self.block_text) + logger.debug('[autodoc] %s:%s: input:\n%s', + source, lineno, self.block_text) # find out what documenter to call objtype = self.name[4:] @@ -1760,7 +1762,7 @@ class AutoDirective(Directive): if not self.result: return self.warnings - self.env.app.debug2('[autodoc] output:\n%s', '\n'.join(self.result)) + logger.debug2('[autodoc] output:\n%s', '\n'.join(self.result)) # record all filenames as dependencies -- this will at least # partially make automatic invalidation possible diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index 158692c1e..cbe7c08f0 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -69,7 +69,7 @@ from docutils import nodes import sphinx from sphinx import addnodes -from sphinx.util import import_object, rst +from sphinx.util import import_object, rst, logging from sphinx.pycode import ModuleAnalyzer, PycodeError from sphinx.ext.autodoc import Options @@ -81,6 +81,8 @@ if False: from sphinx.environment import BuildEnvironment # NOQA from sphinx.ext.autodoc import Documenter # NOQA +logger = logging.getLogger(__name__) + # -- autosummary_toc node ------------------------------------------------------ @@ -305,8 +307,7 @@ class Autosummary(Directive): # be cached anyway) documenter.analyzer.find_attr_docs() except PycodeError as err: - documenter.env.app.debug( - '[autodoc] module analyzer failed: %s', err) + logger.debug('[autodoc] module analyzer failed: %s', err) # no source file -- e.g. 
for builtin and C modules documenter.analyzer = None diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index b0c1f61f5..d8311c3b5 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -25,7 +25,7 @@ from docutils.parsers.rst import Directive, directives import sphinx from sphinx.builders import Builder -from sphinx.util import force_decode +from sphinx.util import force_decode, logging from sphinx.util.nodes import set_source_info from sphinx.util.console import bold # type: ignore from sphinx.util.osutil import fs_encoding @@ -35,6 +35,8 @@ if False: from typing import Any, Callable, IO, Iterable, Sequence, Tuple # NOQA from sphinx.application import Sphinx # NOQA +logger = logging.getLogger(__name__) + blankline_re = re.compile(r'^\s*<BLANKLINE>', re.MULTILINE) doctestopt_re = re.compile(r'#\s*doctest:.+$', re.MULTILINE) @@ -262,7 +264,7 @@ Results of doctest builder run on %s def _out(self, text): # type: (unicode) -> None - self.info(text, nonl=True) + logger.info(text, nonl=True) self.outfile.write(text) def _warn_out(self, text): @@ -270,7 +272,7 @@ Results of doctest builder run on %s if self.app.quiet or self.app.warningiserror: self.warn(text) else: - self.info(text, nonl=True) + logger.info(text, nonl=True) if isinstance(text, binary_type): text = force_decode(text, None) self.outfile.write(text) @@ -311,7 +313,7 @@ Doctest summary if build_docnames is None: build_docnames = sorted(self.env.all_docs) - self.info(bold('running tests...')) + logger.info(bold('running tests...')) for docname in build_docnames: # no need to resolve the doctree doctree = self.env.get_doctree(docname) diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py index d24290436..41f00e5a1 100644 --- a/sphinx/ext/intersphinx.py +++ b/sphinx/ext/intersphinx.py @@ -42,7 +42,7 @@ from docutils.utils import relative_path import sphinx from sphinx.locale import _ from sphinx.builders.html import INVENTORY_FILENAME -from sphinx.util import requests +from sphinx.util import requests, logging if False: # For type annotation @@ -56,6 +56,7 @@ if False: Inventory = Dict[unicode, Dict[unicode, Tuple[unicode, unicode, unicode, unicode]]] +logger = logging.getLogger(__name__) UTF8StreamReader = codecs.lookup('utf-8')[2] @@ -235,7 +236,7 @@ def fetch_inventory(app, uri, inv): if hasattr(f, 'url'): newinv = f.url # type: ignore if inv != newinv: - app.info('intersphinx inventory has moved: %s -> %s' % (inv, newinv)) + logger.info('intersphinx inventory has moved: %s -> %s' % (inv, newinv)) if uri in (inv, path.dirname(inv), path.dirname(inv) + '/'): uri = path.dirname(newinv) @@ -294,7 +295,7 @@ def load_mappings(app): if '://' not in inv or uri not in cache \ or cache[uri][1] < cache_time: safe_inv_url = _get_safe_url(inv) # type: ignore - app.info( + logger.info( 'loading intersphinx inventory from %s...' 
% safe_inv_url) invdata = fetch_inventory(app, uri, inv) if invdata: diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py index 813a465db..a4f93c144 100644 --- a/sphinx/ext/viewcode.py +++ b/sphinx/ext/viewcode.py @@ -19,7 +19,7 @@ import sphinx from sphinx import addnodes from sphinx.locale import _ from sphinx.pycode import ModuleAnalyzer -from sphinx.util import get_full_modname +from sphinx.util import get_full_modname, logging from sphinx.util.nodes import make_refnode from sphinx.util.console import blue # type: ignore @@ -29,6 +29,8 @@ if False: from sphinx.application import Sphinx # NOQA from sphinx.environment import BuildEnvironment # NOQA +logger = logging.getLogger(__name__) + def _get_full_modname(app, modname, attribute): # type: (Sphinx, str, unicode) -> unicode @@ -37,16 +39,16 @@ def _get_full_modname(app, modname, attribute): except AttributeError: # sphinx.ext.viewcode can't follow class instance attribute # then AttributeError logging output only verbose mode. - app.verbose('Didn\'t find %s in %s' % (attribute, modname)) + logger.verbose('Didn\'t find %s in %s' % (attribute, modname)) return None except Exception as e: # sphinx.ext.viewcode follow python domain directives. # because of that, if there are no real modules exists that specified # by py:function or other directives, viewcode emits a lot of warnings. # It should be displayed only verbose mode. - app.verbose(traceback.format_exc().rstrip()) - app.verbose('viewcode can\'t import %s, failed with error "%s"' % - (modname, e)) + logger.verbose(traceback.format_exc().rstrip()) + logger.verbose('viewcode can\'t import %s, failed with error "%s"' % + (modname, e)) return None diff --git a/sphinx/transforms/__init__.py b/sphinx/transforms/__init__.py index a72541666..8d469eabe 100644 --- a/sphinx/transforms/__init__.py +++ b/sphinx/transforms/__init__.py @@ -15,9 +15,13 @@ from docutils.transforms.parts import ContentsFilter from sphinx import addnodes from sphinx.locale import _ +from sphinx.util import logging from sphinx.util.i18n import format_date from sphinx.util.nodes import apply_source_workaround + +logger = logging.getLogger(__name__) + default_substitutions = set([ 'version', 'release', @@ -215,7 +219,7 @@ class FilterSystemMessages(Transform): filterlevel = env.config.keep_warnings and 2 or 5 for node in self.document.traverse(nodes.system_message): if node['level'] < filterlevel: - env.app.debug('%s [filtered system message]', node.astext()) + logger.debug('%s [filtered system message]', node.astext()) node.parent.remove(node) diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index d89aae374..404556f36 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -28,6 +28,7 @@ from six.moves.urllib.parse import urlsplit, urlunsplit, quote_plus, parse_qsl, from docutils.utils import relative_path from sphinx.errors import PycodeError, SphinxParallelError, ExtensionError +from sphinx.util import logging from sphinx.util.console import strip_colors from sphinx.util.fileutil import copy_asset_file from sphinx.util.osutil import fs_encoding @@ -46,6 +47,9 @@ if False: # For type annotation from typing import Any, Callable, Iterable, Pattern, Sequence, Tuple # NOQA + +logger = logging.getLogger(__name__) + # Generally useful regular expressions. 
ws_re = re.compile(r'\s+') # type: Pattern url_re = re.compile(r'(?P<schema>.+)://.*') # type: Pattern @@ -532,3 +536,12 @@ def split_docinfo(text): return '', result[0] else: return result[1:] + + +def display_chunk(chunk): + # type: (Union[List, Tuple, unicode]) -> unicode + if isinstance(chunk, (list, tuple)): + if len(chunk) == 1: + return text_type(chunk[0]) + return '%s .. %s' % (chunk[0], chunk[-1]) + return text_type(chunk) From d8ad3d063c25278b22c20a46cb2b0b465b4047bf Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 22 Dec 2016 21:57:29 +0900 Subject: [PATCH 063/190] sphinx.util.parallel supports logging in child workers --- sphinx/util/logging.py | 31 ++++++++++++++++++++++++++++++- sphinx/util/parallel.py | 27 +++++++++++++++++++-------- tests/test_util_logging.py | 17 +++++++++++++++++ 3 files changed, 66 insertions(+), 9 deletions(-) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index 10043ccd4..7afaadf34 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -53,6 +53,14 @@ def getLogger(name): return SphinxLoggerAdapter(logging.getLogger(name), {}) +def convert_serializable(records): + """Convert LogRecord serializable.""" + for r in records: + # extract arguments to a message and clear them + r.msg = r.getMessage() + r.args = () + + class SphinxWarningLogRecord(logging.LogRecord): """Log record class supporting location""" location = None # type: Any @@ -113,6 +121,10 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): return msg, kwargs + def handle(self, record): + # type: (logging.LogRecord) -> None + self.logger.handle(record) # type: ignore + class NewLineStreamHandlerPY2(logging.StreamHandler): """StreamHandler which switches line terminator by record.nonl flag.""" @@ -177,6 +189,11 @@ class MemoryHandler(logging.handlers.BufferingHandler): finally: self.release() + def clear(self): + # type: () -> List[logging.LogRecord] + buffer, self.buffer = self.buffer, [] + return buffer + @contextmanager def pending_logging(): @@ -192,7 +209,7 @@ def pending_logging(): handlers.append(handler) logger.addHandler(memhandler) - yield + yield memhandler finally: logger.removeHandler(memhandler) @@ -202,6 +219,18 @@ def pending_logging(): memhandler.flushTo(logger) +class LogCollector(object): + def __init__(self): + self.logs = [] # type: logging.LogRecord + + @contextmanager + def collect(self): + with pending_logging() as memhandler: + yield + + self.logs = memhandler.clear() + + class InfoFilter(logging.Filter): """Filter error and warning messages.""" diff --git a/sphinx/util/parallel.py b/sphinx/util/parallel.py index 814af09b1..a92dd9639 100644 --- a/sphinx/util/parallel.py +++ b/sphinx/util/parallel.py @@ -21,11 +21,15 @@ except ImportError: multiprocessing = None from sphinx.errors import SphinxParallelError +from sphinx.util import logging if False: # For type annotation from typing import Any, Callable, Sequence # NOQA +logger = logging.getLogger(__name__) + + # our parallel functionality only works for the forking Process parallel_available = multiprocessing and (os.name == 'posix') @@ -75,19 +79,24 @@ class ParallelTasks(object): def _process(self, pipe, func, arg): # type: (Any, Callable, Any) -> None try: - if arg is None: - ret = func() - else: - ret = func(arg) - pipe.send((False, ret)) + collector = logging.LogCollector() + with collector.collect(): + if arg is None: + ret = func() + else: + ret = func(arg) + failed = False except BaseException as err: - pipe.send((True, (err, traceback.format_exc()))) + 
failed = True + ret = (err, traceback.format_exc()) + logging.convert_serializable(collector.logs) + pipe.send((failed, collector.logs, ret)) def add_task(self, task_func, arg=None, result_func=None): # type: (Callable, Any, Callable) -> None tid = self._taskid self._taskid += 1 - self._result_funcs[tid] = result_func or (lambda arg: None) + self._result_funcs[tid] = result_func or (lambda arg, result: None) self._args[tid] = arg precv, psend = multiprocessing.Pipe(False) proc = multiprocessing.Process(target=self._process, @@ -105,9 +114,11 @@ class ParallelTasks(object): # type: () -> None for tid, pipe in iteritems(self._precvs): if pipe.poll(): - exc, result = pipe.recv() + exc, logs, result = pipe.recv() if exc: raise SphinxParallelError(*result) + for log in logs: + logger.handle(log) self._result_funcs.pop(tid)(self._args.pop(tid), result) self._procs[tid].join() self._pworking -= 1 diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index 7a7ce3b21..25816bf4e 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -16,6 +16,7 @@ from sphinx.errors import SphinxWarning from sphinx.util import logging from sphinx.util.console import colorize from sphinx.util.logging import is_suppressed_warning +from sphinx.util.parallel import ParallelTasks from util import with_app, raises, strip_escseq @@ -269,3 +270,19 @@ def test_colored_logs(app, status, warning): logger.info('message9', color='red') assert colorize('white', 'message8') in status.getvalue() assert colorize('red', 'message9') in status.getvalue() + + +@with_app() +def test_logging_in_ParallelTasks(app, status, warning): + logging.setup(app, status, warning) + logger = logging.getLogger(__name__) + + def child_process(): + logger.info('message1') + logger.warning('message2', location='index') + + tasks = ParallelTasks(1) + tasks.add_task(child_process) + tasks.join() + assert 'message1' in status.getvalue() + assert 'index.txt: WARNING: message2' in warning.getvalue() From ad871e5a48ecf1b20506a0b90d689f50e241111f Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Fri, 23 Dec 2016 10:41:38 +0900 Subject: [PATCH 064/190] Add sphinx.util.logging.pending_warnings() --- sphinx/builders/__init__.py | 4 ++-- sphinx/util/logging.py | 35 +++++++++++++++++++++++++++++++++-- tests/test_util_logging.py | 4 ++-- 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 9dc757cd4..6230502d1 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -287,7 +287,7 @@ class Builder(object): logger.info(bold('building [%s]' % self.name) + ': ' + summary) # while reading, collect all warnings from docutils - with logging.pending_logging(): + with logging.pending_warnings(): updated_docnames = set(self.env.update(self.config, self.srcdir, self.doctreedir, self.app)) @@ -386,7 +386,7 @@ class Builder(object): def _write_serial(self, docnames): # type: (Sequence[unicode]) -> None - with logging.pending_logging(): + with logging.pending_warnings(): for docname in self.app.status_iterator( docnames, 'writing output... 
', darkgreen, len(docnames)): doctree = self.env.get_and_resolve_doctree(docname, self) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index 7afaadf34..a473ffc97 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -126,6 +126,11 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): self.logger.handle(record) # type: ignore +class WarningStreamHandler(logging.StreamHandler): + """StreamHandler for warnings.""" + pass + + class NewLineStreamHandlerPY2(logging.StreamHandler): """StreamHandler which switches line terminator by record.nonl flag.""" @@ -195,10 +200,36 @@ class MemoryHandler(logging.handlers.BufferingHandler): return buffer +@contextmanager +def pending_warnings(): + # type: () -> Generator + """contextmanager to pend logging warnings temporary.""" + logger = logging.getLogger() + memhandler = MemoryHandler() + memhandler.setLevel(logging.WARNING) + + try: + handlers = [] + for handler in logger.handlers[:]: + if isinstance(handler, WarningStreamHandler): + logger.removeHandler(handler) + handlers.append(handler) + + logger.addHandler(memhandler) + yield memhandler + finally: + logger.removeHandler(memhandler) + + for handler in handlers: + logger.addHandler(handler) + + memhandler.flushTo(logger) + + @contextmanager def pending_logging(): # type: () -> Generator - """contextmanager to pend logging temporary.""" + """contextmanager to pend logging all logs temporary.""" logger = logging.getLogger() memhandler = MemoryHandler() @@ -357,7 +388,7 @@ def setup(app, status, warning): info_handler.setLevel(VERBOSITY_MAP.get(app.verbosity)) info_handler.setFormatter(ColorizeFormatter()) - warning_handler = logging.StreamHandler(warning) + warning_handler = WarningStreamHandler(warning) warning_handler.addFilter(WarningSuppressor(app)) warning_handler.addFilter(WarningIsErrorFilter(app)) warning_handler.addFilter(WarningLogRecordTranslator(app)) diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index 25816bf4e..984b52220 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -225,12 +225,12 @@ def test_warn_node(app, status, warning): @with_app() -def test_pending_logging(app, status, warning): +def test_pending_warnings(app, status, warning): logging.setup(app, status, warning) logger = logging.getLogger(__name__) logger.warning('message1') - with logging.pending_logging(): + with logging.pending_warnings(): # not logged yet (bufferred) in here logger.warning('message2') logger.warning('message3') From aa65a194668bb32f9f2be8189a7302136c119826 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Fri, 23 Dec 2016 11:58:11 +0900 Subject: [PATCH 065/190] Add sphinx.util.logging.SafeEncodingWriter --- sphinx/util/logging.py | 23 +++++++++++++++++++++-- tests/test_application.py | 17 ----------------- tests/test_util_logging.py | 16 ++++++++++++++++ 3 files changed, 37 insertions(+), 19 deletions(-) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index a473ffc97..97d1ae624 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -373,6 +373,25 @@ class ColorizeFormatter(logging.Formatter): return message +class SafeEncodingWriter(object): + """Stream writer which ignores UnicodeEncodeError silently""" + def __init__(self, stream): + self.stream = stream + self.encoding = getattr(stream, 'encoding', 'ascii') or 'ascii' + + def write(self, data): + try: + self.stream.write(data) + except UnicodeEncodeError: + # stream accept only str, not bytes. 
So, we encode and replace + # non-encodable characters, then decode them. + self.stream.write(data.encode(self.encoding, 'replace').decode(self.encoding)) + + def flush(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def setup(app, status, warning): # type: (Sphinx, IO, IO) -> None """Setup root logger for Sphinx""" @@ -383,12 +402,12 @@ def setup(app, status, warning): for handler in logger.handlers[:]: logger.removeHandler(handler) - info_handler = NewLineStreamHandler(status) + info_handler = NewLineStreamHandler(SafeEncodingWriter(status)) info_handler.addFilter(InfoFilter()) info_handler.setLevel(VERBOSITY_MAP.get(app.verbosity)) info_handler.setFormatter(ColorizeFormatter()) - warning_handler = WarningStreamHandler(warning) + warning_handler = WarningStreamHandler(SafeEncodingWriter(warning)) warning_handler.addFilter(WarningSuppressor(app)) warning_handler.addFilter(WarningIsErrorFilter(app)) warning_handler.addFilter(WarningLogRecordTranslator(app)) diff --git a/tests/test_application.py b/tests/test_application.py index 1580b8036..1f4a30d97 100644 --- a/tests/test_application.py +++ b/tests/test_application.py @@ -8,7 +8,6 @@ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ -import codecs from docutils import nodes @@ -68,22 +67,6 @@ def test_output(app, status, warning): assert app._warncount == old_count + 1 -@with_app() -def test_output_with_unencodable_char(app, status, warning): - - class StreamWriter(codecs.StreamWriter): - def write(self, object): - self.stream.write(object.encode('cp1252').decode('cp1252')) - - app._status = StreamWriter(status) - - # info with UnicodeEncodeError - status.truncate(0) - status.seek(0) - app.info(u"unicode \u206d...") - assert status.getvalue() == "unicode ?...\n" - - @with_app() def test_extensions(app, status, warning): app.setup_extension('shutil') diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index 984b52220..37c4a2d2a 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -10,6 +10,7 @@ """ from __future__ import print_function +import codecs from docutils import nodes from sphinx.errors import SphinxWarning @@ -286,3 +287,18 @@ def test_logging_in_ParallelTasks(app, status, warning): tasks.join() assert 'message1' in status.getvalue() assert 'index.txt: WARNING: message2' in warning.getvalue() + + +@with_app() +def test_output_with_unencodable_char(app, status, warning): + class StreamWriter(codecs.StreamWriter): + def write(self, object): + self.stream.write(object.encode('cp1252').decode('cp1252')) + + logging.setup(app, StreamWriter(status), warning) + + # info with UnicodeEncodeError + status.truncate(0) + status.seek(0) + app.info(u"unicode \u206d...") + assert status.getvalue() == "unicode ?...\n" From 25a078655b8cf5fd9a6d40ace7eb09848a3a5de8 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 25 Dec 2016 00:39:54 +0900 Subject: [PATCH 066/190] Refactor SphinxLoggerAdapter --- sphinx/util/docutils.py | 14 +------------- sphinx/util/logging.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py index 084579815..ad80ac7a7 100644 --- a/sphinx/util/docutils.py +++ b/sphinx/util/docutils.py @@ -124,25 +124,13 @@ class sphinx_domains(object): class WarningStream(object): - level_mapping = { - 'DEBUG': logger.debug, - 'INFO': logger.info, - 'WARNING': logger.warning, - 'ERROR': logger.error, - 
'SEVERE': logger.critical, - } - def write(self, text): matched = report_re.search(text) if not matched: logger.warning(text.rstrip("\r\n")) else: location, type, level, message = matched.groups() - if type in self.level_mapping: - logger_method = self.level_mapping.get(type) - logger_method(message, location=location) - else: - logger.warning(text.rstrip("\r\n")) + logger.log(type, message, location=location) class LoggingReporter(Reporter): diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index 97d1ae624..2ce31a283 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -31,6 +31,18 @@ if False: VERBOSE = 15 DEBUG2 = 5 +LEVEL_NAMES = defaultdict(lambda: logging.WARNING) # type: Dict[str, int] +LEVEL_NAMES.update({ + 'CRITICAL': logging.CRITICAL, + 'SEVERE': logging.CRITICAL, + 'ERROR': logging.ERROR, + 'WARNING': logging.WARNING, + 'INFO': logging.INFO, + 'VERBOSE': VERBOSE, + 'DEBUG': logging.DEBUG, + 'DEBUG2': DEBUG2, +}) + VERBOSITY_MAP = defaultdict(lambda: 0) # type: Dict[int, int] VERBOSITY_MAP.update({ 0: logging.INFO, @@ -97,6 +109,14 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): self.warning(message, **kwargs) + def log(self, level, msg, *args, **kwargs): + # type: (Union[int, str], unicode, Any, Any) -> None + if isinstance(level, int): + super(SphinxLoggerAdapter, self).log(level, msg, *args, **kwargs) + else: + levelno = LEVEL_NAMES.get(level) + super(SphinxLoggerAdapter, self).log(levelno, msg, *args, **kwargs) + def verbose(self, msg, *args, **kwargs): # type: (unicode, Any, Any) -> None self.log(VERBOSE, msg, *args, **kwargs) From 4ea25a4df36eaaf7f6084bc9138877d4d90e6cb6 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 25 Dec 2016 01:14:08 +0900 Subject: [PATCH 067/190] Emit warning on logger methods --- sphinx/application.py | 12 +++++++++++- sphinx/deprecation.py | 4 ++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/sphinx/application.py b/sphinx/application.py index 181ef1d95..acef54ab6 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -35,7 +35,7 @@ from sphinx.errors import SphinxError, ExtensionError, VersionRequirementError, ConfigError from sphinx.domains import ObjType from sphinx.domains.std import GenericObject, Target, StandardDomain -from sphinx.deprecation import RemovedInSphinx17Warning +from sphinx.deprecation import RemovedInSphinx17Warning, RemovedInSphinx20Warning from sphinx.environment import BuildEnvironment from sphinx.io import SphinxStandaloneReader from sphinx.roles import XRefRole @@ -399,6 +399,8 @@ class Sphinx(object): warnings.warn('colorfunc option of warn() is now deprecated.', RemovedInSphinx17Warning) + warnings.warn('app.warning() is now deprecated. Use sphinx.util.logging instead.', + RemovedInSphinx20Warning) logger.warning(message, type=type, subtype=subtype, location=location) def info(self, message='', nonl=False): @@ -408,21 +410,29 @@ class Sphinx(object): If *nonl* is true, don't emit a newline at the end (which implies that more info output will follow soon.) """ + warnings.warn('app.info() is now deprecated. Use sphinx.util.logging instead.', + RemovedInSphinx20Warning) logger.info(message, nonl=nonl) def verbose(self, message, *args, **kwargs): # type: (unicode, Any, Any) -> None """Emit a verbose informational message.""" + warnings.warn('app.verbose() is now deprecated. 
Use sphinx.util.logging instead.', + RemovedInSphinx20Warning) logger.verbose(message, *args, **kwargs) def debug(self, message, *args, **kwargs): # type: (unicode, Any, Any) -> None """Emit a debug-level informational message.""" + warnings.warn('app.debug() is now deprecated. Use sphinx.util.logging instead.', + RemovedInSphinx20Warning) logger.debug(message, *args, **kwargs) def debug2(self, message, *args, **kwargs): # type: (unicode, Any, Any) -> None """Emit a lowlevel debug-level informational message.""" + warnings.warn('app.debug2() is now deprecated. Use sphinx.util.logging instead.', + RemovedInSphinx20Warning) logger.debug2(message, *args, **kwargs) def _display_chunk(chunk): diff --git a/sphinx/deprecation.py b/sphinx/deprecation.py index a5d14762f..163992712 100644 --- a/sphinx/deprecation.py +++ b/sphinx/deprecation.py @@ -18,4 +18,8 @@ class RemovedInSphinx17Warning(PendingDeprecationWarning): pass +class RemovedInSphinx20Warning(PendingDeprecationWarning): + pass + + RemovedInNextVersionWarning = RemovedInSphinx16Warning From 6bf201533a372fee2503ea4b79858752ce41b612 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 25 Dec 2016 01:11:52 +0900 Subject: [PATCH 068/190] Fix style-check violation --- tests/test_highlighting.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_highlighting.py b/tests/test_highlighting.py index 46cf95d1a..5a6b749d0 100644 --- a/tests/test_highlighting.py +++ b/tests/test_highlighting.py @@ -111,4 +111,5 @@ def test_default_highlight(logger): ret = bridge.highlight_block('reST ``like`` text', 'python3') logger.warning.assert_called_with('Could not lex literal_block as "%s". ' 'Highlighting skipped.', 'python3', - type='misc', subtype='highlighting_failure', location=None) + type='misc', subtype='highlighting_failure', + location=None) From e755a8c004291faa27e511205d2c365e8461bf9f Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 25 Dec 2016 01:03:06 +0900 Subject: [PATCH 069/190] Use loggers --- sphinx/application.py | 8 +-- sphinx/builders/__init__.py | 43 ++++-------- sphinx/builders/applehelp.py | 14 ++-- sphinx/builders/changes.py | 2 +- sphinx/builders/epub.py | 28 ++++---- sphinx/builders/epub3.py | 10 +-- sphinx/builders/gettext.py | 2 +- sphinx/builders/html.py | 48 ++++++------- sphinx/builders/latex.py | 48 +++++++------ sphinx/builders/linkcheck.py | 6 +- sphinx/builders/manpage.py | 4 +- sphinx/builders/texinfo.py | 10 +-- sphinx/builders/text.py | 5 +- sphinx/builders/xml.py | 5 +- sphinx/config.py | 8 +-- sphinx/domains/cpp.py | 9 ++- sphinx/domains/python.py | 5 +- sphinx/domains/std.py | 32 ++++----- sphinx/environment/__init__.py | 75 +++++++++++---------- sphinx/environment/managers/indexentries.py | 10 +-- sphinx/environment/managers/toctree.py | 19 +++--- sphinx/ext/autosectionlabel.py | 7 +- sphinx/ext/autosummary/__init__.py | 8 +-- sphinx/ext/coverage.py | 33 +++++---- sphinx/ext/doctest.py | 13 ++-- sphinx/ext/graphviz.py | 15 +++-- sphinx/ext/imgmath.py | 21 +++--- sphinx/ext/intersphinx.py | 15 ++--- sphinx/ext/pngmath.py | 22 +++--- sphinx/ext/todo.py | 5 +- sphinx/ext/viewcode.py | 2 +- sphinx/transforms/__init__.py | 3 +- sphinx/transforms/i18n.py | 17 +++-- sphinx/util/logging.py | 6 +- sphinx/util/nodes.py | 20 +++--- sphinx/writers/html.py | 19 +++--- sphinx/writers/latex.py | 41 ++++++----- sphinx/writers/manpage.py | 9 ++- sphinx/writers/texinfo.py | 27 ++++---- sphinx/writers/text.py | 11 +-- tests/test_application.py | 20 
------ tests/test_environment.py | 24 ++----- tests/test_util_logging.py | 3 +- 43 files changed, 368 insertions(+), 364 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index acef54ab6..584ad109f 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -233,9 +233,9 @@ class Sphinx(object): if self.config.needs_extensions: for extname, needs_ver in self.config.needs_extensions.items(): if extname not in self._extensions: - self.warn('needs_extensions config value specifies a ' - 'version requirement for extension %s, but it is ' - 'not loaded' % extname) + logger.warning('needs_extensions config value specifies a ' + 'version requirement for extension %s, but it is ' + 'not loaded', extname) continue has_ver = self._extension_metadata[extname]['version'] if has_ver == 'unknown version' or needs_ver > has_ver: @@ -246,7 +246,7 @@ class Sphinx(object): # check primary_domain if requested if self.config.primary_domain and self.config.primary_domain not in self.domains: - self.warn('primary_domain %r not found, ignored.' % self.config.primary_domain) + logger.warning('primary_domain %r not found, ignored.', self.config.primary_domain) # set up translation infrastructure self._init_i18n() diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 6230502d1..7448e2682 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -161,9 +161,8 @@ class Builder(object): if candidate: break else: - self.warn( - 'no matching candidate for image URI %r' % node['uri'], - '%s:%s' % (node.source, getattr(node, 'line', ''))) + logger.warn_node('no matching candidate for image URI %r' % node['uri'], + node) continue node['uri'] = candidate else: @@ -246,13 +245,13 @@ class Builder(object): for filename in filenames: filename = path.normpath(path.abspath(filename)) if not filename.startswith(self.srcdir): - self.warn('file %r given on command line is not under the ' - 'source directory, ignoring' % filename) + logger.warning('file %r given on command line is not under the ' + 'source directory, ignoring', filename) continue if not (path.isfile(filename) or any(path.isfile(filename + suffix) for suffix in suffixes)): - self.warn('file %r given on command line does not exist, ' - 'ignoring' % filename) + logger.warning('file %r given on command line does not exist, ' + 'ignoring', filename) continue filename = filename[dirlen:] for suffix in suffixes: @@ -297,7 +296,7 @@ class Builder(object): updated_docnames.add(docname) outdated = len(updated_docnames) - doccount if outdated: - logger.info('%d found' % outdated) + logger.info('%d found', outdated) else: logger.info('none found') @@ -330,8 +329,8 @@ class Builder(object): for extname, md in self.app._extension_metadata.items(): par_ok = md.get('parallel_write_safe', True) if not par_ok: - self.app.warn('the %s extension is not safe for parallel ' - 'writing, doing serial write' % extname) + logger.warning('the %s extension is not safe for parallel ' + 'writing, doing serial write', extname) self.parallel_ok = False break @@ -374,12 +373,10 @@ class Builder(object): self.prepare_writing(docnames) logger.info('done') - warnings = [] # type: List[Tuple[Tuple, Dict]] if self.parallel_ok: # number of subprocesses is parallel-1 because the main process # is busy loading doctrees and doing write_doc_serialized() - warnings = [] - self._write_parallel(sorted(docnames), warnings, + self._write_parallel(sorted(docnames), nproc=self.app.parallel - 1) else: self._write_serial(sorted(docnames)) @@ 
-393,21 +390,12 @@ class Builder(object): self.write_doc_serialized(docname, doctree) self.write_doc(docname, doctree) - def _write_parallel(self, docnames, warnings, nproc): - # type: (Iterable[unicode], List[Tuple[Tuple, Dict]], int) -> None + def _write_parallel(self, docnames, nproc): + # type: (Iterable[unicode], int) -> None def write_process(docs): - # type: (List[Tuple[unicode, nodes.Node]]) -> List[Tuple[Tuple, Dict]] - local_warnings = [] - - def warnfunc(*args, **kwargs): - local_warnings.append((args, kwargs)) - self.env.set_warnfunc(warnfunc) + # type: (List[Tuple[unicode, nodes.Node]]) -> None for docname, doctree in docs: self.write_doc(docname, doctree) - return local_warnings - - def add_warnings(docs, wlist): - warnings.extend(wlist) # warm up caches/compile templates using the first document firstname, docnames = docnames[0], docnames[1:] # type: ignore @@ -425,15 +413,12 @@ class Builder(object): doctree = self.env.get_and_resolve_doctree(docname, self) self.write_doc_serialized(docname, doctree) arg.append((docname, doctree)) - tasks.add_task(write_process, arg, add_warnings) + tasks.add_task(write_process, arg) # make sure all threads have finished logger.info(bold('waiting for workers...')) tasks.join() - for warning, kwargs in warnings: - self.warn(*warning, **kwargs) - def prepare_writing(self, docnames): # type: (Set[unicode]) -> None """A place where you can add logic before :meth:`write_doc` is run""" diff --git a/sphinx/builders/applehelp.py b/sphinx/builders/applehelp.py index b0c5c66b1..6337f96da 100644 --- a/sphinx/builders/applehelp.py +++ b/sphinx/builders/applehelp.py @@ -184,9 +184,8 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): logger.info('done') except Exception as err: - self.warn('cannot copy icon file %r: %s' % - (path.join(self.srcdir, self.config.applehelp_icon), - err)) + logger.warning('cannot copy icon file %r: %s', + path.join(self.srcdir, self.config.applehelp_icon), err) del info_plist['HPDBookIconPath'] # Build the access page @@ -223,8 +222,8 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): if self.config.applehelp_disable_external_tools: logger.info('skipping') - self.warn('you will need to index this help book with:\n %s' - % (' '.join([pipes.quote(arg) for arg in args]))) + logger.warning('you will need to index this help book with:\n %s', + ' '.join([pipes.quote(arg) for arg in args])) else: try: p = subprocess.Popen(args, @@ -256,9 +255,8 @@ class AppleHelpBuilder(StandaloneHTMLBuilder): if self.config.applehelp_disable_external_tools: logger.info('skipping') - - self.warn('you will need to sign this help book with:\n %s' - % (' '.join([pipes.quote(arg) for arg in args]))) + logger.warning('you will need to sign this help book with:\n %s', + ' '.join([pipes.quote(arg) for arg in args])) else: try: p = subprocess.Popen(args, diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py index d1b908e3d..d6e118d97 100644 --- a/sphinx/builders/changes.py +++ b/sphinx/builders/changes.py @@ -136,7 +136,7 @@ class ChangesBuilder(Builder): try: lines = f.readlines() except UnicodeDecodeError: - self.warn('could not read %r for changelog creation' % docname) + logger.warning('could not read %r for changelog creation', docname) continue targetfn = path.join(self.outdir, 'rst', os_path(docname)) + '.html' ensuredir(path.dirname(targetfn)) diff --git a/sphinx/builders/epub.py b/sphinx/builders/epub.py index 97c736d9f..a48f94436 100644 --- a/sphinx/builders/epub.py +++ b/sphinx/builders/epub.py @@ -477,14 +477,14 @@ class 
EpubBuilder(StandaloneHTMLBuilder): img = Image.open(path.join(self.srcdir, src)) except IOError: if not self.is_vector_graphics(src): - self.warn('cannot read image file %r: copying it instead' % - (path.join(self.srcdir, src), )) + logger.warning('cannot read image file %r: copying it instead', + path.join(self.srcdir, src)) try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, self.imagedir, dest)) except (IOError, OSError) as err: - self.warn('cannot copy image file %r: %s' % - (path.join(self.srcdir, src), err)) + logger.warning('cannot copy image file %r: %s', + path.join(self.srcdir, src), err) continue if self.config.epub_fix_images: if img.mode in ('P',): @@ -499,8 +499,8 @@ class EpubBuilder(StandaloneHTMLBuilder): try: img.save(path.join(self.outdir, self.imagedir, dest)) except (IOError, OSError) as err: - self.warn('cannot write image file %r: %s' % - (path.join(self.srcdir, src), err)) + logger.warning('cannot write image file %r: %s', + path.join(self.srcdir, src), err) def copy_image_files(self): # type: () -> None @@ -510,7 +510,7 @@ class EpubBuilder(StandaloneHTMLBuilder): if self.images: if self.config.epub_fix_images or self.config.epub_max_image_width: if not Image: - self.warn('PIL not found - copying image files') + logger.warning('PIL not found - copying image files') super(EpubBuilder, self).copy_image_files() else: self.copy_image_files_pil() @@ -551,14 +551,14 @@ class EpubBuilder(StandaloneHTMLBuilder): def build_mimetype(self, outdir, outname): # type: (unicode, unicode) -> None """Write the metainfo file mimetype.""" - logger.info('writing %s file...' % outname) + logger.info('writing %s file...', outname) with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: # type: ignore f.write(self.mimetype_template) def build_container(self, outdir, outname): # type: (unicode, unicode) -> None """Write the metainfo file META-INF/cointainer.xml.""" - logger.info('writing %s file...' % outname) + logger.info('writing %s file...', outname) fn = path.join(outdir, outname) try: os.mkdir(path.dirname(fn)) @@ -593,7 +593,7 @@ class EpubBuilder(StandaloneHTMLBuilder): """Write the metainfo file content.opf It contains bibliographic data, a file list and the spine (the reading order). """ - logger.info('writing %s file...' % outname) + logger.info('writing %s file...', outname) # files if not outdir.endswith(os.sep): @@ -618,8 +618,8 @@ class EpubBuilder(StandaloneHTMLBuilder): # we always have JS and potentially OpenSearch files, don't # always warn about them if ext not in ('.js', '.xml'): - self.warn('unknown mimetype for %s, ignoring' % filename, - type='epub', subtype='unknown_project_files') + logger.warning('unknown mimetype for %s, ignoring', filename, + type='epub', subtype='unknown_project_files') continue filename = filename.replace(os.sep, '/') projectfiles.append(self.file_template % { @@ -804,7 +804,7 @@ class EpubBuilder(StandaloneHTMLBuilder): def build_toc(self, outdir, outname): # type: (unicode, unicode) -> None """Write the metainfo file toc.ncx.""" - logger.info('writing %s file...' % outname) + logger.info('writing %s file...', outname) if self.config.epub_tocscope == 'default': doctree = self.env.get_and_resolve_doctree(self.config.master_doc, @@ -828,7 +828,7 @@ class EpubBuilder(StandaloneHTMLBuilder): It is a zip file with the mimetype file stored uncompressed as the first entry. """ - logger.info('writing %s file...' 
% outname) + logger.info('writing %s file...', outname) projectfiles = ['META-INF/container.xml', 'content.opf', 'toc.ncx'] # type: List[unicode] # NOQA projectfiles.extend(self.files) epub = zipfile.ZipFile(path.join(outdir, outname), 'w', # type: ignore diff --git a/sphinx/builders/epub3.py b/sphinx/builders/epub3.py index 55434a499..ca4a44749 100644 --- a/sphinx/builders/epub3.py +++ b/sphinx/builders/epub3.py @@ -239,7 +239,7 @@ class Epub3Builder(EpubBuilder): def build_navigation_doc(self, outdir, outname): """Write the metainfo file nav.xhtml.""" - logger.info('writing %s file...' % outname) + logger.info('writing %s file...', outname) if self.config.epub_tocscope == 'default': doctree = self.env.get_and_resolve_doctree( @@ -262,16 +262,16 @@ class Epub3Builder(EpubBuilder): def validate_config_values(app): if app.config.epub3_description is not None: - app.warn('epub3_description is deprecated. Use epub_description instead.') + logger.warning('epub3_description is deprecated. Use epub_description instead.') app.config.epub_description = app.config.epub3_description if app.config.epub3_contributor is not None: - app.warn('epub3_contributor is deprecated. Use epub_contributor instead.') + logger.warning('epub3_contributor is deprecated. Use epub_contributor instead.') app.config.epub_contributor = app.config.epub3_contributor if app.config.epub3_page_progression_direction is not None: - app.warn('epub3_page_progression_direction option is deprecated' - ' from 1.5. Use epub_writing_mode instead.') + logger.warning('epub3_page_progression_direction option is deprecated' + ' from 1.5. Use epub_writing_mode instead.') def setup(app): diff --git a/sphinx/builders/gettext.py b/sphinx/builders/gettext.py index a38c9eca4..6993210f3 100644 --- a/sphinx/builders/gettext.py +++ b/sphinx/builders/gettext.py @@ -220,7 +220,7 @@ class MessageCatalogBuilder(I18nBuilder): # type: () -> None files = self._collect_templates() logger.info(bold('building [%s]: ' % self.name), nonl=1) - logger.info('targets for %d template files' % len(files)) + logger.info('targets for %d template files', len(files)) extract_translations = self.templates.environment.extract_translations diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index b4f4d7fd3..b979929b7 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -209,8 +209,8 @@ class StandaloneHTMLBuilder(Builder): if tag != 'tags': raise ValueError except ValueError: - self.warn('unsupported build info format in %r, building all' % - path.join(self.outdir, '.buildinfo')) + logger.warning('unsupported build info format in %r, building all', + path.join(self.outdir, '.buildinfo')) except Exception: pass if old_config_hash != self.config_hash or \ @@ -325,10 +325,10 @@ class StandaloneHTMLBuilder(Builder): favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': - self.warn('html_favicon is not an .ico file') + logger.warning('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, string_types): - self.warn('html_use_opensearch config value must now be a string') + logger.warning('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() @@ -595,8 +595,8 @@ class StandaloneHTMLBuilder(Builder): copyfile(path.join(self.srcdir, src), path.join(self.outdir, self.imagedir, dest)) except Exception as err: - self.warn('cannot copy image file %r: %s' % - (path.join(self.srcdir, src), err)) 
+ logger.warning('cannot copy image file %r: %s', + path.join(self.srcdir, src), err) def copy_download_files(self): # type: () -> None @@ -614,8 +614,8 @@ class StandaloneHTMLBuilder(Builder): copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_downloads', dest)) except Exception as err: - self.warn('cannot copy downloadable file %r: %s' % - (path.join(self.srcdir, src), err)) + logger.warning('cannot copy downloadable file %r: %s', + path.join(self.srcdir, src), err) def copy_static_files(self): # type: () -> None @@ -655,7 +655,7 @@ class StandaloneHTMLBuilder(Builder): for static_path in self.config.html_static_path: entry = path.join(self.confdir, static_path) if not path.exists(entry): - self.warn('html_static_path entry %r does not exist' % entry) + logger.warning('html_static_path entry %r does not exist', entry) continue copy_asset(entry, path.join(self.outdir, '_static'), excluded, context=ctx, renderer=self.templates) @@ -664,7 +664,7 @@ class StandaloneHTMLBuilder(Builder): logobase = path.basename(self.config.html_logo) logotarget = path.join(self.outdir, '_static', logobase) if not path.isfile(path.join(self.confdir, self.config.html_logo)): - self.warn('logo file %r does not exist' % self.config.html_logo) + logger.warning('logo file %r does not exist', self.config.html_logo) elif not path.isfile(logotarget): copyfile(path.join(self.confdir, self.config.html_logo), logotarget) @@ -672,7 +672,7 @@ class StandaloneHTMLBuilder(Builder): iconbase = path.basename(self.config.html_favicon) icontarget = path.join(self.outdir, '_static', iconbase) if not path.isfile(path.join(self.confdir, self.config.html_favicon)): - self.warn('favicon file %r does not exist' % self.config.html_favicon) + logger.warning('favicon file %r does not exist', self.config.html_favicon) elif not path.isfile(icontarget): copyfile(path.join(self.confdir, self.config.html_favicon), icontarget) @@ -687,7 +687,7 @@ class StandaloneHTMLBuilder(Builder): for extra_path in self.config.html_extra_path: entry = path.join(self.confdir, extra_path) if not path.exists(entry): - self.warn('html_extra_path entry %r does not exist' % entry) + logger.warning('html_extra_path entry %r does not exist', entry) continue copy_asset(entry, self.outdir, excluded) @@ -748,9 +748,9 @@ class StandaloneHTMLBuilder(Builder): self.indexer.load(f, self.indexer_format) # type: ignore except (IOError, OSError, ValueError): if keep: - self.warn('search index couldn\'t be loaded, but not all ' - 'documents will be built: the index will be ' - 'incomplete.') + logger.warning('search index couldn\'t be loaded, but not all ' + 'documents will be built: the index will be ' + 'incomplete.') # delete all entries for files that will be rebuilt self.indexer.prune(keep) @@ -789,9 +789,9 @@ class StandaloneHTMLBuilder(Builder): if has_wildcard(pattern): # warn if both patterns contain wildcards if has_wildcard(matched): - self.warn('page %s matches two patterns in ' - 'html_sidebars: %r and %r' % - (pagename, matched, pattern)) + logger.warning('page %s matches two patterns in ' + 'html_sidebars: %r and %r', + pagename, matched, pattern) # else the already matched pattern is more specific # than the present one, because it contains no wildcard continue @@ -863,9 +863,9 @@ class StandaloneHTMLBuilder(Builder): try: output = self.templates.render(templatename, ctx) except UnicodeError: - self.warn("a Unicode error occurred when rendering the page %s. 
" - "Please make sure all config values that contain " - "non-ASCII content are Unicode strings." % pagename) + logger.warning("a Unicode error occurred when rendering the page %s. " + "Please make sure all config values that contain " + "non-ASCII content are Unicode strings.", pagename) return if not outfilename: @@ -876,7 +876,7 @@ class StandaloneHTMLBuilder(Builder): with codecs.open(outfilename, 'w', encoding, 'xmlcharrefreplace') as f: # type: ignore # NOQA f.write(output) except (IOError, OSError) as err: - self.warn("error writing file %s: %s" % (outfilename, err)) + logger.warning("error writing file %s: %s", outfilename, err) if self.copysource and ctx.get('sourcename'): # copy the source file for the "show source" link source_name = path.join(self.outdir, '_sources', @@ -1270,8 +1270,8 @@ class JSONHTMLBuilder(SerializingHTMLBuilder): def validate_config_values(app): # type: (Sphinx) -> None if app.config.html_translator_class: - app.warn('html_translator_class is deprecated. ' - 'Use Sphinx.set_translator() API instead.') + logger.warning('html_translator_class is deprecated. ' + 'Use Sphinx.set_translator() API instead.') def setup(app): diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index 15a127e7b..18cf0046f 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -76,16 +76,16 @@ class LaTeXBuilder(Builder): # type: () -> None preliminary_document_data = [list(x) for x in self.config.latex_documents] if not preliminary_document_data: - self.warn('no "latex_documents" config value found; no documents ' - 'will be written') + logger.warning('no "latex_documents" config value found; no documents ' + 'will be written') return # assign subdirs to titles self.titles = [] # type: List[Tuple[unicode, unicode]] for entry in preliminary_document_data: docname = entry[0] if docname not in self.env.all_docs: - self.warn('"latex_documents" config value references unknown ' - 'document %s' % docname) + logger.warning('"latex_documents" config value references unknown ' + 'document %s', docname) continue self.document_data.append(entry) # type: ignore if docname.endswith(SEP+'index'): @@ -241,50 +241,58 @@ class LaTeXBuilder(Builder): def validate_config_values(app): # type: (Sphinx) -> None if app.config.latex_toplevel_sectioning not in (None, 'part', 'chapter', 'section'): - app.warn('invalid latex_toplevel_sectioning, ignored: %s' % - app.config.latex_toplevel_sectioning) + logger.warning('invalid latex_toplevel_sectioning, ignored: %s' % + app.config.latex_toplevel_sectioning) app.config.latex_toplevel_sectioning = None # type: ignore if app.config.latex_use_parts: if app.config.latex_toplevel_sectioning: - app.warn('latex_use_parts conflicts with latex_toplevel_sectioning, ignored.') + logger.warning('latex_use_parts conflicts with ' + 'latex_toplevel_sectioning, ignored.') else: - app.warn('latex_use_parts is deprecated. Use latex_toplevel_sectioning instead.') + logger.warning('latex_use_parts is deprecated. ' + 'Use latex_toplevel_sectioning instead.') app.config.latex_toplevel_sectioning = 'part' # type: ignore if app.config.latex_use_modindex is not True: # changed by user - app.warn('latex_use_modindex is deprecated. Use latex_domain_indices instead.') + logger.warning('latex_use_modindex is deprecated. 
' + 'Use latex_domain_indices instead.') if app.config.latex_preamble: if app.config.latex_elements.get('preamble'): - app.warn("latex_preamble conflicts with latex_elements['preamble'], ignored.") + logger.warning("latex_preamble conflicts with " + "latex_elements['preamble'], ignored.") else: - app.warn("latex_preamble is deprecated. Use latex_elements['preamble'] instead.") + logger.warning("latex_preamble is deprecated. " + "Use latex_elements['preamble'] instead.") app.config.latex_elements['preamble'] = app.config.latex_preamble if app.config.latex_paper_size != 'letter': if app.config.latex_elements.get('papersize'): - app.warn("latex_paper_size conflicts with latex_elements['papersize'], ignored.") + logger.warning("latex_paper_size conflicts with " + "latex_elements['papersize'], ignored.") else: - app.warn("latex_paper_size is deprecated. " - "Use latex_elements['papersize'] instead.") + logger.warning("latex_paper_size is deprecated. " + "Use latex_elements['papersize'] instead.") if app.config.latex_paper_size: app.config.latex_elements['papersize'] = app.config.latex_paper_size + 'paper' if app.config.latex_font_size != '10pt': if app.config.latex_elements.get('pointsize'): - app.warn("latex_font_size conflicts with latex_elements['pointsize'], ignored.") + logger.warning("latex_font_size conflicts with " + "latex_elements['pointsize'], ignored.") else: - app.warn("latex_font_size is deprecated. Use latex_elements['pointsize'] instead.") + logger.warning("latex_font_size is deprecated. " + "Use latex_elements['pointsize'] instead.") app.config.latex_elements['pointsize'] = app.config.latex_font_size if 'footer' in app.config.latex_elements: if 'postamble' in app.config.latex_elements: - app.warn("latex_elements['footer'] conflicts with " - "latex_elements['postamble'], ignored.") + logger.warning("latex_elements['footer'] conflicts with " + "latex_elements['postamble'], ignored.") else: - app.warn("latex_elements['footer'] is deprecated. " - "Use latex_elements['preamble'] instead.") + logger.warning("latex_elements['footer'] is deprecated. 
" + "Use latex_elements['preamble'] instead.") app.config.latex_elements['postamble'] = app.config.latex_elements['footer'] diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py index 6a3c03e35..d9e5d5696 100644 --- a/sphinx/builders/linkcheck.py +++ b/sphinx/builders/linkcheck.py @@ -233,7 +233,7 @@ class CheckExternalLinksBuilder(Builder): if status == 'working' and info == 'old': return if lineno: - logger.info('(line %4d) ' % lineno, nonl=1) + logger.info('(line %4d) ', lineno, nonl=1) if status == 'ignored': if info: logger.info(darkgray('-ignored- ') + uri + ': ' + info) @@ -247,8 +247,8 @@ class CheckExternalLinksBuilder(Builder): elif status == 'broken': self.write_entry('broken', docname, lineno, uri + ': ' + info) if self.app.quiet or self.app.warningiserror: - self.warn('broken link: %s (%s)' % (uri, info), - '%s:%s' % (self.env.doc2path(docname), lineno)) + logger.warning('broken link: %s (%s)', uri, info, + location=(self.env.doc2path(docname), lineno)) else: logger.info(red('broken ') + uri + red(' - ' + info)) elif status == 'redirected': diff --git a/sphinx/builders/manpage.py b/sphinx/builders/manpage.py index cd8684c79..20034b3f8 100644 --- a/sphinx/builders/manpage.py +++ b/sphinx/builders/manpage.py @@ -45,8 +45,8 @@ class ManualPageBuilder(Builder): def init(self): # type: () -> None if not self.config.man_pages: - self.warn('no "man_pages" config value found; no manual pages ' - 'will be written') + logger.warning('no "man_pages" config value found; no manual pages ' + 'will be written') def get_outdated_docs(self): # type: () -> Union[unicode, List[unicode]] diff --git a/sphinx/builders/texinfo.py b/sphinx/builders/texinfo.py index e46320456..804fd9587 100644 --- a/sphinx/builders/texinfo.py +++ b/sphinx/builders/texinfo.py @@ -124,16 +124,16 @@ class TexinfoBuilder(Builder): # type: () -> None preliminary_document_data = [list(x) for x in self.config.texinfo_documents] if not preliminary_document_data: - self.warn('no "texinfo_documents" config value found; no documents ' - 'will be written') + logger.warning('no "texinfo_documents" config value found; no documents ' + 'will be written') return # assign subdirs to titles self.titles = [] # type: List[Tuple[unicode, unicode]] for entry in preliminary_document_data: docname = entry[0] if docname not in self.env.all_docs: - self.warn('"texinfo_documents" config value references unknown ' - 'document %s' % docname) + logger.warning('"texinfo_documents" config value references unknown ' + 'document %s', docname) continue self.document_data.append(entry) # type: ignore if docname.endswith(SEP+'index'): @@ -240,7 +240,7 @@ class TexinfoBuilder(Builder): with open(fn, 'w') as mkfile: mkfile.write(TEXINFO_MAKEFILE) except (IOError, OSError) as err: - self.warn("error writing file %s: %s" % (fn, err)) + logger.warning("error writing file %s: %s", fn, err) logger.info(' done') diff --git a/sphinx/builders/text.py b/sphinx/builders/text.py index d9616d6b5..7147baa5c 100644 --- a/sphinx/builders/text.py +++ b/sphinx/builders/text.py @@ -15,9 +15,12 @@ from os import path from docutils.io import StringOutput from sphinx.builders import Builder +from sphinx.util import logging from sphinx.util.osutil import ensuredir, os_path from sphinx.writers.text import TextWriter +logger = logging.getLogger(__name__) + class TextBuilder(Builder): name = 'text' @@ -65,7 +68,7 @@ class TextBuilder(Builder): with codecs.open(outfilename, 'w', 'utf-8') as f: f.write(self.writer.output) except (IOError, OSError) as err: - 
self.warn("error writing file %s: %s" % (outfilename, err)) + logger.warning("error writing file %s: %s", outfilename, err) def finish(self): pass diff --git a/sphinx/builders/xml.py b/sphinx/builders/xml.py index 73d9e72be..fc43b4c12 100644 --- a/sphinx/builders/xml.py +++ b/sphinx/builders/xml.py @@ -16,9 +16,12 @@ from docutils import nodes from docutils.io import StringOutput from sphinx.builders import Builder +from sphinx.util import logging from sphinx.util.osutil import ensuredir, os_path from sphinx.writers.xml import XMLWriter, PseudoXMLWriter +logger = logging.getLogger(__name__) + class XMLBuilder(Builder): """ @@ -80,7 +83,7 @@ class XMLBuilder(Builder): with codecs.open(outfilename, 'w', 'utf-8') as f: f.write(self.writer.output) except (IOError, OSError) as err: - self.warn("error writing file %s: %s" % (outfilename, err)) + logger.warning("error writing file %s: %s", outfilename, err) def finish(self): pass diff --git a/sphinx/config.py b/sphinx/config.py index 7c163e1e3..27a184dce 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -219,7 +219,7 @@ class Config(object): if isinstance(value, binary_type) and nonascii_re.search(value): # type: ignore logger.warning('the config value %r is set to a string with non-ASCII ' 'characters; this can lead to Unicode errors occurring. ' - 'Please use Unicode strings, e.g. %r.' % (name, u'Content')) + 'Please use Unicode strings, e.g. %r.', name, u'Content') def convert_overrides(self, name, value): # type: (unicode, Any) -> Any @@ -258,7 +258,7 @@ class Config(object): elif name in self._raw_config: self.__dict__[name] = self._raw_config[name] except ValueError as exc: - logger.warning("%s" % exc) + logger.warning("%s", exc) def init_values(self): # type: () -> None @@ -270,14 +270,14 @@ class Config(object): config.setdefault(realvalname, {})[key] = value # type: ignore continue elif valname not in self.values: - logger.warning('unknown config value %r in override, ignoring' % valname) + logger.warning('unknown config value %r in override, ignoring', valname) continue if isinstance(value, string_types): config[valname] = self.convert_overrides(valname, value) else: config[valname] = value except ValueError as exc: - logger.warning("%s" % exc) + logger.warning("%s", exc) for name in config: if name in self.values: self.__dict__[name] = config[name] diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index 1ced8aba3..52d520a05 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -22,6 +22,7 @@ from sphinx.roles import XRefRole from sphinx.locale import l_, _ from sphinx.domains import Domain, ObjType from sphinx.directives import ObjectDescription +from sphinx.util import logging from sphinx.util.nodes import make_refnode from sphinx.util.pycompat import UnicodeMixin from sphinx.util.docfields import Field, GroupedField @@ -34,6 +35,8 @@ if False: from sphinx.config import Config # NOQA from sphinx.environment import BuildEnvironment # NOQA +logger = logging.getLogger(__name__) + """ Important note on ids ---------------------------------------------------------------------------- @@ -3060,7 +3063,7 @@ class Symbol(object): msg = "Duplicate declaration, also defined in '%s'.\n" msg += "Declaration is '%s'." msg = msg % (ourChild.docname, name) - env.warn(otherChild.docname, msg) + logger.warning(msg, location=otherChild.docname) else: # Both have declarations, and in the same docname. 
                        # This can apparently happen, it should be safe to
@@ -4872,7 +4875,7 @@ class CPPDomain(Domain):
                     msg = "Duplicate declaration, also defined in '%s'.\n"
                     msg += "Name of declaration is '%s'."
                     msg = msg % (ourNames[name], name)
-                    self.env.warn(docname, msg)
+                    logger.warning(msg, location=docname)
                 else:
                     ourNames[name] = docname
@@ -4882,7 +4885,7 @@ class CPPDomain(Domain):
         class Warner(object):
             def warn(self, msg):
                 if emitWarnings:
-                    env.warn_node(msg, node)
+                    logger.warn_node(msg, node)
         warner = Warner()
         parser = DefinitionParser(target, warner, env.config)
         try:
diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py
index 562c0be3d..1da874ea2 100644
--- a/sphinx/domains/python.py
+++ b/sphinx/domains/python.py
@@ -21,6 +21,7 @@ from sphinx.roles import XRefRole
 from sphinx.locale import l_, _
 from sphinx.domains import Domain, ObjType, Index
 from sphinx.directives import ObjectDescription
+from sphinx.util import logging
 from sphinx.util.nodes import make_refnode
 from sphinx.util.docfields import Field, GroupedField, TypedField
 
@@ -31,6 +32,8 @@ if False:
     from sphinx.builders import Builder  # NOQA
     from sphinx.environment import BuildEnvironment  # NOQA
 
+logger = logging.getLogger(__name__)
+
 
 # REs for Python signatures
 py_sig_re = re.compile(
@@ -784,7 +787,7 @@ class PythonDomain(Domain):
         if not matches:
             return None
         elif len(matches) > 1:
-            env.warn_node(
+            logger.warn_node(
                 'more than one target found for cross-reference '
                 '%r: %s' % (target, ', '.join(match[0] for match in matches)),
                 node)
diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py
index 359690239..eb355ecf2 100644
--- a/sphinx/domains/std.py
+++ b/sphinx/domains/std.py
@@ -23,7 +23,7 @@ from sphinx.roles import XRefRole
 from sphinx.locale import l_, _
 from sphinx.domains import Domain, ObjType
 from sphinx.directives import ObjectDescription
-from sphinx.util import ws_re
+from sphinx.util import ws_re, logging
 from sphinx.util.nodes import clean_astext, make_refnode
 
 if False:
@@ -41,6 +41,8 @@ if False:
     RoleFunction = Callable[[unicode, unicode, unicode, int, Inliner, Dict, List[unicode]],
                             Tuple[List[nodes.Node], List[nodes.Node]]]
 
+logger = logging.getLogger(__name__)
+
 
 # RE for option descriptions
 option_desc_re = re.compile(r'((?:/|--|-|\+)?[-\.?@#_a-zA-Z0-9]+)(=?\s*.*)')
@@ -163,12 +165,10 @@ class Cmdoption(ObjectDescription):
             potential_option = potential_option.strip()
             m = option_desc_re.match(potential_option)  # type: ignore
             if not m:
-                self.env.warn(
-                    self.env.docname,
-                    'Malformed option description %r, should '
-                    'look like "opt", "-opt args", "--opt args", '
-                    '"/opt args" or "+opt args"' % potential_option,
-                    self.lineno)
+                logger.warning('Malformed option description %r, should '
+                               'look like "opt", "-opt args", "--opt args", '
+                               '"/opt args" or "+opt args"', potential_option,
+                               location=(self.env.docname, self.lineno))
                 continue
             optname, args = m.groups()
             if count:
@@ -573,8 +573,8 @@ class StandardDomain(Domain):
             label = node[0].astext()
            if label in self.data['citations']:
                 path = env.doc2path(self.data['citations'][label][0])
-                env.warn_node('duplicate citation %s, other instance in %s' %
-                              (label, path), node)
+                logger.warn_node('duplicate citation %s, other instance in %s' %
+                                 (label, path), node)
             self.data['citations'][label] = (docname, node['ids'][0])
 
     def note_labels(self, env, docname, document):
@@ -596,8 +596,8 @@ class StandardDomain(Domain):
                 # link and object descriptions
                 continue
             if name in labels:
-                env.warn_node('duplicate label %s, ' % name + 'other instance '
-                              'in ' + 
env.doc2path(labels[name][0]), node) + logger.warn_node('duplicate label %s, ' % name + 'other instance ' + 'in ' + env.doc2path(labels[name][0]), node) anonlabels[name] = docname, labelid if node.tagname == 'section': sectname = clean_astext(node[0]) # node[0] == title node @@ -688,7 +688,7 @@ class StandardDomain(Domain): return None if env.config.numfig is False: - env.warn_node('numfig is disabled. :numref: is ignored.', node) + logger.warn_node('numfig is disabled. :numref: is ignored.', node) return contnode target_node = env.get_doctree(docname).ids.get(labelid) @@ -701,7 +701,7 @@ class StandardDomain(Domain): if fignumber is None: return contnode except ValueError: - env.warn_node("no number is assigned for %s: %s" % (figtype, labelid), node) + logger.warn_node("no number is assigned for %s: %s" % (figtype, labelid), node) return contnode try: @@ -711,7 +711,7 @@ class StandardDomain(Domain): title = env.config.numfig_format.get(figtype, '') if figname is None and '%{name}' in title: - env.warn_node('the link has no caption: %s' % title, node) + logger.warn_node('the link has no caption: %s' % title, node) return contnode else: fignum = '.'.join(map(str, fignumber)) @@ -725,10 +725,10 @@ class StandardDomain(Domain): # old style format (cf. "Fig.%s") newtitle = title % fignum except KeyError as exc: - env.warn_node('invalid numfig_format: %s (%r)' % (title, exc), node) + logger.warn_node('invalid numfig_format: %s (%r)' % (title, exc), node) return contnode except TypeError: - env.warn_node('invalid numfig_format: %s' % title, node) + logger.warn_node('invalid numfig_format: %s' % title, node) return contnode return self.build_reference_node(fromdocname, builder, diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 663442503..4b6e9bf31 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -447,7 +447,7 @@ class BuildEnvironment(object): if os.access(self.doc2path(docname), os.R_OK): self.found_docs.add(docname) else: - self.warn(docname, "document not readable. Ignored.") + logger.warning("document not readable. 
Ignored.", location=docname) # Current implementation is applying translated messages in the reading # phase.Therefore, in order to apply the updated message catalog, it is @@ -594,14 +594,14 @@ class BuildEnvironment(object): if ext_ok: continue if ext_ok is None: - app.warn('the %s extension does not declare if it ' - 'is safe for parallel reading, assuming it ' - 'isn\'t - please ask the extension author to ' - 'check and make it explicit' % extname) - app.warn('doing serial read') + logger.warning('the %s extension does not declare if it ' + 'is safe for parallel reading, assuming it ' + 'isn\'t - please ask the extension author to ' + 'check and make it explicit', extname) + logger.warning('doing serial read') else: - app.warn('the %s extension is not safe for parallel ' - 'reading, doing serial read' % extname) + logger.warning('the %s extension is not safe for parallel ' + 'reading, doing serial read', extname) par_ok = False break if par_ok: @@ -690,11 +690,11 @@ class BuildEnvironment(object): if lineend == -1: lineend = len(error.object) lineno = error.object.count(b'\n', 0, error.start) + 1 - self.warn(self.docname, 'undecodable source characters, ' - 'replacing with "?": %r' % - (error.object[linestart+1:error.start] + b'>>>' + - error.object[error.start:error.end] + b'<<<' + - error.object[error.end:lineend]), lineno) + logger.warning('undecodable source characters, replacing with "?": %r', + (error.object[linestart+1:error.start] + b'>>>' + + error.object[error.start:error.end] + b'<<<' + + error.object[error.end:lineend]), + location=(self.docname, lineno)) return (u'?', error.end) def read_doc(self, docname, app=None): @@ -724,8 +724,8 @@ class BuildEnvironment(object): if role_fn: roles._roles[''] = role_fn else: - self.warn(docname, 'default role %s not found' % - self.config.default_role) + logger.warning('default role %s not found', self.config.default_role, + location=docname) codecs.register_error('sphinx', self.warn_and_replace) # type: ignore @@ -816,16 +816,18 @@ class BuildEnvironment(object): def currmodule(self): # type () -> None """Backwards compatible alias. Will be removed.""" - self.warn(self.docname, 'env.currmodule is being referenced by an ' - 'extension; this API will be removed in the future') + logger.warning('env.currmodule is being referenced by an ' + 'extension; this API will be removed in the future', + location=self.docname) return self.ref_context.get('py:module') @property def currclass(self): # type: () -> None """Backwards compatible alias. 
Will be removed.""" - self.warn(self.docname, 'env.currclass is being referenced by an ' - 'extension; this API will be removed in the future') + logger.warning('env.currclass is being referenced by an ' + 'extension; this API will be removed in the future', + location=self.docname) return self.ref_context.get('py:class') def new_serialno(self, category=''): @@ -900,8 +902,8 @@ class BuildEnvironment(object): rel_filename, filename = self.relfn2path(targetname, docname) self.dependencies[docname].add(rel_filename) if not os.access(filename, os.R_OK): - self.warn_node('download file not readable: %s' % filename, - node) + logger.warn_node('download file not readable: %s' % filename, + node) continue uniquename = self.dlfiles.add_file(docname, filename) node['filename'] = uniquename @@ -919,8 +921,8 @@ class BuildEnvironment(object): if mimetype not in candidates: globbed.setdefault(mimetype, []).append(new_imgpath) except (OSError, IOError) as err: - self.warn_node('image file %s not readable: %s' % - (filename, err), node) + logger.warn_node('image file %s not readable: %s' % + (filename, err), node) for key, files in iteritems(globbed): candidates[key] = sorted(files, key=len)[0] # select by similarity @@ -932,13 +934,13 @@ class BuildEnvironment(object): node['candidates'] = candidates = {} imguri = node['uri'] if imguri.startswith('data:'): - self.warn_node('image data URI found. some builders might not support', node, - type='image', subtype='data_uri') + logger.warn_node('image data URI found. some builders might not support', node, + type='image', subtype='data_uri') candidates['?'] = imguri continue elif imguri.find('://') != -1: - self.warn_node('nonlocal image URI found: %s' % imguri, node, - type='image', subtype='nonlocal_uri') + logger.warn_node('nonlocal image URI found: %s' % imguri, node, + type='image', subtype='nonlocal_uri') candidates['?'] = imguri continue rel_imgpath, full_imgpath = self.relfn2path(imguri, docname) @@ -967,8 +969,8 @@ class BuildEnvironment(object): for imgpath in itervalues(candidates): self.dependencies[docname].add(imgpath) if not os.access(path.join(self.srcdir, imgpath), os.R_OK): - self.warn_node('image file not readable: %s' % imgpath, - node) + logger.warn_node('image file not readable: %s' % imgpath, + node) continue self.images.add_file(docname, imgpath) @@ -1152,7 +1154,7 @@ class BuildEnvironment(object): node.replace_self(newnode or contnode) # remove only-nodes that do not belong to our builder - process_only_nodes(doctree, builder.tags, warn_node=self.warn_node) + process_only_nodes(doctree, builder.tags) # allow custom references to be resolved builder.app.emit('doctree-resolved', doctree, fromdocname) @@ -1181,7 +1183,7 @@ class BuildEnvironment(object): (node['refdomain'], typ) else: msg = '%r reference target not found: %%(target)s' % typ - self.warn_node(msg % {'target': target}, node, type='ref', subtype=typ) + logger.warn_node(msg % {'target': target}, node, type='ref', subtype=typ) def _resolve_doc_reference(self, builder, refdoc, node, contnode): # type: (Builder, unicode, nodes.Node, nodes.Node) -> nodes.Node @@ -1232,9 +1234,9 @@ class BuildEnvironment(object): return None if len(results) > 1: nice_results = ' or '.join(':%s:' % r[0] for r in results) - self.warn_node('more than one target found for \'any\' cross-' - 'reference %r: could be %s' % (target, nice_results), - node) + logger.warn_node('more than one target found for \'any\' cross-' + 'reference %r: could be %s' % (target, nice_results), + node) res_role, newnode = 
results[0] # Override "any" class with the actual role type to get the styling # approximately correct. @@ -1255,7 +1257,7 @@ class BuildEnvironment(object): def traverse_toctree(parent, docname): if parent == docname: - self.warn(docname, 'self referenced toctree found. Ignored.') + logger.warning('self referenced toctree found. Ignored.', location=docname) return # traverse toctree by pre-order @@ -1295,4 +1297,5 @@ class BuildEnvironment(object): continue if 'orphan' in self.metadata[docname]: continue - self.warn(docname, 'document isn\'t included in any toctree') + logger.warning('document isn\'t included in any toctree', + location=docname) diff --git a/sphinx/environment/managers/indexentries.py b/sphinx/environment/managers/indexentries.py index 11e5ccbb0..43e3b4c83 100644 --- a/sphinx/environment/managers/indexentries.py +++ b/sphinx/environment/managers/indexentries.py @@ -16,7 +16,7 @@ from itertools import groupby from six import text_type from sphinx import addnodes -from sphinx.util import iteritems, split_index_msg, split_into +from sphinx.util import iteritems, split_index_msg, split_into, logging from sphinx.locale import _ from sphinx.environment.managers import EnvironmentManager @@ -27,6 +27,8 @@ if False: from sphinx.builders import Builder # NOQA from sphinx.environment import BuildEnvironment # NOQA +logger = logging.getLogger(__name__) + class IndexEntries(EnvironmentManager): name = 'indices' @@ -53,7 +55,7 @@ class IndexEntries(EnvironmentManager): for entry in node['entries']: split_index_msg(entry[0], entry[1]) except ValueError as exc: - self.env.warn_node(exc, node) + logger.warn_node(str(exc), node) node.parent.remove(node) else: for entry in node['entries']: @@ -119,9 +121,9 @@ class IndexEntries(EnvironmentManager): add_entry(first, _('see also %s') % second, None, link=False, key=index_key) else: - self.env.warn(fn, 'unknown index entry type %r' % type) + logger.warning('unknown index entry type %r', type, location=fn) except ValueError as err: - self.env.warn(fn, str(err)) + logger.warning(str(err), location=fn) # sort the index entries; put all symbols at the front, even those # following the letters in ASCII, this is where the chr(127) comes from diff --git a/sphinx/environment/managers/toctree.py b/sphinx/environment/managers/toctree.py index 6d92d9b12..64937b7fa 100644 --- a/sphinx/environment/managers/toctree.py +++ b/sphinx/environment/managers/toctree.py @@ -14,7 +14,7 @@ from six import iteritems from docutils import nodes from sphinx import addnodes -from sphinx.util import url_re +from sphinx.util import url_re, logging from sphinx.util.nodes import clean_astext, process_only_nodes from sphinx.transforms import SphinxContentsFilter from sphinx.environment.managers import EnvironmentManager @@ -25,6 +25,8 @@ if False: from sphinx.builders import Builder # NOQA from sphinx.environment import BuildEnvironment # NOQA +logger = logging.getLogger(__name__) + class Toctree(EnvironmentManager): name = 'toctree' @@ -169,7 +171,7 @@ class Toctree(EnvironmentManager): # the document does not exist anymore: return a dummy node that # renders to nothing return nodes.paragraph() - process_only_nodes(toc, builder.tags, warn_node=self.env.warn_node) + process_only_nodes(toc, builder.tags) for node in toc.traverse(nodes.reference): node['refuri'] = node['anchorname'] or '#' return toc @@ -296,16 +298,17 @@ class Toctree(EnvironmentManager): toc = nodes.bullet_list('', item) else: if ref in parents: - self.env.warn(ref, 'circular toctree references ' - 'detected, 
ignoring: %s <- %s',
+                               ref, ' <- '.join(parents),
+                               location=ref)
                     continue
                 refdoc = ref
                 toc = self.tocs[ref].deepcopy()
                 maxdepth = self.env.metadata[ref].get('tocdepth', 0)
                 if ref not in toctree_ancestors or (prune and maxdepth > 0):
                     self._toctree_prune(toc, 2, maxdepth, collapse)
-                process_only_nodes(toc, builder.tags, warn_node=self.env.warn_node)
+                process_only_nodes(toc, builder.tags)
                 if title and toc.children and len(toc.children) == 1:
                     child = toc.children[0]
                     for refnode in child.traverse(nodes.reference):
@@ -314,13 +317,13 @@ class Toctree(EnvironmentManager):
                         refnode.children = [nodes.Text(title)]
                 if not toc.children:
                     # empty toc means: no titles will show up in the toctree
-                    self.env.warn_node(
+                    logger.warn_node(
                         'toctree contains reference to document %r that '
                         'doesn\'t have a title: no link will be generated'
                         % ref, toctreenode)
             except KeyError:
                 # this is raised if the included file does not exist
-                self.env.warn_node(
+                logger.warn_node(
                     'toctree contains reference to nonexisting document %r'
                     % ref, toctreenode)
             else:
diff --git a/sphinx/ext/autosectionlabel.py b/sphinx/ext/autosectionlabel.py
index be769eb85..78cb3c042 100644
--- a/sphinx/ext/autosectionlabel.py
+++ b/sphinx/ext/autosectionlabel.py
@@ -10,8 +10,11 @@
 """
 
 from docutils import nodes
+from sphinx.util import logging
 from sphinx.util.nodes import clean_astext
 
+logger = logging.getLogger(__name__)
+
 
 def register_sections_as_label(app, document):
     labels = app.env.domaindata['std']['labels']
@@ -23,8 +26,8 @@ def register_sections_as_label(app, document):
         sectname = clean_astext(node[0])
 
         if name in labels:
-            app.env.warn_node('duplicate label %s, ' % name + 'other instance '
-                              'in ' + app.env.doc2path(labels[name][0]), node)
+            logger.warn_node('duplicate label %s, ' % name + 'other instance '
+                             'in ' + app.env.doc2path(labels[name][0]), node)
         anonlabels[name] = docname, labelid
         labels[name] = docname, labelid, sectname
 
diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py
index cbe7c08f0..1a373ef46 100644
--- a/sphinx/ext/autosummary/__init__.py
+++ b/sphinx/ext/autosummary/__init__.py
@@ -608,13 +608,13 @@ def process_generate_options(app):
     suffix = get_rst_suffix(app)
     if suffix is None:
-        app.warn('autosummary generats .rst files internally. '
-                 'But your source_suffix does not contain .rst. Skipped.')
+        logger.warning('autosummary generats .rst files internally. '
+                       'But your source_suffix does not contain .rst. 
Skipped.') return generate_autosummary_docs(genfiles, builder=app.builder, - warn=app.warn, info=app.info, suffix=suffix, - base_path=app.srcdir) + warn=logger.warning, info=logger.info, + suffix=suffix, base_path=app.srcdir) def setup(app): diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py index 11e017f69..1698f936b 100644 --- a/sphinx/ext/coverage.py +++ b/sphinx/ext/coverage.py @@ -20,6 +20,7 @@ from six.moves import cPickle as pickle import sphinx from sphinx.builders import Builder +from sphinx.util import logging from sphinx.util.inspect import safe_getattr if False: @@ -27,6 +28,8 @@ if False: from typing import Any, Callable, IO, Pattern, Tuple # NOQA from sphinx.application import Sphinx # NOQA +logger = logging.getLogger(__name__) + # utility def write_header(f, text, char='-'): @@ -35,14 +38,14 @@ def write_header(f, text, char='-'): f.write(char * len(text) + '\n') -def compile_regex_list(name, exps, warnfunc): - # type: (unicode, unicode, Callable) -> List[Pattern] +def compile_regex_list(name, exps): + # type: (unicode, unicode) -> List[Pattern] lst = [] for exp in exps: try: lst.append(re.compile(exp)) except Exception: - warnfunc('invalid regex %r in %s' % (exp, name)) + logger.warning('invalid regex %r in %s', exp, name) return lst @@ -62,21 +65,18 @@ class CoverageBuilder(Builder): try: self.c_regexes.append((name, re.compile(exp))) except Exception: - self.warn('invalid regex %r in coverage_c_regexes' % exp) + logger.warning('invalid regex %r in coverage_c_regexes', exp) self.c_ignorexps = {} # type: Dict[unicode, List[Pattern]] for (name, exps) in iteritems(self.config.coverage_ignore_c_items): - self.c_ignorexps[name] = compile_regex_list( - 'coverage_ignore_c_items', exps, self.warn) - self.mod_ignorexps = compile_regex_list( - 'coverage_ignore_modules', self.config.coverage_ignore_modules, - self.warn) - self.cls_ignorexps = compile_regex_list( - 'coverage_ignore_classes', self.config.coverage_ignore_classes, - self.warn) - self.fun_ignorexps = compile_regex_list( - 'coverage_ignore_functions', self.config.coverage_ignore_functions, - self.warn) + self.c_ignorexps[name] = compile_regex_list('coverage_ignore_c_items', + exps) + self.mod_ignorexps = compile_regex_list('coverage_ignore_modules', + self.config.coverage_ignore_modules) + self.cls_ignorexps = compile_regex_list('coverage_ignore_classes', + self.config.coverage_ignore_classes) + self.fun_ignorexps = compile_regex_list('coverage_ignore_functions', + self.config.coverage_ignore_functions) def get_outdated_docs(self): # type: () -> unicode @@ -147,8 +147,7 @@ class CoverageBuilder(Builder): try: mod = __import__(mod_name, fromlist=['foo']) except ImportError as err: - self.warn('module %s could not be imported: %s' % - (mod_name, err)) + logger.warning('module %s could not be imported: %s', mod_name, err) self.py_undoc[mod_name] = {'error': err} continue diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index d8311c3b5..dafe2863e 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -270,7 +270,7 @@ Results of doctest builder run on %s def _warn_out(self, text): # type: (unicode) -> None if self.app.quiet or self.app.warningiserror: - self.warn(text) + logger.warning(text) else: logger.info(text, nonl=True) if isinstance(text, binary_type): @@ -347,9 +347,9 @@ Doctest summary for node in doctree.traverse(condition): source = 'test' in node and node['test'] or node.astext() if not source: - self.warn('no code/output in %s block at %s:%s' % - (node.get('testnodetype', 'doctest'), 
- self.env.doc2path(docname), node.line)) + logger.warning('no code/output in %s block at %s:%s', + node.get('testnodetype', 'doctest'), + self.env.doc2path(docname), node.line) code = TestCode(source, type=node.get('testnodetype', 'doctest'), lineno=node.line, options=node.get('options')) node_groups = node.get('groups', ['default']) @@ -442,9 +442,8 @@ Doctest summary doctest_encode(code[0].code, self.env.config.source_encoding), {}, # type: ignore # NOQA group.name, filename_str, code[0].lineno) except Exception: - self.warn('ignoring invalid doctest code: %r' % - code[0].code, - '%s:%s' % (filename, code[0].lineno)) + logger.warning('ignoring invalid doctest code: %r', code[0].code, + location=(filename, code[0].lineno)) continue if not test.examples: continue diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py index a87d7ca58..19da59cf7 100644 --- a/sphinx/ext/graphviz.py +++ b/sphinx/ext/graphviz.py @@ -26,6 +26,7 @@ from docutils.statemachine import ViewList import sphinx from sphinx.errors import SphinxError from sphinx.locale import _ +from sphinx.util import logging from sphinx.util.i18n import search_image_for_language from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL @@ -34,6 +35,8 @@ if False: from typing import Any, Tuple # NOQA from sphinx.application import Sphinx # NOQA +logger = logging.getLogger(__name__) + mapname_re = re.compile(r'<map id="(.*?)"') @@ -204,8 +207,8 @@ def render_dot(self, code, options, format, prefix='graphviz'): except OSError as err: if err.errno != ENOENT: # No such file or directory raise - self.builder.warn('dot command %r cannot be run (needed for graphviz ' - 'output), check the graphviz_dot setting' % graphviz_dot) + logger.warning('dot command %r cannot be run (needed for graphviz ' + 'output), check the graphviz_dot setting', graphviz_dot) if not hasattr(self.builder, '_graphviz_warned_dot'): self.builder._graphviz_warned_dot = {} self.builder._graphviz_warned_dot[graphviz_dot] = True @@ -236,7 +239,7 @@ def warn_for_deprecated_option(self, node): return if 'inline' in node: - self.builder.warn(':inline: option for graphviz is deprecated since version 1.4.0.') + logger.warning(':inline: option for graphviz is deprecated since version 1.4.0.') self.builder._graphviz_warned_inline = True @@ -250,7 +253,7 @@ def render_dot_html(self, node, code, options, prefix='graphviz', "'svg', but is %r" % format) fname, outfn = render_dot(self, code, options, format, prefix) except GraphvizError as exc: - self.builder.warn('dot code %r: ' % code + str(exc)) + logger.warning('dot code %r: ' % code + str(exc)) raise nodes.SkipNode if fname is None: @@ -296,7 +299,7 @@ def render_dot_latex(self, node, code, options, prefix='graphviz'): try: fname, outfn = render_dot(self, code, options, 'pdf', prefix) except GraphvizError as exc: - self.builder.warn('dot code %r: ' % code + str(exc)) + logger.warning('dot code %r: ' % code + str(exc)) raise nodes.SkipNode is_inline = self.is_inline(node) @@ -333,7 +336,7 @@ def render_dot_texinfo(self, node, code, options, prefix='graphviz'): try: fname, outfn = render_dot(self, code, options, 'png', prefix) except GraphvizError as exc: - self.builder.warn('dot code %r: ' % code + str(exc)) + logger.warning('dot code %r: ' % code + str(exc)) raise nodes.SkipNode if fname is not None: self.body.append('@image{%s,,,[graphviz],png}\n' % fname[:-4]) diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py index 11d41d426..e02353452 100644 --- a/sphinx/ext/imgmath.py +++ b/sphinx/ext/imgmath.py @@ -25,6 
+25,7 @@ from docutils import nodes import sphinx from sphinx.locale import _ from sphinx.errors import SphinxError, ExtensionError +from sphinx.util import logging from sphinx.util.png import read_png_depth, write_png_depth from sphinx.util.osutil import ensuredir, ENOENT, cd from sphinx.util.pycompat import sys_encoding @@ -36,6 +37,8 @@ if False: from sphinx.application import Sphinx # NOQA from sphinx.ext.mathbase import math as math_node, displaymath # NOQA +logger = logging.getLogger(__name__) + class MathExtError(SphinxError): category = 'Math extension error' @@ -142,9 +145,9 @@ def render_math(self, math): except OSError as err: if err.errno != ENOENT: # No such file or directory raise - self.builder.warn('LaTeX command %r cannot be run (needed for math ' - 'display), check the imgmath_latex setting' % - self.builder.config.imgmath_latex) + logger.warning('LaTeX command %r cannot be run (needed for math ' + 'display), check the imgmath_latex setting', + self.builder.config.imgmath_latex) self.builder._imgmath_warned_latex = True return None, None @@ -183,10 +186,10 @@ def render_math(self, math): except OSError as err: if err.errno != ENOENT: # No such file or directory raise - self.builder.warn('%s command %r cannot be run (needed for math ' - 'display), check the imgmath_%s setting' % - (image_translator, image_translator_executable, - image_translator)) + logger.warning('%s command %r cannot be run (needed for math ' + 'display), check the imgmath_%s setting', + image_translator, image_translator_executable, + image_translator) self.builder._imgmath_warned_image_translator = True return None, None @@ -234,7 +237,7 @@ def html_visit_math(self, node): sm = nodes.system_message(msg, type='WARNING', level=2, backrefs=[], source=node['latex']) sm.walkabout(self) - self.builder.warn('display latex %r: ' % node['latex'] + msg) + logger.warning('display latex %r: ' % node['latex'] + msg) raise nodes.SkipNode if fname is None: # something failed -- use text-only as a bad substitute @@ -262,7 +265,7 @@ def html_visit_displaymath(self, node): sm = nodes.system_message(msg, type='WARNING', level=2, backrefs=[], source=node['latex']) sm.walkabout(self) - self.builder.warn('inline latex %r: ' % node['latex'] + msg) + logger.warning('inline latex %r: ' % node['latex'] + msg) raise nodes.SkipNode self.body.append(self.starttag(node, 'div', CLASS='math')) self.body.append('<p>') diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py index 41f00e5a1..6ba27aedf 100644 --- a/sphinx/ext/intersphinx.py +++ b/sphinx/ext/intersphinx.py @@ -229,14 +229,14 @@ def fetch_inventory(app, uri, inv): else: f = open(path.join(app.srcdir, inv), 'rb') except Exception as err: - app.warn('intersphinx inventory %r not fetchable due to ' - '%s: %s' % (inv, err.__class__, err)) + logger.warning('intersphinx inventory %r not fetchable due to %s: %s', + inv, err.__class__, err) return try: if hasattr(f, 'url'): newinv = f.url # type: ignore if inv != newinv: - logger.info('intersphinx inventory has moved: %s -> %s' % (inv, newinv)) + logger.info('intersphinx inventory has moved: %s -> %s', inv, newinv) if uri in (inv, path.dirname(inv), path.dirname(inv) + '/'): uri = path.dirname(newinv) @@ -247,8 +247,8 @@ def fetch_inventory(app, uri, inv): except ValueError: raise ValueError('unknown or unsupported inventory version') except Exception as err: - app.warn('intersphinx inventory %r not readable due to ' - '%s: %s' % (inv, err.__class__.__name__, err)) + logger.warning('intersphinx inventory %r not 
readable due to %s: %s', + inv, err.__class__.__name__, err) else: return invdata @@ -274,7 +274,7 @@ def load_mappings(app): # new format name, (uri, inv) = key, value if not isinstance(name, string_types): - app.warn('intersphinx identifier %r is not string. Ignored' % name) + logger.warning('intersphinx identifier %r is not string. Ignored', name) continue else: # old format, no name @@ -295,8 +295,7 @@ def load_mappings(app): if '://' not in inv or uri not in cache \ or cache[uri][1] < cache_time: safe_inv_url = _get_safe_url(inv) # type: ignore - logger.info( - 'loading intersphinx inventory from %s...' % safe_inv_url) + logger.info('loading intersphinx inventory from %s...', safe_inv_url) invdata = fetch_inventory(app, uri, inv) if invdata: cache[uri] = (name, now, invdata) diff --git a/sphinx/ext/pngmath.py b/sphinx/ext/pngmath.py index 49c81b233..b1408129f 100644 --- a/sphinx/ext/pngmath.py +++ b/sphinx/ext/pngmath.py @@ -25,6 +25,7 @@ from docutils import nodes import sphinx from sphinx.errors import SphinxError, ExtensionError +from sphinx.util import logging from sphinx.util.png import read_png_depth, write_png_depth from sphinx.util.osutil import ensuredir, ENOENT, cd from sphinx.util.pycompat import sys_encoding @@ -36,6 +37,8 @@ if False: from sphinx.application import Sphinx # NOQA from sphinx.ext.mathbase import math as math_node, displaymath # NOQA +logger = logging.getLogger(__name__) + class MathExtError(SphinxError): category = 'Math extension error' @@ -133,9 +136,9 @@ def render_math(self, math): except OSError as err: if err.errno != ENOENT: # No such file or directory raise - self.builder.warn('LaTeX command %r cannot be run (needed for math ' - 'display), check the pngmath_latex setting' % - self.builder.config.pngmath_latex) + logger.warning('LaTeX command %r cannot be run (needed for math ' + 'display), check the pngmath_latex setting', + self.builder.config.pngmath_latex) self.builder._mathpng_warned_latex = True return None, None @@ -158,9 +161,9 @@ def render_math(self, math): except OSError as err: if err.errno != ENOENT: # No such file or directory raise - self.builder.warn('dvipng command %r cannot be run (needed for math ' - 'display), check the pngmath_dvipng setting' % - self.builder.config.pngmath_dvipng) + logger.warning('dvipng command %r cannot be run (needed for math ' + 'display), check the pngmath_dvipng setting', + self.builder.config.pngmath_dvipng) self.builder._mathpng_warned_dvipng = True return None, None stdout, stderr = p.communicate() @@ -206,7 +209,7 @@ def html_visit_math(self, node): sm = nodes.system_message(msg, type='WARNING', level=2, backrefs=[], source=node['latex']) sm.walkabout(self) - self.builder.warn('display latex %r: ' % node['latex'] + msg) + logger.warning('display latex %r: ' % node['latex'] + msg) raise nodes.SkipNode if fname is None: # something failed -- use text-only as a bad substitute @@ -234,7 +237,7 @@ def html_visit_displaymath(self, node): sm = nodes.system_message(msg, type='WARNING', level=2, backrefs=[], source=node['latex']) sm.walkabout(self) - self.builder.warn('inline latex %r: ' % node['latex'] + msg) + logger.warning('inline latex %r: ' % node['latex'] + msg) raise nodes.SkipNode self.body.append(self.starttag(node, 'div', CLASS='math')) self.body.append('<p>') @@ -252,7 +255,8 @@ def html_visit_displaymath(self, node): def setup(app): # type: (Sphinx) -> Dict[unicode, Any] - app.warn('sphinx.ext.pngmath has been deprecated. 
Please use sphinx.ext.imgmath instead.') + logger.warning('sphinx.ext.pngmath has been deprecated. ' + 'Please use sphinx.ext.imgmath instead.') try: mathbase_setup(app, (html_visit_math, None), (html_visit_displaymath, None)) except ExtensionError: diff --git a/sphinx/ext/todo.py b/sphinx/ext/todo.py index 5db878ad5..e8d8201df 100644 --- a/sphinx/ext/todo.py +++ b/sphinx/ext/todo.py @@ -18,6 +18,7 @@ from docutils.parsers.rst import directives import sphinx from sphinx.locale import _ from sphinx.environment import NoUri +from sphinx.util import logging from sphinx.util.nodes import set_source_info from docutils.parsers.rst import Directive from docutils.parsers.rst.directives.admonitions import BaseAdmonition @@ -28,6 +29,8 @@ if False: from sphinx.application import Sphinx # NOQA from sphinx.environment import BuildEnvironment # NOQA +logger = logging.getLogger(__name__) + class todo_node(nodes.Admonition, nodes.Element): pass @@ -97,7 +100,7 @@ def process_todos(app, doctree): }) if env.config.todo_emit_warnings: - env.warn_node("TODO entry found: %s" % node[1].astext(), node) + logger.warn_node("TODO entry found: %s" % node[1].astext(), node) class TodoList(Directive): diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py index a4f93c144..95aabd6c2 100644 --- a/sphinx/ext/viewcode.py +++ b/sphinx/ext/viewcode.py @@ -39,7 +39,7 @@ def _get_full_modname(app, modname, attribute): except AttributeError: # sphinx.ext.viewcode can't follow class instance attribute # then AttributeError logging output only verbose mode. - logger.verbose('Didn\'t find %s in %s' % (attribute, modname)) + logger.verbose('Didn\'t find %s in %s', attribute, modname) return None except Exception as e: # sphinx.ext.viewcode follow python domain directives. diff --git a/sphinx/transforms/__init__.py b/sphinx/transforms/__init__.py index 8d469eabe..fad0d0038 100644 --- a/sphinx/transforms/__init__.py +++ b/sphinx/transforms/__init__.py @@ -177,12 +177,11 @@ class AutoIndexUpgrader(Transform): def apply(self): # type: () -> None - env = self.document.settings.env for node in self.document.traverse(addnodes.index): if 'entries' in node and any(len(entry) == 4 for entry in node['entries']): msg = ('4 column based index found. 
' 'It might be a bug of extensions you use: %r' % node['entries']) - env.warn_node(msg, node) + logger.warn_node(msg, node) for i, entry in enumerate(node['entries']): if len(entry) == 4: node['entries'][i] = entry + (None,) diff --git a/sphinx/transforms/i18n.py b/sphinx/transforms/i18n.py index 693ae663e..addd617d4 100644 --- a/sphinx/transforms/i18n.py +++ b/sphinx/transforms/i18n.py @@ -17,7 +17,7 @@ from docutils.utils import relative_path from docutils.transforms import Transform from sphinx import addnodes -from sphinx.util import split_index_msg +from sphinx.util import split_index_msg, logging from sphinx.util.i18n import find_catalog from sphinx.util.nodes import ( LITERAL_TYPE_NODES, IMAGE_TYPE_NODES, @@ -33,6 +33,8 @@ if False: from sphinx.application import Sphinx # NOQA from sphinx.config import Config # NOQA +logger = logging.getLogger(__name__) + def publish_msgstr(app, source, source_path, source_line, config, settings): # type: (Sphinx, unicode, unicode, int, Config, Dict) -> nodes.document @@ -272,8 +274,8 @@ class Locale(Transform): old_foot_refs = node.traverse(is_autonumber_footnote_ref) new_foot_refs = patch.traverse(is_autonumber_footnote_ref) if len(old_foot_refs) != len(new_foot_refs): - env.warn_node('inconsistent footnote references in ' - 'translated message', node) + logger.warn_node('inconsistent footnote references in ' + 'translated message', node) old_foot_namerefs = {} # type: Dict[unicode, List[nodes.footnote_reference]] for r in old_foot_refs: old_foot_namerefs.setdefault(r.get('refname'), []).append(r) @@ -307,8 +309,7 @@ class Locale(Transform): old_refs = node.traverse(is_refnamed_ref) new_refs = patch.traverse(is_refnamed_ref) if len(old_refs) != len(new_refs): - env.warn_node('inconsistent references in ' - 'translated message', node) + logger.warn_node('inconsistent references in translated message', node) old_ref_names = [r['refname'] for r in old_refs] new_ref_names = [r['refname'] for r in new_refs] orphans = list(set(old_ref_names) - set(new_ref_names)) @@ -336,8 +337,7 @@ class Locale(Transform): new_refs = patch.traverse(is_refnamed_footnote_ref) refname_ids_map = {} if len(old_refs) != len(new_refs): - env.warn_node('inconsistent references in ' - 'translated message', node) + logger.warn_node('inconsistent references in translated message', node) for old in old_refs: refname_ids_map[old["refname"]] = old["ids"] for new in new_refs: @@ -352,8 +352,7 @@ class Locale(Transform): new_refs = patch.traverse(addnodes.pending_xref) xref_reftarget_map = {} if len(old_refs) != len(new_refs): - env.warn_node('inconsistent term references in ' - 'translated message', node) + logger.warn_node('inconsistent term references in translated message', node) def get_ref_key(node): # type: (nodes.Node) -> Tuple[unicode, unicode, unicode] diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index 2ce31a283..95b14be9a 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -23,7 +23,7 @@ from sphinx.util.console import colorize if False: # For type annotation - from typing import Any, Generator, IO, Tuple # NOQA + from typing import Any, Generator, IO, Tuple, Union # NOQA from docutils import nodes # NOQA from sphinx.application import Sphinx # NOQA @@ -422,12 +422,12 @@ def setup(app, status, warning): for handler in logger.handlers[:]: logger.removeHandler(handler) - info_handler = NewLineStreamHandler(SafeEncodingWriter(status)) + info_handler = NewLineStreamHandler(SafeEncodingWriter(status)) # type: ignore 
info_handler.addFilter(InfoFilter()) info_handler.setLevel(VERBOSITY_MAP.get(app.verbosity)) info_handler.setFormatter(ColorizeFormatter()) - warning_handler = WarningStreamHandler(SafeEncodingWriter(warning)) + warning_handler = WarningStreamHandler(SafeEncodingWriter(warning)) # type: ignore warning_handler.addFilter(WarningSuppressor(app)) warning_handler.addFilter(WarningIsErrorFilter(app)) warning_handler.addFilter(WarningLogRecordTranslator(app)) diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py index 8a58570bb..a5aa58444 100644 --- a/sphinx/util/nodes.py +++ b/sphinx/util/nodes.py @@ -18,6 +18,7 @@ from docutils import nodes from sphinx import addnodes from sphinx.locale import pairindextypes +from sphinx.util import logging if False: # For type annotation @@ -25,6 +26,8 @@ if False: from sphinx.builders import Builder # NOQA from sphinx.utils.tags import Tags # NOQA +logger = logging.getLogger(__name__) + class WarningStream(object): @@ -304,15 +307,14 @@ def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed if includefile not in traversed: try: traversed.append(includefile) - builder.info(colorfunc(includefile) + " ", nonl=1) + logger.info(colorfunc(includefile) + " ", nonl=1) subtree = inline_all_toctrees(builder, docnameset, includefile, builder.env.get_doctree(includefile), colorfunc, traversed) docnameset.add(includefile) except Exception: - builder.warn('toctree contains ref to nonexisting ' - 'file %r' % includefile, - builder.env.doc2path(docname)) + logger.warning('toctree contains ref to nonexisting file %r', + includefile, location=docname) else: sof = addnodes.start_of_file(docname=includefile) sof.children = subtree.children @@ -350,8 +352,8 @@ def set_role_source_info(inliner, lineno, node): node.source, node.line = inliner.reporter.get_source_and_line(lineno) -def process_only_nodes(doctree, tags, warn_node=None): - # type: (nodes.Node, Tags, Callable) -> None +def process_only_nodes(doctree, tags): + # type: (nodes.Node, Tags) -> None # A comment on the comment() nodes being inserted: replacing by [] would # result in a "Losing ids" exception if there is a target node before # the only node, so we make sure docutils can transfer the id to @@ -360,10 +362,8 @@ def process_only_nodes(doctree, tags, warn_node=None): try: ret = tags.eval_condition(node['expr']) except Exception as err: - if warn_node is None: - raise err - warn_node('exception while evaluating only ' - 'directive expression: %s' % err, node) + logger.warn_node('exception while evaluating only ' + 'directive expression: %s' % err, node) node.replace_self(node.children or nodes.comment()) else: if ret: diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py index b2d6587e5..0daaffd82 100644 --- a/sphinx/writers/html.py +++ b/sphinx/writers/html.py @@ -22,9 +22,12 @@ from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator from sphinx import addnodes from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ +from sphinx.util import logging from sphinx.util.images import get_image_size from sphinx.util.smartypants import sphinx_smarty_pants +logger = logging.getLogger(__name__) + # A good overview of the purpose behind these classes can be found here: # http://www.arnebrodowski.de/blog/write-your-own-restructuredtext-writer.html @@ -289,7 +292,7 @@ class HTMLTranslator(BaseTranslator): prefix = self.builder.config.numfig_format.get(figtype) if prefix is None: msg = 'numfig_format is not defined for %s' 
% figtype - self.builder.warn(msg) + logger.warning(msg) else: numbers = self.builder.fignumbers[key][figure_id] self.body.append(prefix % '.'.join(map(str, numbers)) + ' ') @@ -299,7 +302,7 @@ class HTMLTranslator(BaseTranslator): if figtype: if len(node['ids']) == 0: msg = 'Any IDs not assigned for %s node' % node.tagname - self.builder.env.warn_node(msg, node) + logger.warn_node(msg, node) else: append_fignumber(figtype, node['ids'][0]) @@ -522,8 +525,8 @@ class HTMLTranslator(BaseTranslator): if not ('width' in node and 'height' in node): size = get_image_size(os.path.join(self.builder.srcdir, olduri)) if size is None: - self.builder.env.warn_node('Could not obtain image size. ' - ':scale: option is ignored.', node) + logger.warn_node('Could not obtain image size. ' + ':scale: option is ignored.', node) else: if 'width' not in node: node['width'] = str(size[0]) @@ -755,10 +758,10 @@ class HTMLTranslator(BaseTranslator): self.body.append(self.starttag(node, 'tr', '', CLASS='field')) def visit_math(self, node, math_env=''): - self.builder.warn('using "math" markup without a Sphinx math extension ' - 'active, please use one of the math extensions ' - 'described at http://sphinx-doc.org/ext/math.html', - (self.builder.current_docname, node.line)) + logger.warning('using "math" markup without a Sphinx math extension ' + 'active, please use one of the math extensions ' + 'described at http://sphinx-doc.org/ext/math.html', + location=(self.builder.current_docname, node.line)) raise nodes.SkipNode def unknown_visit(self, node): diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index ab5ce9307..75335fd97 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -26,7 +26,7 @@ from sphinx import highlighting from sphinx.errors import SphinxError from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ -from sphinx.util import split_into +from sphinx.util import split_into, logging from sphinx.util.i18n import format_date from sphinx.util.nodes import clean_astext, traverse_parent from sphinx.util.template import LaTeXRenderer @@ -38,6 +38,7 @@ if False: from typing import Any, Callable, Iterator, Pattern, Tuple, Union # NOQA from sphinx.builder import Builder # NOQA +logger = logging.getLogger(__name__) BEGIN_DOC = r''' \begin{document} @@ -438,8 +439,8 @@ class LaTeXTranslator(nodes.NodeVisitor): if builder.config.language and not self.babel.is_supported_language(): # emit warning if specified language is invalid # (only emitting, nothing changed to processing) - self.builder.warn('no Babel option known for language %r' % - builder.config.language) + logger.warning('no Babel option known for language %r', + builder.config.language) # simply use babel.get_language() always, as get_language() returns # 'english' even if language is invalid or empty @@ -490,7 +491,7 @@ class LaTeXTranslator(nodes.NodeVisitor): tocdepth = document['tocdepth'] + self.top_sectionlevel - 2 maxdepth = len(self.sectionnames) - self.top_sectionlevel if tocdepth > maxdepth: - self.builder.warn('too large :maxdepth:, ignored.') + logger.warning('too large :maxdepth:, ignored.') tocdepth = maxdepth self.elements['tocdepth'] = '\\setcounter{tocdepth}{%d}' % tocdepth @@ -566,7 +567,7 @@ class LaTeXTranslator(nodes.NodeVisitor): for key in self.builder.config.latex_elements: if key not in self.elements: msg = _("Unknown configure key: latex_elements[%r] is ignored.") - self.builder.warn(msg % key) + logger.warning(msg % key) def restrict_footnote(self, 
node): # type: (nodes.Node) -> None @@ -891,8 +892,8 @@ class LaTeXTranslator(nodes.NodeVisitor): if self.this_is_the_title: if len(node.children) != 1 and not isinstance(node.children[0], nodes.Text): - self.builder.warn('document title is not a single Text node', - (self.curfilestack[-1], node.line)) + logger.warning('document title is not a single Text node', + location=(self.curfilestack[-1], node.line)) if not self.elements['title']: # text needs to be escaped since it is inserted into # the output literally @@ -930,10 +931,9 @@ class LaTeXTranslator(nodes.NodeVisitor): # Redirect body output until title is finished. self.pushbody([]) else: - self.builder.warn( - 'encountered title node not in section, topic, table, ' - 'admonition or sidebar', - (self.curfilestack[-1], node.line or '')) + logger.warning('encountered title node not in section, topic, table, ' + 'admonition or sidebar', + location=(self.curfilestack[-1], node.line or '')) self.body.append('\\sphinxstyleothertitle{') self.context.append('}\n') self.in_title = 1 @@ -1573,7 +1573,7 @@ class LaTeXTranslator(nodes.NodeVisitor): try: return rstdim_to_latexdim(width_str) except ValueError: - self.builder.warn('dimension unit %s is invalid. Ignored.' % width_str) + logger.warning('dimension unit %s is invalid. Ignored.', width_str) def is_inline(self, node): # type: (nodes.Node) -> bool @@ -1886,10 +1886,9 @@ class LaTeXTranslator(nodes.NodeVisitor): p1, p2 = [self.encode(x) for x in split_into(2, 'seealso', string)] self.body.append(r'\index{%s|see{%s}}' % (p1, p2)) else: - self.builder.warn( - 'unknown index entry type %s found' % type) + logger.warning('unknown index entry type %s found', type) except ValueError as err: - self.builder.warn(str(err)) + logger.warning(str(err)) raise nodes.SkipNode def visit_raw(self, node): @@ -1953,8 +1952,8 @@ class LaTeXTranslator(nodes.NodeVisitor): else: self.context.append('}}}') else: - self.builder.warn('unusable reference target found: %s' % uri, - (self.curfilestack[-1], node.line)) + logger.warning('unusable reference target found: %s', uri, + location=(self.curfilestack[-1], node.line)) self.context.append('') def depart_reference(self, node): @@ -2459,10 +2458,10 @@ class LaTeXTranslator(nodes.NodeVisitor): def visit_math(self, node): # type: (nodes.Node) -> None - self.builder.warn('using "math" markup without a Sphinx math extension ' - 'active, please use one of the math extensions ' - 'described at http://sphinx-doc.org/ext/math.html', - (self.curfilestack[-1], node.line)) + logger.warning('using "math" markup without a Sphinx math extension ' + 'active, please use one of the math extensions ' + 'described at http://sphinx-doc.org/ext/math.html', + location=(self.curfilestack[-1], node.line)) raise nodes.SkipNode visit_math_block = visit_math diff --git a/sphinx/writers/manpage.py b/sphinx/writers/manpage.py index 249256576..715d1b395 100644 --- a/sphinx/writers/manpage.py +++ b/sphinx/writers/manpage.py @@ -21,9 +21,12 @@ from docutils.writers.manpage import ( from sphinx import addnodes from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ +from sphinx.util import logging import sphinx.util.docutils from sphinx.util.i18n import format_date +logger = logging.getLogger(__name__) + class ManualPageWriter(Writer): def __init__(self, builder): @@ -437,9 +440,9 @@ class ManualPageTranslator(BaseTranslator): pass def visit_math(self, node): - self.builder.warn('using "math" markup without a Sphinx math extension ' - 'active, please 
use one of the math extensions ' - 'described at http://sphinx-doc.org/ext/math.html') + logger.warning('using "math" markup without a Sphinx math extension ' + 'active, please use one of the math extensions ' + 'described at http://sphinx-doc.org/ext/math.html') raise nodes.SkipNode visit_math_block = visit_math diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py index edaa57d0f..e9543b1b2 100644 --- a/sphinx/writers/texinfo.py +++ b/sphinx/writers/texinfo.py @@ -23,6 +23,7 @@ from sphinx import addnodes, __display_version__ from sphinx.errors import ExtensionError from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ +from sphinx.util import logging from sphinx.util.i18n import format_date from sphinx.writers.latex import collected_footnote @@ -31,6 +32,8 @@ if False: from typing import Any, Callable, Iterator, Pattern, Tuple, Union # NOQA from sphinx.builders.texinfo import TexinfoBuilder # NOQA +logger = logging.getLogger(__name__) + COPYING = """\ @quotation @@ -651,9 +654,9 @@ class TexinfoTranslator(nodes.NodeVisitor): if isinstance(parent, (nodes.Admonition, nodes.sidebar, nodes.topic)): raise nodes.SkipNode elif not isinstance(parent, nodes.section): - self.builder.warn( - 'encountered title node not in section, topic, table, ' - 'admonition or sidebar', (self.curfilestack[-1], node.line)) + logger.warning('encountered title node not in section, topic, table, ' + 'admonition or sidebar', + location=(self.curfilestack[-1], node.line)) self.visit_rubric(node) else: try: @@ -1331,8 +1334,8 @@ class TexinfoTranslator(nodes.NodeVisitor): node.parent.get('literal_block'))): self.body.append('\n@caption{') else: - self.builder.warn('caption not inside a figure.', - (self.curfilestack[-1], node.line)) + logger.warning('caption not inside a figure.', + location=(self.curfilestack[-1], node.line)) def depart_caption(self, node): # type: (nodes.Node) -> None @@ -1434,13 +1437,13 @@ class TexinfoTranslator(nodes.NodeVisitor): def unimplemented_visit(self, node): # type: (nodes.Node) -> None - self.builder.warn("unimplemented node type: %r" % node, - (self.curfilestack[-1], node.line)) + logger.warning("unimplemented node type: %r", node, + location=(self.curfilestack[-1], node.line)) def unknown_visit(self, node): # type: (nodes.Node) -> None - self.builder.warn("unknown node type: %r" % node, - (self.curfilestack[-1], node.line)) + logger.warning("unknown node type: %r", node, + location=(self.curfilestack[-1], node.line)) def unknown_departure(self, node): # type: (nodes.Node) -> None @@ -1756,9 +1759,9 @@ class TexinfoTranslator(nodes.NodeVisitor): def visit_math(self, node): # type: (nodes.Node) -> None - self.builder.warn('using "math" markup without a Sphinx math extension ' - 'active, please use one of the math extensions ' - 'described at http://sphinx-doc.org/ext/math.html') + logger.warning('using "math" markup without a Sphinx math extension ' + 'active, please use one of the math extensions ' + 'described at http://sphinx-doc.org/ext/math.html') raise nodes.SkipNode visit_math_block = visit_math diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py index 21d41bea3..7ec69d3ae 100644 --- a/sphinx/writers/text.py +++ b/sphinx/writers/text.py @@ -22,12 +22,15 @@ from docutils.utils import column_width from sphinx import addnodes from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ +from sphinx.util import logging if False: # For type annotation from typing import Any, 
Callable, Tuple, Union # NOQA from sphinx.builders.text import TextBuilder # NOQA +logger = logging.getLogger(__name__) + class TextWrapper(textwrap.TextWrapper): """Custom subclass that uses a different word separator regex.""" @@ -1174,10 +1177,10 @@ class TextTranslator(nodes.NodeVisitor): def visit_math(self, node): # type: (nodes.Node) -> None - self.builder.warn('using "math" markup without a Sphinx math extension ' - 'active, please use one of the math extensions ' - 'described at http://sphinx-doc.org/ext/math.html', - (self.builder.current_docname, node.line)) + logger.warning('using "math" markup without a Sphinx math extension ' + 'active, please use one of the math extensions ' + 'described at http://sphinx-doc.org/ext/math.html', + location=(self.builder.current_docname, node.line)) raise nodes.SkipNode visit_math_block = visit_math diff --git a/tests/test_application.py b/tests/test_application.py index 1f4a30d97..2b765f9c6 100644 --- a/tests/test_application.py +++ b/tests/test_application.py @@ -47,26 +47,6 @@ def test_emit_with_nonascii_name_node(app, status, warning): app.emit('my_event', node) -@with_app() -def test_output(app, status, warning): - # info with newline - status.truncate(0) # __init__ writes to status - status.seek(0) - app.info("Nothing here...") - assert status.getvalue() == "Nothing here...\n" - # info without newline - status.truncate(0) - status.seek(0) - app.info("Nothing here...", True) - assert status.getvalue() == "Nothing here..." - - # warning - old_count = app._warncount - app.warn("Bad news!") - assert strip_escseq(warning.getvalue()) == "WARNING: Bad news!\n" - assert app._warncount == old_count + 1 - - @with_app() def test_extensions(app, status, warning): app.setup_extension('shutil') diff --git a/tests/test_environment.py b/tests/test_environment.py index f65a6f3f0..11de4a93b 100644 --- a/tests/test_environment.py +++ b/tests/test_environment.py @@ -9,35 +9,26 @@ :license: BSD, see LICENSE for details. 
""" -from six import PY3 +from six import StringIO -from util import TestApp, remove_unicode_literals, path +from util import TestApp, path from sphinx.builders.html import StandaloneHTMLBuilder from sphinx.builders.latex import LaTeXBuilder app = env = None -warnings = [] def setup_module(): global app, env - app = TestApp(srcdir='root-envtest') + app = TestApp(srcdir='root-envtest', warning=StringIO()) env = app.env - env.set_warnfunc(lambda *args, **kwargs: warnings.append(args)) def teardown_module(): app.cleanup() -def warning_emitted(file, text): - for warning in warnings: - if len(warning) == 2 and file in warning[1] and text in warning[0]: - return True - return False - - # Tests are run in the order they appear in the file, therefore we can # afford to not run update() in the setup but in its own test @@ -49,12 +40,12 @@ def test_first_update(): def test_images(): - assert warning_emitted('images', 'image file not readable: foo.png') - assert warning_emitted('images', 'nonlocal image URI found: ' - 'http://www.python.org/logo.png') + assert ('image file not readable: foo.png' + in app._warning.getvalue()) + assert ('nonlocal image URI found: http://www.python.org/logo.png' + in app._warning.getvalue()) tree = env.get_doctree('images') - app._warning.reset() htmlbuilder = StandaloneHTMLBuilder(app) htmlbuilder.imgpath = 'dummy' htmlbuilder.post_process_images(tree) @@ -64,7 +55,6 @@ def test_images(): assert set(htmlbuilder.images.values()) == \ set(['img.png', 'img1.png', 'simg.png', 'svgimg.svg', 'img.foo.png']) - app._warning.reset() latexbuilder = LaTeXBuilder(app) latexbuilder.post_process_images(tree) assert set(latexbuilder.images.keys()) == \ diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index 37c4a2d2a..59c899369 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -296,9 +296,10 @@ def test_output_with_unencodable_char(app, status, warning): self.stream.write(object.encode('cp1252').decode('cp1252')) logging.setup(app, StreamWriter(status), warning) + logger = logging.getLogger(__name__) # info with UnicodeEncodeError status.truncate(0) status.seek(0) - app.info(u"unicode \u206d...") + logger.info(u"unicode \u206d...") assert status.getvalue() == "unicode ?...\n" From 15b46598e21c05fae6064b8f64e9dd00a73ed9d9 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 27 Dec 2016 02:13:56 +0900 Subject: [PATCH 070/190] logger.warning() supports node as location parameter --- sphinx/builders/__init__.py | 4 +-- sphinx/domains/cpp.py | 2 +- sphinx/domains/python.py | 7 +++-- sphinx/domains/std.py | 20 +++++++------- sphinx/environment/__init__.py | 29 +++++++++++---------- sphinx/environment/managers/indexentries.py | 2 +- sphinx/environment/managers/toctree.py | 12 ++++----- sphinx/ext/autosectionlabel.py | 5 ++-- sphinx/ext/todo.py | 3 ++- sphinx/transforms/__init__.py | 2 +- sphinx/transforms/i18n.py | 13 +++++---- sphinx/util/logging.py | 28 ++++++++------------ sphinx/util/nodes.py | 4 +-- sphinx/writers/html.py | 6 ++--- tests/test_util_logging.py | 24 +++++++---------- 15 files changed, 77 insertions(+), 84 deletions(-) diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 7448e2682..3a592a257 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -161,8 +161,8 @@ class Builder(object): if candidate: break else: - logger.warn_node('no matching candidate for image URI %r' % node['uri'], - node) + logger.warning('no matching candidate for image URI %r', 
node['uri'], + location=node) continue node['uri'] = candidate else: diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index 52d520a05..cfdbc99d9 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -4885,7 +4885,7 @@ class CPPDomain(Domain): class Warner(object): def warn(self, msg): if emitWarnings: - logger.warn_node(msg, node) + logger.warning(msg, location=node) warner = Warner() parser = DefinitionParser(target, warner, env.config) try: diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py index 1da874ea2..886b1f863 100644 --- a/sphinx/domains/python.py +++ b/sphinx/domains/python.py @@ -787,10 +787,9 @@ class PythonDomain(Domain): if not matches: return None elif len(matches) > 1: - logger.warn_node( - 'more than one target found for cross-reference ' - '%r: %s' % (target, ', '.join(match[0] for match in matches)), - node) + logger.warning('more than one target found for cross-reference %r: %s', + target, ', '.join(match[0] for match in matches), + location=node) name, obj = matches[0] if obj[1] == 'module': diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py index eb355ecf2..c351a15af 100644 --- a/sphinx/domains/std.py +++ b/sphinx/domains/std.py @@ -573,8 +573,8 @@ class StandardDomain(Domain): label = node[0].astext() if label in self.data['citations']: path = env.doc2path(self.data['citations'][label][0]) - logger.warn_node('duplicate citation %s, other instance in %s' % - (label, path), node) + logger.warning('duplicate citation %s, other instance in %s', label, path, + location=node) self.data['citations'][label] = (docname, node['ids'][0]) def note_labels(self, env, docname, document): @@ -596,8 +596,9 @@ class StandardDomain(Domain): # link and object descriptions continue if name in labels: - logger.warn_node('duplicate label %s, ' % name + 'other instance ' - 'in ' + env.doc2path(labels[name][0]), node) + logger.warning('duplicate label %s, ' % name + 'other instance ' + 'in ' + env.doc2path(labels[name][0]), + location=node) anonlabels[name] = docname, labelid if node.tagname == 'section': sectname = clean_astext(node[0]) # node[0] == title node @@ -688,7 +689,7 @@ class StandardDomain(Domain): return None if env.config.numfig is False: - logger.warn_node('numfig is disabled. :numref: is ignored.', node) + logger.warning('numfig is disabled. :numref: is ignored.', location=node) return contnode target_node = env.get_doctree(docname).ids.get(labelid) @@ -701,7 +702,8 @@ class StandardDomain(Domain): if fignumber is None: return contnode except ValueError: - logger.warn_node("no number is assigned for %s: %s" % (figtype, labelid), node) + logger.warning("no number is assigned for %s: %s", figtype, labelid, + location=node) return contnode try: @@ -711,7 +713,7 @@ class StandardDomain(Domain): title = env.config.numfig_format.get(figtype, '') if figname is None and '%{name}' in title: - logger.warn_node('the link has no caption: %s' % title, node) + logger.warning('the link has no caption: %s', title, location=node) return contnode else: fignum = '.'.join(map(str, fignumber)) @@ -725,10 +727,10 @@ class StandardDomain(Domain): # old style format (cf. 
"Fig.%s") newtitle = title % fignum except KeyError as exc: - logger.warn_node('invalid numfig_format: %s (%r)' % (title, exc), node) + logger.warning('invalid numfig_format: %s (%r)', title, exc, location=node) return contnode except TypeError: - logger.warn_node('invalid numfig_format: %s' % title, node) + logger.warning('invalid numfig_format: %s', title, location=node) return contnode return self.build_reference_node(fromdocname, builder, diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 4b6e9bf31..1fb9ec19e 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -902,8 +902,8 @@ class BuildEnvironment(object): rel_filename, filename = self.relfn2path(targetname, docname) self.dependencies[docname].add(rel_filename) if not os.access(filename, os.R_OK): - logger.warn_node('download file not readable: %s' % filename, - node) + logger.warning('download file not readable: %s', filename, + location=node) continue uniquename = self.dlfiles.add_file(docname, filename) node['filename'] = uniquename @@ -921,8 +921,8 @@ class BuildEnvironment(object): if mimetype not in candidates: globbed.setdefault(mimetype, []).append(new_imgpath) except (OSError, IOError) as err: - logger.warn_node('image file %s not readable: %s' % - (filename, err), node) + logger.warning('image file %s not readable: %s', filename, err, + location=node) for key, files in iteritems(globbed): candidates[key] = sorted(files, key=len)[0] # select by similarity @@ -934,13 +934,13 @@ class BuildEnvironment(object): node['candidates'] = candidates = {} imguri = node['uri'] if imguri.startswith('data:'): - logger.warn_node('image data URI found. some builders might not support', node, - type='image', subtype='data_uri') + logger.warning('image data URI found. 
some builders might not support', + location=node, type='image', subtype='data_uri') candidates['?'] = imguri continue elif imguri.find('://') != -1: - logger.warn_node('nonlocal image URI found: %s' % imguri, node, - type='image', subtype='nonlocal_uri') + logger.warning('nonlocal image URI found: %s', imguri, + location=node, type='image', subtype='nonlocal_uri') candidates['?'] = imguri continue rel_imgpath, full_imgpath = self.relfn2path(imguri, docname) @@ -969,8 +969,8 @@ class BuildEnvironment(object): for imgpath in itervalues(candidates): self.dependencies[docname].add(imgpath) if not os.access(path.join(self.srcdir, imgpath), os.R_OK): - logger.warn_node('image file not readable: %s' % imgpath, - node) + logger.warning('image file not readable: %s', imgpath, + location=node) continue self.images.add_file(docname, imgpath) @@ -1183,7 +1183,8 @@ class BuildEnvironment(object): (node['refdomain'], typ) else: msg = '%r reference target not found: %%(target)s' % typ - logger.warn_node(msg % {'target': target}, node, type='ref', subtype=typ) + logger.warning(msg % {'target': target}, + location=node, type='ref', subtype=typ) def _resolve_doc_reference(self, builder, refdoc, node, contnode): # type: (Builder, unicode, nodes.Node, nodes.Node) -> nodes.Node @@ -1234,9 +1235,9 @@ class BuildEnvironment(object): return None if len(results) > 1: nice_results = ' or '.join(':%s:' % r[0] for r in results) - logger.warn_node('more than one target found for \'any\' cross-' - 'reference %r: could be %s' % (target, nice_results), - node) + logger.warning('more than one target found for \'any\' cross-' + 'reference %r: could be %s', target, nice_results, + location=node) res_role, newnode = results[0] # Override "any" class with the actual role type to get the styling # approximately correct. 
diff --git a/sphinx/environment/managers/indexentries.py b/sphinx/environment/managers/indexentries.py index 43e3b4c83..ef9c84d02 100644 --- a/sphinx/environment/managers/indexentries.py +++ b/sphinx/environment/managers/indexentries.py @@ -55,7 +55,7 @@ class IndexEntries(EnvironmentManager): for entry in node['entries']: split_index_msg(entry[0], entry[1]) except ValueError as exc: - logger.warn_node(str(exc), node) + logger.warning(str(exc), location=node) node.parent.remove(node) else: for entry in node['entries']: diff --git a/sphinx/environment/managers/toctree.py b/sphinx/environment/managers/toctree.py index 64937b7fa..1df3f0999 100644 --- a/sphinx/environment/managers/toctree.py +++ b/sphinx/environment/managers/toctree.py @@ -317,15 +317,13 @@ class Toctree(EnvironmentManager): refnode.children = [nodes.Text(title)] if not toc.children: # empty toc means: no titles will show up in the toctree - logger.warn_node( - 'toctree contains reference to document %r that ' - 'doesn\'t have a title: no link will be generated' - % ref, toctreenode) + logger.warning('toctree contains reference to document %r that ' + 'doesn\'t have a title: no link will be generated', + ref, location=toctreenode) except KeyError: # this is raised if the included file does not exist - logger.warn_node( - 'toctree contains reference to nonexisting document %r' - % ref, toctreenode) + logger.warning('toctree contains reference to nonexisting document %r', + ref, location=toctreenode) else: # if titles_only is given, only keep the main title and # sub-toctrees diff --git a/sphinx/ext/autosectionlabel.py b/sphinx/ext/autosectionlabel.py index 78cb3c042..d45ba66a6 100644 --- a/sphinx/ext/autosectionlabel.py +++ b/sphinx/ext/autosectionlabel.py @@ -26,8 +26,9 @@ def register_sections_as_label(app, document): sectname = clean_astext(node[0]) if name in labels: - logger.warn_node('duplicate label %s, ' % name + 'other instance ' - 'in ' + app.env.doc2path(labels[name][0]), node) + logger.warning('duplicate label %s, ' % name + 'other instance ' + 'in ' + app.env.doc2path(labels[name][0]), + location=node) anonlabels[name] = docname, labelid labels[name] = docname, labelid, sectname diff --git a/sphinx/ext/todo.py b/sphinx/ext/todo.py index e8d8201df..f575e7462 100644 --- a/sphinx/ext/todo.py +++ b/sphinx/ext/todo.py @@ -100,7 +100,8 @@ def process_todos(app, doctree): }) if env.config.todo_emit_warnings: - logger.warn_node("TODO entry found: %s" % node[1].astext(), node) + logger.warning("TODO entry found: %s", node[1].astext(), + location=node) class TodoList(Directive): diff --git a/sphinx/transforms/__init__.py b/sphinx/transforms/__init__.py index fad0d0038..4eb65af6e 100644 --- a/sphinx/transforms/__init__.py +++ b/sphinx/transforms/__init__.py @@ -181,7 +181,7 @@ class AutoIndexUpgrader(Transform): if 'entries' in node and any(len(entry) == 4 for entry in node['entries']): msg = ('4 column based index found. 
' 'It might be a bug of extensions you use: %r' % node['entries']) - logger.warn_node(msg, node) + logger.warning(msg, location=node) for i, entry in enumerate(node['entries']): if len(entry) == 4: node['entries'][i] = entry + (None,) diff --git a/sphinx/transforms/i18n.py b/sphinx/transforms/i18n.py index addd617d4..8663c573d 100644 --- a/sphinx/transforms/i18n.py +++ b/sphinx/transforms/i18n.py @@ -274,8 +274,8 @@ class Locale(Transform): old_foot_refs = node.traverse(is_autonumber_footnote_ref) new_foot_refs = patch.traverse(is_autonumber_footnote_ref) if len(old_foot_refs) != len(new_foot_refs): - logger.warn_node('inconsistent footnote references in ' - 'translated message', node) + logger.warning('inconsistent footnote references in translated message', + location=node) old_foot_namerefs = {} # type: Dict[unicode, List[nodes.footnote_reference]] for r in old_foot_refs: old_foot_namerefs.setdefault(r.get('refname'), []).append(r) @@ -309,7 +309,8 @@ class Locale(Transform): old_refs = node.traverse(is_refnamed_ref) new_refs = patch.traverse(is_refnamed_ref) if len(old_refs) != len(new_refs): - logger.warn_node('inconsistent references in translated message', node) + logger.warning('inconsistent references in translated message', + location=node) old_ref_names = [r['refname'] for r in old_refs] new_ref_names = [r['refname'] for r in new_refs] orphans = list(set(old_ref_names) - set(new_ref_names)) @@ -337,7 +338,8 @@ class Locale(Transform): new_refs = patch.traverse(is_refnamed_footnote_ref) refname_ids_map = {} if len(old_refs) != len(new_refs): - logger.warn_node('inconsistent references in translated message', node) + logger.warning('inconsistent references in translated message', + location=node) for old in old_refs: refname_ids_map[old["refname"]] = old["ids"] for new in new_refs: @@ -352,7 +354,8 @@ class Locale(Transform): new_refs = patch.traverse(addnodes.pending_xref) xref_reftarget_map = {} if len(old_refs) != len(new_refs): - logger.warn_node('inconsistent term references in translated message', node) + logger.warning('inconsistent term references in translated message', + location=node) def get_ref_key(node): # type: (nodes.Node) -> Tuple[unicode, unicode, unicode] diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index 95b14be9a..ecec2649a 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -16,6 +16,7 @@ from contextlib import contextmanager from collections import defaultdict from six import PY2, StringIO +from docutils import nodes from docutils.utils import get_source_line from sphinx.errors import SphinxWarning @@ -92,23 +93,6 @@ class SphinxWarningLogRecord(logging.LogRecord): class SphinxLoggerAdapter(logging.LoggerAdapter): """LoggerAdapter allowing ``type`` and ``subtype`` keywords.""" - def warn_node(self, message, node, **kwargs): - # type: (unicode, nodes.Node, Any) -> None - """Emit a warning for specific node. 
- - :param message: a message of warning - :param node: a node related with the warning - """ - (source, line) = get_source_line(node) - if source and line: - kwargs['location'] = "%s:%s" % (source, line) - elif source: - kwargs['location'] = "%s:" % source - elif line: - kwargs['location'] = "<unknown>:%s" % line - - self.warning(message, **kwargs) - def log(self, level, msg, *args, **kwargs): # type: (Union[int, str], unicode, Any, Any) -> None if isinstance(level, int): @@ -374,6 +358,16 @@ class WarningLogRecordTranslator(logging.Filter): record.location = '%s' % self.app.env.doc2path(docname) else: record.location = None + elif isinstance(location, nodes.Node): + (source, line) = get_source_line(location) + if source and line: + record.location = "%s:%s" % (source, line) + elif source: + record.location = "%s:" % source + elif line: + record.location = "<unknown>:%s" % line + else: + record.location = None elif location and ':' not in location: record.location = '%s' % self.app.env.doc2path(location) diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py index a5aa58444..4c574c242 100644 --- a/sphinx/util/nodes.py +++ b/sphinx/util/nodes.py @@ -362,8 +362,8 @@ def process_only_nodes(doctree, tags): try: ret = tags.eval_condition(node['expr']) except Exception as err: - logger.warn_node('exception while evaluating only ' - 'directive expression: %s' % err, node) + logger.warning('exception while evaluating only directive expression: %s', err, + location=node) node.replace_self(node.children or nodes.comment()) else: if ret: diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py index 0daaffd82..6a455cdb4 100644 --- a/sphinx/writers/html.py +++ b/sphinx/writers/html.py @@ -302,7 +302,7 @@ class HTMLTranslator(BaseTranslator): if figtype: if len(node['ids']) == 0: msg = 'Any IDs not assigned for %s node' % node.tagname - logger.warn_node(msg, node) + logger.warning(msg, location=node) else: append_fignumber(figtype, node['ids'][0]) @@ -525,8 +525,8 @@ class HTMLTranslator(BaseTranslator): if not ('width' in node and 'height' in node): size = get_image_size(os.path.join(self.builder.srcdir, olduri)) if size is None: - logger.warn_node('Could not obtain image size. ' - ':scale: option is ignored.', node) + logger.warning('Could not obtain image size. 
:scale: option is ignored.', + location=node) else: if 'width' not in node: node['width'] = str(size[0]) diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index 59c899369..0fc7277a7 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -199,30 +199,24 @@ def test_warning_location(app, status, warning): assert 'index.txt:10: WARNING: message2' in warning.getvalue() logger.warning('message3', location=None) - assert '\x1b[31mWARNING: message3' in warning.getvalue() # \x1b[31m = darkred - - -@with_app() -def test_warn_node(app, status, warning): - logging.setup(app, status, warning) - logger = logging.getLogger(__name__) + assert colorize('darkred', 'WARNING: message3') in warning.getvalue() node = nodes.Node() node.source, node.line = ('index.txt', 10) - logger.warn_node('message1', node) - assert 'index.txt:10: WARNING: message1' in warning.getvalue() + logger.warning('message4', location=node) + assert 'index.txt:10: WARNING: message4' in warning.getvalue() node.source, node.line = ('index.txt', None) - logger.warn_node('message2', node) - assert 'index.txt:: WARNING: message2' in warning.getvalue() + logger.warning('message5', location=node) + assert 'index.txt:: WARNING: message5' in warning.getvalue() node.source, node.line = (None, 10) - logger.warn_node('message3', node) - assert '<unknown>:10: WARNING: message3' in warning.getvalue() + logger.warning('message6', location=node) + assert '<unknown>:10: WARNING: message6' in warning.getvalue() node.source, node.line = (None, None) - logger.warn_node('message4', node) - assert '\x1b[31mWARNING: message4' in warning.getvalue() # \x1b[31m = darkred + logger.warning('message7', location=node) + assert colorize('darkred', 'WARNING: message7') in warning.getvalue() @with_app() From 8140ae33b51b17089f189ada27d280d79837c43b Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 28 Dec 2016 21:14:56 +0900 Subject: [PATCH 071/190] Add doc/extdev/logging.rst --- doc/extdev/index.rst | 1 + doc/extdev/logging.rst | 78 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) create mode 100644 doc/extdev/logging.rst diff --git a/doc/extdev/index.rst b/doc/extdev/index.rst index b27db4b2d..1f3871c21 100644 --- a/doc/extdev/index.rst +++ b/doc/extdev/index.rst @@ -54,3 +54,4 @@ APIs used for writing extensions domainapi parserapi nodes + logging diff --git a/doc/extdev/logging.rst b/doc/extdev/logging.rst new file mode 100644 index 000000000..169bf6a5a --- /dev/null +++ b/doc/extdev/logging.rst @@ -0,0 +1,78 @@ +.. _logging-api: + +Logging API +=========== + +.. function:: sphinx.util.logging.getLogger(name) + + Return a logger wrapped by :class:`SphinxLoggerAdapter` with the specified *name*. + + Example usage:: + + from sphinx.util import logging # Load instead python's logging module + + logger = logging.getLogger(__name__) + logger.info('Hello, this is an extension!') + +.. class:: SphinxLoggerAdapter(logging.LoggerAdapter) + + .. method:: SphinxLoggerAdapter.error(level, msg, *args, **kwargs) + .. method:: SphinxLoggerAdapter.critical(level, msg, *args, **kwargs) + .. method:: SphinxLoggerAdapter.warning(level, msg, *args, **kwargs) + + Logs a message with specified level on this logger. + Basically, the arguments are same as python's logging module. + + In addition, Sphinx logger supports following keyword arguments: + + **type**, ***subtype*** + Indicate categories of warning logs. It is used to suppress + warnings by :confval:`suppress_warnings` setting. 
+ + **location** + Indicate where the warning is happened. It is used to show + the path and line number to each log. It allows docname, + tuple of docname and line number and nodes:: + + logger = sphinx.util.logging.getLogger(__name__) + logger.warning('Warning happened!', location='index') + logger.warning('Warning happened!', location=('chapter1/index', 10)) + logger.warning('Warning happened!', location=some_node) + + **color** + Indicate the color of logs. By default, warning level logs are + colored as ``"darkred"``. The others are not colored. + + .. method:: SphinxLoggerAdapter.log(level, msg, *args, **kwargs) + .. method:: SphinxLoggerAdapter.info(level, msg, *args, **kwargs) + .. method:: SphinxLoggerAdapter.verbose(level, msg, *args, **kwargs) + .. method:: SphinxLoggerAdapter.debug(level, msg, *args, **kwargs) + .. method:: SphinxLoggerAdapter.debug2(level, msg, *args, **kwargs) + + Logs a message with specified level on this logger. + Basically, the arguments are same as python's logging module. + + In addition, Sphinx logger supports following keyword arguments: + + **nonl** + If true, the logger does not fold lines at end of the log message. + The default is ``False``. + + **color** + Indicate the color of logs. By default, debug level logs are + colored as ``"darkgray"``, and debug2 ones are ``"lightgray"``. + The others are not colored. + +.. function:: pending_logging() + + Make all logs as pending while the context:: + + with pending_logging(): + logger.warning('Warning message!') # not flushed yet + some_long_process() + + # the warning is flushed here + +.. function:: pending_warnings() + + Make warning logs as pending while the context. Similar to :func:`pending_logging`. From 50f4d5636f4977fd917294133745832eaf931e6f Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Mon, 2 Jan 2017 13:03:49 +0900 Subject: [PATCH 072/190] Close #2367: ``Sphinx.warn()``, ``Sphinx.info()`` and other logging methods are now deprecated --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 2a0743959..358b78679 100644 --- a/CHANGES +++ b/CHANGES @@ -8,6 +8,8 @@ Incompatible changes members by default. Thanks to Luc Saffre. * LaTeX ``\includegraphics`` command isn't overloaded: only ``\sphinxincludegraphics`` has the custom code to fit image to available width if oversized. +* #2367: ``Sphinx.warn()``, ``Sphinx.info()`` and other logging methods are now + deprecated. Please use ``sphinx.util.logging`` (:ref:`logging-api`) instead. Features added -------------- From 33b6058b8dc4ea8d95a57825e888f67bb76f05d2 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 4 Jan 2017 22:23:44 +0900 Subject: [PATCH 073/190] Update CHANGES --- CHANGES | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 358b78679..f8376d49f 100644 --- a/CHANGES +++ b/CHANGES @@ -8,8 +8,6 @@ Incompatible changes members by default. Thanks to Luc Saffre. * LaTeX ``\includegraphics`` command isn't overloaded: only ``\sphinxincludegraphics`` has the custom code to fit image to available width if oversized. -* #2367: ``Sphinx.warn()``, ``Sphinx.info()`` and other logging methods are now - deprecated. Please use ``sphinx.util.logging`` (:ref:`logging-api`) instead. Features added -------------- @@ -27,6 +25,8 @@ Deprecated * ``sphinx.util.compat.Directive`` class is now deprecated. 
Please use instead ``docutils.parsers.rsr.Directive`` * ``sphinx.util.compat.docutils_version`` is now deprecated +* #2367: ``Sphinx.warn()``, ``Sphinx.info()`` and other logging methods are now + deprecated. Please use ``sphinx.util.logging`` (:ref:`logging-api`) instead. Release 1.5.2 (in development) =============================== From d4c4720a071e1f0e4ef4d9acf78e310e8df25b52 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 4 Jan 2017 23:17:45 +0900 Subject: [PATCH 074/190] Update by review --- sphinx/application.py | 2 +- sphinx/builders/latex.py | 4 ++-- sphinx/ext/imgmath.py | 4 ++-- sphinx/ext/pngmath.py | 4 ++-- sphinx/ext/viewcode.py | 3 +-- 5 files changed, 8 insertions(+), 9 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 584ad109f..b10e0ebb6 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -319,7 +319,7 @@ class Sphinx(object): if isinstance(err, IOError) and err.errno == ENOENT: logger.info('not yet created') else: - logger.info('failed: %s' % err) + logger.info('failed: %s', err) self._init_env(freshenv=True) def _init_builder(self, buildername): diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index 18cf0046f..7a73ce4aa 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -122,7 +122,7 @@ class LaTeXBuilder(Builder): destination = FileOutput( destination_path=path.join(self.outdir, targetname), encoding='utf-8') - logger.info("processing " + targetname + "... ", nonl=1) + logger.info("processing %s...", targetname, nonl=1) toctrees = self.env.get_doctree(docname).traverse(addnodes.toctree) if toctrees: if toctrees[0].get('maxdepth') > 0: @@ -241,7 +241,7 @@ class LaTeXBuilder(Builder): def validate_config_values(app): # type: (Sphinx) -> None if app.config.latex_toplevel_sectioning not in (None, 'part', 'chapter', 'section'): - logger.warning('invalid latex_toplevel_sectioning, ignored: %s' % + logger.warning('invalid latex_toplevel_sectioning, ignored: %s', app.config.latex_toplevel_sectioning) app.config.latex_toplevel_sectioning = None # type: ignore diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py index e02353452..e559530c3 100644 --- a/sphinx/ext/imgmath.py +++ b/sphinx/ext/imgmath.py @@ -237,7 +237,7 @@ def html_visit_math(self, node): sm = nodes.system_message(msg, type='WARNING', level=2, backrefs=[], source=node['latex']) sm.walkabout(self) - logger.warning('display latex %r: ' % node['latex'] + msg) + logger.warning('display latex %r: %s', node['latex'], msg) raise nodes.SkipNode if fname is None: # something failed -- use text-only as a bad substitute @@ -265,7 +265,7 @@ def html_visit_displaymath(self, node): sm = nodes.system_message(msg, type='WARNING', level=2, backrefs=[], source=node['latex']) sm.walkabout(self) - logger.warning('inline latex %r: ' % node['latex'] + msg) + logger.warning('inline latex %r: %s', node['latex'], msg) raise nodes.SkipNode self.body.append(self.starttag(node, 'div', CLASS='math')) self.body.append('<p>') diff --git a/sphinx/ext/pngmath.py b/sphinx/ext/pngmath.py index b1408129f..f4431b07f 100644 --- a/sphinx/ext/pngmath.py +++ b/sphinx/ext/pngmath.py @@ -209,7 +209,7 @@ def html_visit_math(self, node): sm = nodes.system_message(msg, type='WARNING', level=2, backrefs=[], source=node['latex']) sm.walkabout(self) - logger.warning('display latex %r: ' % node['latex'] + msg) + logger.warning('display latex %r: %s', node['latex'], msg) raise nodes.SkipNode if fname is None: # something failed -- use text-only as a 
bad substitute @@ -237,7 +237,7 @@ def html_visit_displaymath(self, node): sm = nodes.system_message(msg, type='WARNING', level=2, backrefs=[], source=node['latex']) sm.walkabout(self) - logger.warning('inline latex %r: ' % node['latex'] + msg) + logger.warning('inline latex %r: %s', node['latex'], msg) raise nodes.SkipNode self.body.append(self.starttag(node, 'div', CLASS='math')) self.body.append('<p>') diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py index 95aabd6c2..c639c6c55 100644 --- a/sphinx/ext/viewcode.py +++ b/sphinx/ext/viewcode.py @@ -47,8 +47,7 @@ def _get_full_modname(app, modname, attribute): # by py:function or other directives, viewcode emits a lot of warnings. # It should be displayed only verbose mode. logger.verbose(traceback.format_exc().rstrip()) - logger.verbose('viewcode can\'t import %s, failed with error "%s"' % - (modname, e)) + logger.verbose('viewcode can\'t import %s, failed with error "%s"', modname, e) return None From 23d0d0abbf459445e1dede2b16f0ae555f8979e3 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 1 Jan 2017 22:00:14 +0900 Subject: [PATCH 075/190] Add RemovedInSphinx18Warning and promote RemovedInSphinx17Warning --- sphinx/deprecation.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/sphinx/deprecation.py b/sphinx/deprecation.py index 163992712..9ea4ab1f3 100644 --- a/sphinx/deprecation.py +++ b/sphinx/deprecation.py @@ -14,7 +14,11 @@ class RemovedInSphinx16Warning(DeprecationWarning): pass -class RemovedInSphinx17Warning(PendingDeprecationWarning): +class RemovedInSphinx17Warning(DeprecationWarning): + pass + + +class RemovedInSphinx18Warning(PendingDeprecationWarning): pass @@ -22,4 +26,4 @@ class RemovedInSphinx20Warning(PendingDeprecationWarning): pass -RemovedInNextVersionWarning = RemovedInSphinx16Warning +RemovedInNextVersionWarning = RemovedInSphinx17Warning From 76443623fa302fed301ef60f298d232bd78bfbc0 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 1 Jan 2017 22:05:23 +0900 Subject: [PATCH 076/190] Drop deprecated termsep node --- doc/extdev/nodes.rst | 1 - sphinx/addnodes.py | 16 ---------------- sphinx/writers/html.py | 10 ---------- sphinx/writers/latex.py | 11 ----------- sphinx/writers/manpage.py | 11 ----------- sphinx/writers/texinfo.py | 14 -------------- sphinx/writers/text.py | 11 ----------- 7 files changed, 74 deletions(-) diff --git a/doc/extdev/nodes.rst b/doc/extdev/nodes.rst index 359410e25..5d8272eae 100644 --- a/doc/extdev/nodes.rst +++ b/doc/extdev/nodes.rst @@ -55,4 +55,3 @@ You should not need to generate the nodes below in extensions. .. autoclass:: start_of_file .. autoclass:: productionlist .. autoclass:: production -.. autoclass:: termsep diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py index b85637c87..c410cb9b7 100644 --- a/sphinx/addnodes.py +++ b/sphinx/addnodes.py @@ -9,10 +9,7 @@ :license: BSD, see LICENSE for details. """ -import warnings - from docutils import nodes -from sphinx.deprecation import RemovedInSphinx16Warning class translatable(object): @@ -273,19 +270,6 @@ class abbreviation(nodes.Inline, nodes.TextElement): """Node for abbreviations with explanations.""" -class termsep(nodes.Structural, nodes.Element): - """Separates two terms within a <term> node. - - .. versionchanged:: 1.4 - sphinx.addnodes.termsep is deprecated. It will be removed at Sphinx-1.6. 
- """ - - def __init__(self, *args, **kw): - warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6', - RemovedInSphinx16Warning, stacklevel=2) - super(termsep, self).__init__(*args, **kw) - - class manpage(nodes.Inline, nodes.TextElement): """Node for references to manpages.""" diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py index 6a455cdb4..e6dd89faa 100644 --- a/sphinx/writers/html.py +++ b/sphinx/writers/html.py @@ -13,14 +13,12 @@ import sys import posixpath import os import copy -import warnings from six import string_types from docutils import nodes from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator from sphinx import addnodes -from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ from sphinx.util import logging from sphinx.util.images import get_image_size @@ -697,14 +695,6 @@ class HTMLTranslator(BaseTranslator): def depart_definition(self, node): self.body.append('</dd>\n') - def visit_termsep(self, node): - warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. ' - 'This warning is displayed because some Sphinx extension ' - 'uses sphinx.addnodes.termsep. Please report it to ' - 'author of the extension.', RemovedInSphinx16Warning) - self.body.append('<br />') - raise nodes.SkipNode - def visit_manpage(self, node): return self.visit_literal_emphasis(node) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 75335fd97..a31885a96 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -15,7 +15,6 @@ import re import sys from os import path -import warnings from six import itervalues, text_type from docutils import nodes, writers @@ -24,7 +23,6 @@ from docutils.writers.latex2e import Babel from sphinx import addnodes from sphinx import highlighting from sphinx.errors import SphinxError -from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ from sphinx.util import split_into, logging from sphinx.util.i18n import format_date @@ -1468,15 +1466,6 @@ class LaTeXTranslator(nodes.NodeVisitor): self.unrestrict_footnote(node) self.in_term -= 1 - def visit_termsep(self, node): - # type: (nodes.Node) -> None - warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. ' - 'This warning is displayed because some Sphinx extension ' - 'uses sphinx.addnodes.termsep. Please report it to ' - 'author of the extension.', RemovedInSphinx16Warning) - self.body.append(', ') - raise nodes.SkipNode - def visit_classifier(self, node): # type: (nodes.Node) -> None self.body.append('{[}') diff --git a/sphinx/writers/manpage.py b/sphinx/writers/manpage.py index 715d1b395..76b402462 100644 --- a/sphinx/writers/manpage.py +++ b/sphinx/writers/manpage.py @@ -9,8 +9,6 @@ :license: BSD, see LICENSE for details. """ -import warnings - from docutils import nodes from docutils.writers.manpage import ( MACRO_DEF, @@ -19,7 +17,6 @@ from docutils.writers.manpage import ( ) from sphinx import addnodes -from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ from sphinx.util import logging import sphinx.util.docutils @@ -219,14 +216,6 @@ class ManualPageTranslator(BaseTranslator): else: BaseTranslator.visit_term(self, node) - def visit_termsep(self, node): - warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. ' - 'This warning is displayed because some Sphinx extension ' - 'uses sphinx.addnodes.termsep. 
Please report it to ' - 'author of the extension.', RemovedInSphinx16Warning) - self.body.append(', ') - raise nodes.SkipNode - # overwritten -- we don't want source comments to show up def visit_comment(self, node): raise nodes.SkipNode diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py index e9543b1b2..a992869e3 100644 --- a/sphinx/writers/texinfo.py +++ b/sphinx/writers/texinfo.py @@ -12,7 +12,6 @@ import re import textwrap from os import path -import warnings from six import itervalues from six.moves import range @@ -21,7 +20,6 @@ from docutils import nodes, writers from sphinx import addnodes, __display_version__ from sphinx.errors import ExtensionError -from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ from sphinx.util import logging from sphinx.util.i18n import format_date @@ -1071,18 +1069,6 @@ class TexinfoTranslator(nodes.NodeVisitor): # type: (nodes.Node) -> None pass - def visit_termsep(self, node): - # type: (nodes.Node) -> None - warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. ' - 'This warning is displayed because some Sphinx extension ' - 'uses sphinx.addnodes.termsep. Please report it to ' - 'author of the extension.', RemovedInSphinx16Warning) - self.body.append('\n%s ' % self.at_item_x) - - def depart_termsep(self, node): - # type: (nodes.Node) -> None - pass - def visit_classifier(self, node): # type: (nodes.Node) -> None self.body.append(' : ') diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py index 7ec69d3ae..9bd427468 100644 --- a/sphinx/writers/text.py +++ b/sphinx/writers/text.py @@ -12,7 +12,6 @@ import os import re import textwrap from itertools import groupby -import warnings from six.moves import zip_longest @@ -20,7 +19,6 @@ from docutils import nodes, writers from docutils.utils import column_width from sphinx import addnodes -from sphinx.deprecation import RemovedInSphinx16Warning from sphinx.locale import admonitionlabels, _ from sphinx.util import logging @@ -774,15 +772,6 @@ class TextTranslator(nodes.NodeVisitor): if not self._classifier_count_in_li: self.end_state(end=None) - def visit_termsep(self, node): - # type: (nodes.Node) -> None - warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. ' - 'This warning is displayed because some Sphinx extension ' - 'uses sphinx.addnodes.termsep. Please report it to ' - 'author of the extension.', RemovedInSphinx16Warning) - self.add_text(', ') - raise nodes.SkipNode - def visit_classifier(self, node): # type: (nodes.Node) -> None self.add_text(' : ') From ea856fd90978f72242b7a178ace623350995fd87 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 1 Jan 2017 22:25:07 +0900 Subject: [PATCH 077/190] Drop LDML support of format_date() --- doc/config.rst | 25 -------------------- sphinx/util/i18n.py | 52 +++++++++++++++-------------------------- tests/test_util_i18n.py | 18 -------------- 3 files changed, 19 insertions(+), 76 deletions(-) diff --git a/doc/config.rst b/doc/config.rst index 647aa6bc7..faba1152f 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -378,18 +378,6 @@ Project information %Y'`` (or, if translation is enabled with :confval:`language`, an equivalent format for the selected locale). - .. versionchanged:: 1.4 - - Format specification was changed from strftime to Locale Data Markup - Language. strftime format is also supported for backward compatibility - until Sphinx-1.5. - - .. 
versionchanged:: 1.4.1 - - Format specification was changed again from Locale Data Markup Language - to strftime. LDML format is also supported for backward compatibility - until Sphinx-1.5. - .. confval:: highlight_language The default language to highlight source code in. The default is @@ -765,19 +753,6 @@ that use Sphinx's HTMLWriter class. The empty string is equivalent to ``'%b %d, %Y'`` (or a locale-dependent equivalent). - .. versionchanged:: 1.4 - - Format specification was changed from strftime to Locale Data Markup - Language. strftime format is also supported for backward compatibility - until Sphinx-1.5. - - .. versionchanged:: 1.4.1 - - Format specification was changed again from Locale Data Markup Language - to strftime. LDML format is also supported for backward compatibility - until Sphinx-1.5. - - .. confval:: html_use_smartypants If true, `SmartyPants <http://daringfireball.net/projects/smartypants/>`_ diff --git a/sphinx/util/i18n.py b/sphinx/util/i18n.py index 42eb477a4..72059d634 100644 --- a/sphinx/util/i18n.py +++ b/sphinx/util/i18n.py @@ -12,7 +12,6 @@ import gettext import io import os import re -import warnings from os import path from datetime import datetime from collections import namedtuple @@ -24,7 +23,6 @@ from babel.messages.mofile import write_mo from sphinx.errors import SphinxError from sphinx.util import logging from sphinx.util.osutil import SEP, walk -from sphinx.deprecation import RemovedInSphinx16Warning logger = logging.getLogger(__name__) @@ -197,9 +195,6 @@ def babel_format_date(date, format, locale, formatter=babel.dates.format_date): def format_date(format, date=None, language=None): # type: (str, datetime, unicode) -> unicode - if format is None: - format = 'medium' - if date is None: # If time is not specified, try to use $SOURCE_DATE_EPOCH variable # See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal @@ -209,37 +204,28 @@ def format_date(format, date=None, language=None): else: date = datetime.now() - if re.match('EEE|MMM|dd|DDD|MM|WW|medium|YY', format): - # consider the format as babel's - warnings.warn('LDML format support will be dropped at Sphinx-1.6', - RemovedInSphinx16Warning) + result = [] + tokens = re.split('(%.)', format) + for token in tokens: + if token in date_format_mappings: + babel_format = date_format_mappings.get(token, '') - return babel_format_date(date, format, locale=language, - formatter=babel.dates.format_datetime) - else: - # consider the format as ustrftime's and try to convert it to babel's - result = [] - tokens = re.split('(%.)', format) - for token in tokens: - if token in date_format_mappings: - babel_format = date_format_mappings.get(token, '') - - # Check if we have to use a different babel formatter then - # format_datetime, because we only want to format a date - # or a time. - if token == '%x': - function = babel.dates.format_date - elif token == '%X': - function = babel.dates.format_time - else: - function = babel.dates.format_datetime - - result.append(babel_format_date(date, babel_format, locale=language, - formatter=function)) + # Check if we have to use a different babel formatter then + # format_datetime, because we only want to format a date + # or a time. 
+ if token == '%x': + function = babel.dates.format_date + elif token == '%X': + function = babel.dates.format_time else: - result.append(token) + function = babel.dates.format_datetime - return "".join(result) + result.append(babel_format_date(date, babel_format, locale=language, + formatter=function)) + else: + result.append(token) + + return "".join(result) def get_image_filename_for_language(filename, env): diff --git a/tests/test_util_i18n.py b/tests/test_util_i18n.py index 849796a8f..933f3403a 100644 --- a/tests/test_util_i18n.py +++ b/tests/test_util_i18n.py @@ -169,15 +169,6 @@ def test_get_catalogs_with_compact(dir): def test_format_date(): date = datetime.date(2016, 2, 7) - # default format - format = None - assert i18n.format_date(format, date=date) == 'Feb 7, 2016' - assert i18n.format_date(format, date=date, language='') == 'Feb 7, 2016' - assert i18n.format_date(format, date=date, language='unknown') == 'Feb 7, 2016' - assert i18n.format_date(format, date=date, language='en') == 'Feb 7, 2016' - assert i18n.format_date(format, date=date, language='ja') == '2016/02/07' - assert i18n.format_date(format, date=date, language='de') == '07.02.2016' - # strftime format format = '%B %d, %Y' assert i18n.format_date(format, date=date) == 'February 07, 2016' @@ -187,15 +178,6 @@ def test_format_date(): assert i18n.format_date(format, date=date, language='ja') == u'2月 07, 2016' assert i18n.format_date(format, date=date, language='de') == 'Februar 07, 2016' - # LDML format - format = 'MMM dd, YYYY' - assert i18n.format_date(format, date=date) == 'Feb 07, 2016' - assert i18n.format_date(format, date=date, language='') == 'Feb 07, 2016' - assert i18n.format_date(format, date=date, language='unknown') == 'Feb 07, 2016' - assert i18n.format_date(format, date=date, language='en') == 'Feb 07, 2016' - assert i18n.format_date(format, date=date, language='ja') == u'2月 07, 2016' - assert i18n.format_date(format, date=date, language='de') == 'Feb. 07, 2016' - # raw string format = 'Mon Mar 28 12:37:08 2016, commit 4367aef' assert i18n.format_date(format, date=date) == format From 390e5a6ec211479b6024c03188aec838585ac51e Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 1 Jan 2017 22:38:33 +0900 Subject: [PATCH 078/190] Drop deprecated options for epub3 builder --- doc/config.rst | 14 -------------- sphinx/builders/epub3.py | 18 ------------------ 2 files changed, 32 deletions(-) diff --git a/doc/config.rst b/doc/config.rst index faba1152f..f57e02e64 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -1510,20 +1510,6 @@ the `Dublin Core metadata <http://dublincore.org/>`_. .. [#] https://developer.mozilla.org/en-US/docs/Web/CSS/writing-mode -.. confval:: epub3_page_progression_direction - - The global direction in which the content flows. - Allowed values are ``'ltr'`` (left-to-right), ``'rtl'`` (right-to-left) and - ``'default'``. The default value is ``'ltr'``. - - When the ``'default'`` value is specified, the Author is expressing no - preference and the Reading System may chose the rendering direction. - - .. versionadded:: 1.4 - - .. deprecated:: 1.5 - Use ``epub_writing_mode`` instead. - .. 
_latex-options: Options for LaTeX output diff --git a/sphinx/builders/epub3.py b/sphinx/builders/epub3.py index ca4a44749..c2022e622 100644 --- a/sphinx/builders/epub3.py +++ b/sphinx/builders/epub3.py @@ -260,31 +260,13 @@ class Epub3Builder(EpubBuilder): self.files.append(outname) -def validate_config_values(app): - if app.config.epub3_description is not None: - logger.warning('epub3_description is deprecated. Use epub_description instead.') - app.config.epub_description = app.config.epub3_description - - if app.config.epub3_contributor is not None: - logger.warning('epub3_contributor is deprecated. Use epub_contributor instead.') - app.config.epub_contributor = app.config.epub3_contributor - - if app.config.epub3_page_progression_direction is not None: - logger.warning('epub3_page_progression_direction option is deprecated' - ' from 1.5. Use epub_writing_mode instead.') - - def setup(app): app.setup_extension('sphinx.builders.epub') app.add_builder(Epub3Builder) - app.connect('builder-inited', validate_config_values) app.add_config_value('epub_description', '', 'epub3', string_classes) app.add_config_value('epub_contributor', 'unknown', 'epub3', string_classes) app.add_config_value('epub_writing_mode', 'horizontal', 'epub3', string_classes) - app.add_config_value('epub3_description', None, 'epub3', string_classes) - app.add_config_value('epub3_contributor', None, 'epub3', string_classes) - app.add_config_value('epub3_page_progression_direction', None, 'epub3', string_classes) return { 'version': 'builtin', From 5d715cb14877058f9586e92c0f5f224bce56ff81 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 1 Jan 2017 22:43:07 +0900 Subject: [PATCH 079/190] Drop deprecated options for latex builder --- doc/_static/conf.py.txt | 5 --- doc/config.rst | 38 -------------------- doc/invocation.rst | 2 +- sphinx/builders/latex.py | 50 -------------------------- sphinx/make_mode.py | 2 +- sphinx/templates/quickstart/make.bat_t | 4 +-- sphinx/writers/latex.py | 4 --- tests/root/Makefile | 7 ++-- 8 files changed, 6 insertions(+), 106 deletions(-) diff --git a/doc/_static/conf.py.txt b/doc/_static/conf.py.txt index ab54f15b8..f70ae3568 100644 --- a/doc/_static/conf.py.txt +++ b/doc/_static/conf.py.txt @@ -268,11 +268,6 @@ latex_documents = [ # # latex_logo = None -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# -# latex_use_parts = False - # If true, show page references after internal links. # # latex_show_pagerefs = False diff --git a/doc/config.rst b/doc/config.rst index f57e02e64..b4f8568e4 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -1581,16 +1581,6 @@ These options influence LaTeX output. See further :doc:`latex`. .. versionadded:: 1.4 -.. confval:: latex_use_parts - - If true, the topmost sectioning unit is parts, else it is chapters. Default: - ``False``. - - .. versionadded:: 0.3 - - .. deprecated:: 1.4 - Use :confval:`latex_toplevel_sectioning`. - .. confval:: latex_appendices A list of document names to append as an appendix to all manuals. @@ -1606,13 +1596,6 @@ These options influence LaTeX output. See further :doc:`latex`. .. versionadded:: 1.0 -.. confval:: latex_use_modindex - - If true, add a module index to LaTeX documents. Default is ``True``. - - .. deprecated:: 1.0 - Use :confval:`latex_domain_indices`. - .. confval:: latex_show_pagerefs If true, add page references after internal references. This is very useful @@ -1863,27 +1846,6 @@ These options influence LaTeX output. See further :doc:`latex`. 
.. versionchanged:: 1.2 This overrides the files which is provided from Sphinx such as sphinx.sty. -.. confval:: latex_preamble - - Additional LaTeX markup for the preamble. - - .. deprecated:: 0.5 - Use the ``'preamble'`` key in the :confval:`latex_elements` value. - -.. confval:: latex_paper_size - - The output paper size (``'letter'`` or ``'a4'``). Default is ``'letter'``. - - .. deprecated:: 0.5 - Use the ``'papersize'`` key in the :confval:`latex_elements` value. - -.. confval:: latex_font_size - - The font size ('10pt', '11pt' or '12pt'). Default is ``'10pt'``. - - .. deprecated:: 0.5 - Use the ``'pointsize'`` key in the :confval:`latex_elements` value. - .. _text-options: diff --git a/doc/invocation.rst b/doc/invocation.rst index d65fa27c4..783e1fe6d 100644 --- a/doc/invocation.rst +++ b/doc/invocation.rst @@ -404,7 +404,7 @@ variables to customize behavior: .. describe:: PAPER - The value for :confval:`latex_paper_size`. + The value for '"papersize"` key of :confval:`latex_elements`. .. describe:: SPHINXBUILD diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index 7a73ce4aa..d77fb999a 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -245,47 +245,6 @@ def validate_config_values(app): app.config.latex_toplevel_sectioning) app.config.latex_toplevel_sectioning = None # type: ignore - if app.config.latex_use_parts: - if app.config.latex_toplevel_sectioning: - logger.warning('latex_use_parts conflicts with ' - 'latex_toplevel_sectioning, ignored.') - else: - logger.warning('latex_use_parts is deprecated. ' - 'Use latex_toplevel_sectioning instead.') - app.config.latex_toplevel_sectioning = 'part' # type: ignore - - if app.config.latex_use_modindex is not True: # changed by user - logger.warning('latex_use_modindex is deprecated. ' - 'Use latex_domain_indices instead.') - - if app.config.latex_preamble: - if app.config.latex_elements.get('preamble'): - logger.warning("latex_preamble conflicts with " - "latex_elements['preamble'], ignored.") - else: - logger.warning("latex_preamble is deprecated. " - "Use latex_elements['preamble'] instead.") - app.config.latex_elements['preamble'] = app.config.latex_preamble - - if app.config.latex_paper_size != 'letter': - if app.config.latex_elements.get('papersize'): - logger.warning("latex_paper_size conflicts with " - "latex_elements['papersize'], ignored.") - else: - logger.warning("latex_paper_size is deprecated. " - "Use latex_elements['papersize'] instead.") - if app.config.latex_paper_size: - app.config.latex_elements['papersize'] = app.config.latex_paper_size + 'paper' - - if app.config.latex_font_size != '10pt': - if app.config.latex_elements.get('pointsize'): - logger.warning("latex_font_size conflicts with " - "latex_elements['pointsize'], ignored.") - else: - logger.warning("latex_font_size is deprecated. 
" - "Use latex_elements['pointsize'] instead.") - app.config.latex_elements['pointsize'] = app.config.latex_font_size - if 'footer' in app.config.latex_elements: if 'postamble' in app.config.latex_elements: logger.warning("latex_elements['footer'] conflicts with " @@ -327,23 +286,14 @@ def setup(app): app.add_config_value('latex_logo', None, None, string_classes) app.add_config_value('latex_appendices', [], None) app.add_config_value('latex_keep_old_macro_names', True, None) - # now deprecated - use latex_toplevel_sectioning - app.add_config_value('latex_use_parts', False, None) app.add_config_value('latex_toplevel_sectioning', None, None, [str]) - app.add_config_value('latex_use_modindex', True, None) # deprecated app.add_config_value('latex_domain_indices', True, None, [list]) app.add_config_value('latex_show_urls', 'no', None) app.add_config_value('latex_show_pagerefs', False, None) - # paper_size and font_size are still separate values - # so that you can give them easily on the command line - app.add_config_value('latex_paper_size', 'letter', None) - app.add_config_value('latex_font_size', '10pt', None) app.add_config_value('latex_elements', {}, None) app.add_config_value('latex_additional_files', [], None) app.add_config_value('latex_docclass', default_latex_docclass, None) - # now deprecated - use latex_elements - app.add_config_value('latex_preamble', '', None) return { 'version': 'builtin', diff --git a/sphinx/make_mode.py b/sphinx/make_mode.py index dddb0e07a..2da5d5b71 100644 --- a/sphinx/make_mode.py +++ b/sphinx/make_mode.py @@ -273,7 +273,7 @@ class Make(object): papersize = os.getenv('PAPER', '') opts = self.opts if papersize in ('a4', 'letter'): - opts.extend(['-D', 'latex_paper_size=' + papersize]) + opts.extend(['-D', 'latex_elements.papersize=' + papersize]) if doctreedir is None: doctreedir = self.builddir_join('doctrees') diff --git a/sphinx/templates/quickstart/make.bat_t b/sphinx/templates/quickstart/make.bat_t index 8f993a7b1..8438b5f7e 100644 --- a/sphinx/templates/quickstart/make.bat_t +++ b/sphinx/templates/quickstart/make.bat_t @@ -11,8 +11,8 @@ set BUILDDIR={{ rbuilddir }} set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% {{ rsrcdir }} set I18NSPHINXOPTS=%SPHINXOPTS% {{ rsrcdir }} if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% - set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% + set ALLSPHINXOPTS=-D latex_elements.papersize=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_elements.papersize=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index a31885a96..221d2b2ac 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -719,10 +719,6 @@ class LaTeXTranslator(nodes.NodeVisitor): if isinstance(indices_config, list): if indexname not in indices_config: continue - # deprecated config value - if indexname == 'py-modindex' and \ - not self.builder.config.latex_use_modindex: - continue content, collapsed = indexcls(domain).generate( self.builder.docnames) if not content: diff --git a/tests/root/Makefile b/tests/root/Makefile index 7d5162fe7..85a93bc54 100644 --- a/tests/root/Makefile +++ b/tests/root/Makefile @@ -4,12 +4,9 @@ # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build -PAPER = # Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
+ALLSPHINXOPTS = -d _build/doctrees $(SPHINXOPTS) . .PHONY: help clean html web pickle htmlhelp latex changes linkcheck @@ -18,7 +15,7 @@ help: @echo " html to make standalone HTML files" @echo " pickle to make pickle files (usable by e.g. sphinx-web)" @echo " htmlhelp to make HTML files and an HTML help project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latex to make LaTeX files" @echo " changes to make an overview over all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" From 431d865d4aff6ed556f07c628b181422d8e9c4fc Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 1 Jan 2017 22:58:14 +0900 Subject: [PATCH 080/190] Drop deprecated options for html builder --- doc/config.rst | 21 --------------------- doc/extdev/appapi.rst | 5 ----- sphinx/application.py | 6 ------ sphinx/builders/html.py | 14 -------------- sphinx/config.py | 5 +---- tests/test_api_translator.py | 24 ++---------------------- 6 files changed, 3 insertions(+), 72 deletions(-) diff --git a/doc/config.rst b/doc/config.rst index b4f8568e4..dd6f3e998 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -852,13 +852,6 @@ that use Sphinx's HTMLWriter class. .. versionadded:: 1.0 -.. confval:: html_use_modindex - - If true, add a module index to the HTML documents. Default is ``True``. - - .. deprecated:: 1.0 - Use :confval:`html_domain_indices`. - .. confval:: html_use_index If true, add an index to the HTML documents. Default is ``True``. @@ -921,20 +914,6 @@ that use Sphinx's HTMLWriter class. .. versionadded:: 0.6 -.. confval:: html_translator_class - - A string with the fully-qualified name of a HTML Translator class, that is, a - subclass of Sphinx's :class:`~sphinx.writers.html.HTMLTranslator`, that is - used to translate document trees to HTML. Default is ``None`` (use the - builtin translator). - - .. seealso:: :meth:`~sphinx.application.Sphinx.set_translator` - - .. deprecated:: 1.5 - - Implement your translator as extension and use `Sphinx.set_translator` - instead. - .. confval:: html_show_copyright If true, "(C) Copyright ..." is shown in the HTML footer. Default is diff --git a/doc/extdev/appapi.rst b/doc/extdev/appapi.rst index b3ffb7af0..26349a981 100644 --- a/doc/extdev/appapi.rst +++ b/doc/extdev/appapi.rst @@ -89,11 +89,6 @@ package. This allows extensions to use custom translator and define custom nodes for the translator (see :meth:`add_node`). - This is a API version of :confval:`html_translator_class` for all other - builders. Note that if :confval:`html_translator_class` is specified and - this API is called for html related builders, API overriding takes - precedence. - .. versionadded:: 1.3 .. method:: Sphinx.add_node(node, **kwds) diff --git a/sphinx/application.py b/sphinx/application.py index b10e0ebb6..27ef41efe 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -189,12 +189,6 @@ class Sphinx(object): 'This project needs at least Sphinx v%s and therefore cannot ' 'be built with this version.' 
% self.config.needs_sphinx) - # force preload html_translator_class - if self.config.html_translator_class: - translator_class = self.import_object(self.config.html_translator_class, - 'html_translator_class setting') - self.set_translator('html', translator_class) - # set confdir to srcdir if -C given (!= no confdir); a few pieces # of code expect a confdir to be set if self.confdir is None: diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index b979929b7..4a5c1fa8f 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -301,10 +301,6 @@ class StandaloneHTMLBuilder(Builder): if isinstance(indices_config, list): if indexname not in indices_config: continue - # deprecated config value - if indexname == 'py-modindex' and \ - not self.config.html_use_modindex: - continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append( @@ -1267,13 +1263,6 @@ class JSONHTMLBuilder(SerializingHTMLBuilder): SerializingHTMLBuilder.init(self) -def validate_config_values(app): - # type: (Sphinx) -> None - if app.config.html_translator_class: - logger.warning('html_translator_class is deprecated. ' - 'Use Sphinx.set_translator() API instead.') - - def setup(app): # type: (Sphinx) -> Dict[unicode, Any] # builders @@ -1283,8 +1272,6 @@ def setup(app): app.add_builder(PickleHTMLBuilder) app.add_builder(JSONHTMLBuilder) - app.connect('builder-inited', validate_config_values) - # config values app.add_config_value('html_theme', 'alabaster', 'html') app.add_config_value('html_theme_path', [], 'html') @@ -1302,7 +1289,6 @@ def setup(app): app.add_config_value('html_use_smartypants', True, 'html') app.add_config_value('html_sidebars', {}, 'html') app.add_config_value('html_additional_pages', {}, 'html') - app.add_config_value('html_use_modindex', True, 'html') # deprecated app.add_config_value('html_domain_indices', True, 'html', [list]) app.add_config_value('html_add_permalinks', u'\u00B6', 'html') app.add_config_value('html_use_index', True, 'html') diff --git a/sphinx/config.py b/sphinx/config.py index 27a184dce..c55660a5c 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -124,9 +124,6 @@ class Config(object): tls_verify = (True, 'env'), tls_cacerts = (None, 'env'), - - # pre-initialized confval for HTML builder - html_translator_class = (None, 'html', string_classes), ) # type: Dict[unicode, Tuple] def __init__(self, dirname, filename, overrides, tags): @@ -250,7 +247,7 @@ class Config(object): def pre_init_values(self): # type: () -> None """Initialize some limited config variables before loading extensions""" - variables = ['needs_sphinx', 'suppress_warnings', 'html_translator_class'] + variables = ['needs_sphinx', 'suppress_warnings'] for name in variables: try: if name in self.overrides: diff --git a/tests/test_api_translator.py b/tests/test_api_translator.py index 7a70fd4c8..3d346cda1 100644 --- a/tests/test_api_translator.py +++ b/tests/test_api_translator.py @@ -24,21 +24,12 @@ def teardown_module(): @with_app('html') def test_html_translator(app, status, warning): - # no set_translator(), no html_translator_class + # no set_translator() translator_class = app.builder.translator_class assert translator_class assert translator_class.__name__ == 'SmartyPantsHTMLTranslator' -@with_app('html', confoverrides={ - 'html_translator_class': 'translator.ExtHTMLTranslator'}) -def test_html_with_html_translator_class(app, status, warning): - # no set_translator(), but html_translator_class - translator_class = app.builder.translator_class - assert 
translator_class - assert translator_class.__name__ == 'ExtHTMLTranslator' - - @with_app('html', confoverrides={'html_use_smartypants': False}) def test_html_with_smartypants(app, status, warning): @@ -50,18 +41,7 @@ def test_html_with_smartypants(app, status, warning): @with_app('html', testroot='api-set-translator') def test_html_with_set_translator_for_html_(app, status, warning): - # use set_translator(), no html_translator_class - translator_class = app.builder.translator_class - assert translator_class - assert translator_class.__name__ == 'ConfHTMLTranslator' - - -@with_app('html', testroot='api-set-translator', - confoverrides={'html_translator_class': 'translator.ExtHTMLTranslator'}) -def test_html_with_set_translator_for_html_and_html_translator_class( - app, status, warning): - # use set_translator() and html_translator_class. - # set_translator() is given priority over html_translator_clas. + # use set_translator() translator_class = app.builder.translator_class assert translator_class assert translator_class.__name__ == 'ConfHTMLTranslator' From 099daa602b003f5e5f6a00d34f55e632f772086d Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 1 Jan 2017 23:03:57 +0900 Subject: [PATCH 081/190] Drop deprecated options for graphviz extension --- doc/ext/graphviz.rst | 14 +------------- sphinx/ext/graphviz.py | 21 --------------------- 2 files changed, 1 insertion(+), 34 deletions(-) diff --git a/doc/ext/graphviz.rst b/doc/ext/graphviz.rst index 555df7c28..ef0483da7 100644 --- a/doc/ext/graphviz.rst +++ b/doc/ext/graphviz.rst @@ -76,21 +76,9 @@ It adds these directives: alternate text for HTML output. If not given, the alternate text defaults to the graphviz code. -.. versionadded:: 1.1 - All three directives support an ``inline`` flag that controls paragraph - breaks in the output. When set, the graph is inserted into the current - paragraph. If the flag is not given, paragraph breaks are introduced before - and after the image (the default). - .. versionadded:: 1.1 All three directives support a ``caption`` option that can be used to give a - caption to the diagram. Naturally, diagrams marked as "inline" cannot have a - caption. - -.. deprecated:: 1.4 - ``inline`` option is deprecated. - All three directives generate inline node by default. If ``caption`` is given, - these generate block node instead. + caption to the diagram. .. 
versionchanged:: 1.4 All three directives support a ``graphviz_dot`` option that can be switch the diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py index 19da59cf7..7df115ea0 100644 --- a/sphinx/ext/graphviz.py +++ b/sphinx/ext/graphviz.py @@ -82,7 +82,6 @@ class Graphviz(Directive): option_spec = { 'alt': directives.unchanged, 'align': align_spec, - 'inline': directives.flag, 'caption': directives.unchanged, 'graphviz_dot': directives.unchanged, 'name': directives.unchanged, @@ -122,8 +121,6 @@ class Graphviz(Directive): node['alt'] = self.options['alt'] if 'align' in self.options: node['align'] = self.options['align'] - if 'inline' in self.options: - node['inline'] = True caption = self.options.get('caption') if caption: @@ -144,7 +141,6 @@ class GraphvizSimple(Directive): option_spec = { 'alt': directives.unchanged, 'align': align_spec, - 'inline': directives.flag, 'caption': directives.unchanged, 'graphviz_dot': directives.unchanged, 'name': directives.unchanged, @@ -162,8 +158,6 @@ class GraphvizSimple(Directive): node['alt'] = self.options['alt'] if 'align' in self.options: node['align'] = self.options['align'] - if 'inline' in self.options: - node['inline'] = True caption = self.options.get('caption') if caption: @@ -233,16 +227,6 @@ def render_dot(self, code, options, format, prefix='graphviz'): return relfn, outfn -def warn_for_deprecated_option(self, node): - # type: (nodes.NodeVisitor, graphviz) -> None - if hasattr(self.builder, '_graphviz_warned_inline'): - return - - if 'inline' in node: - logger.warning(':inline: option for graphviz is deprecated since version 1.4.0.') - self.builder._graphviz_warned_inline = True - - def render_dot_html(self, node, code, options, prefix='graphviz', imgcls=None, alt=None): # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA @@ -290,7 +274,6 @@ def render_dot_html(self, node, code, options, prefix='graphviz', def html_visit_graphviz(self, node): # type: (nodes.NodeVisitor, graphviz) -> None - warn_for_deprecated_option(self, node) render_dot_html(self, node, node['code'], node['options']) @@ -327,7 +310,6 @@ def render_dot_latex(self, node, code, options, prefix='graphviz'): def latex_visit_graphviz(self, node): # type: (nodes.NodeVisitor, graphviz) -> None - warn_for_deprecated_option(self, node) render_dot_latex(self, node, node['code'], node['options']) @@ -345,13 +327,11 @@ def render_dot_texinfo(self, node, code, options, prefix='graphviz'): def texinfo_visit_graphviz(self, node): # type: (nodes.NodeVisitor, graphviz) -> None - warn_for_deprecated_option(self, node) render_dot_texinfo(self, node, node['code'], node['options']) def text_visit_graphviz(self, node): # type: (nodes.NodeVisitor, graphviz) -> None - warn_for_deprecated_option(self, node) if 'alt' in node.attributes: self.add_text(_('[graph: %s]') % node['alt']) else: @@ -361,7 +341,6 @@ def text_visit_graphviz(self, node): def man_visit_graphviz(self, node): # type: (nodes.NodeVisitor, graphviz) -> None - warn_for_deprecated_option(self, node) if 'alt' in node.attributes: self.body.append(_('[graph: %s]') % node['alt']) else: From 45d3f2e8b279efa9d42068d4109cd97eb3f2d899 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 1 Jan 2017 23:35:54 +0900 Subject: [PATCH 082/190] Drop defindex template --- sphinx/themes/basic/defindex.html | 35 ------------------------------- 1 file changed, 35 deletions(-) delete mode 100644 sphinx/themes/basic/defindex.html diff --git 
a/sphinx/themes/basic/defindex.html b/sphinx/themes/basic/defindex.html deleted file mode 100644 index 190680724..000000000 --- a/sphinx/themes/basic/defindex.html +++ /dev/null @@ -1,35 +0,0 @@ -{# - basic/defindex.html - ~~~~~~~~~~~~~~~~~~~ - - Default template for the "index" page. - - :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - :license: BSD, see LICENSE for details. -#}{{ warn('Now base template defindex.html is deprecated.') }} -{%- extends "layout.html" %} -{% set title = _('Overview') %} -{% block body %} - <h1>{{ docstitle|e }}</h1> - <p> - {{ _('Welcome! This is') }} - {% block description %}{{ _('the documentation for') }} {{ project|e }} - {{ release|e }}{% if last_updated %}, {{ _('last updated') }} {{ last_updated|e }}{% endif %}{% endblock %}. - </p> - {% block tables %} - <p><strong>{{ _('Indices and tables:') }}</strong></p> - <table class="contentstable"><tr> - <td style="width: 50%"> - <p class="biglink"><a class="biglink" href="{{ pathto("contents") }}">{{ _('Complete Table of Contents') }}</a><br> - <span class="linkdescr">{{ _('lists all sections and subsections') }}</span></p> - <p class="biglink"><a class="biglink" href="{{ pathto("search") }}">{{ _('Search Page') }}</a><br> - <span class="linkdescr">{{ _('search this documentation') }}</span></p> - </td><td style="width: 50%"> - <p class="biglink"><a class="biglink" href="{{ pathto("modindex") }}">{{ _('Global Module Index') }}</a><br> - <span class="linkdescr">{{ _('quick access to all modules') }}</span></p> - <p class="biglink"><a class="biglink" href="{{ pathto("genindex") }}">{{ _('General Index') }}</a><br> - <span class="linkdescr">{{ _('all functions, classes, terms') }}</span></p> - </td></tr> - </table> - {% endblock %} -{% endblock %} From c1c874dc5fae8a565547809405b64cc2e045bc07 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 4 Jan 2017 11:18:56 +0900 Subject: [PATCH 083/190] Update CHANGES to prevent warnings for any-role --- CHANGES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index f8376d49f..97f6b8b43 100644 --- a/CHANGES +++ b/CHANGES @@ -124,7 +124,7 @@ Incompatible changes * Fix ``genindex.html``, Sphinx's document template, link address to itself to satisfy xhtml standard. * Use epub3 builder by default. And the old epub builder is renamed to epub2. * Fix ``epub`` and ``epub3`` builders that contained links to ``genindex`` even if ``epub_use_index = False``. -* `html_translator_class` is now deprecated. +* ``html_translator_class`` is now deprecated. Use `Sphinx.set_translator()` API instead. * Drop python 2.6 and 3.3 support * Drop epub3 builder's ``epub3_page_progression_direction`` option (use ``epub3_writing_mode``). From d0c202da59445bf3a3149b79f557279a1cec450e Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 4 Jan 2017 22:14:39 +0900 Subject: [PATCH 084/190] Add "Features removed" section to CHANGES --- CHANGES | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/CHANGES b/CHANGES index 97f6b8b43..5d35eeef3 100644 --- a/CHANGES +++ b/CHANGES @@ -9,6 +9,28 @@ Incompatible changes * LaTeX ``\includegraphics`` command isn't overloaded: only ``\sphinxincludegraphics`` has the custom code to fit image to available width if oversized. 
+Features removed +---------------- + +* Configuration variables + + - epub3_contributor + - epub3_description + - epub3_page_progression_direction + - html_translator_class + - html_use_modindex + - latex_font_size + - latex_paper_size + - latex_preamble + - latex_use_modindex + - latex_use_parts + +* ``termsep`` node +* defindex.html template +* LDML format support in `today`, `today_fmt` and `html_last_updated_fmt` +* ``:inline:`` option for the directives of sphinx.ext.graphviz extension +* sphinx.ext.pngmath extension + Features added -------------- From 80c3d9c848007add749ca34f2702d14a85a7da03 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 5 Jan 2017 15:27:20 +0900 Subject: [PATCH 085/190] Fix typo --- sphinx/ext/autosummary/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index 1a373ef46..89fdbe522 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -608,8 +608,8 @@ def process_generate_options(app): suffix = get_rst_suffix(app) if suffix is None: - logging.warning('autosummary generats .rst files internally. ' - 'But your source_suffix does not contain .rst. Skipped.') + logger.warning('autosummary generats .rst files internally. ' + 'But your source_suffix does not contain .rst. Skipped.') return generate_autosummary_docs(genfiles, builder=app.builder, From 50c783ef55b787856f6fb7bee79370ff789246fa Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 5 Jan 2017 15:30:47 +0900 Subject: [PATCH 086/190] Remove unused function: display_chunk --- sphinx/util/__init__.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index 404556f36..d8a70e99a 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -536,12 +536,3 @@ def split_docinfo(text): return '', result[0] else: return result[1:] - - -def display_chunk(chunk): - # type: (Union[List, Tuple, unicode]) -> unicode - if isinstance(chunk, (list, tuple)): - if len(chunk) == 1: - return text_type(chunk[0]) - return '%s .. %s' % (chunk[0], chunk[-1]) - return text_type(chunk) From f695aac2e28e1463385b399b61fc2c4b4ef40c5c Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Fri, 6 Jan 2017 11:34:10 +0900 Subject: [PATCH 087/190] Fix mypy violations --- sphinx/application.py | 6 +++--- sphinx/builders/__init__.py | 2 +- sphinx/builders/html.py | 11 ++++++----- sphinx/builders/htmlhelp.py | 3 +++ sphinx/search/__init__.py | 2 +- 5 files changed, 14 insertions(+), 10 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 27ef41efe..7a4736d42 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -55,7 +55,7 @@ if False: from docutils.parsers import Parser # NOQA from docutils.transform import Transform # NOQA from sphinx.builders import Builder # NOQA - from sphinx.domains import Domain # NOQA + from sphinx.domains import Domain, Index # NOQA # List of all known core events. Maps name to arguments description. 
events = { @@ -430,7 +430,7 @@ class Sphinx(object): logger.debug2(message, *args, **kwargs) def _display_chunk(chunk): - # type: (Union[List, Tuple, unicode]) -> unicode + # type: (Any) -> unicode if isinstance(chunk, (list, tuple)): if len(chunk) == 1: return text_type(chunk[0]) @@ -742,7 +742,7 @@ class Sphinx(object): self.domains[domain].roles[name] = role def add_index_to_domain(self, domain, index): - # type: (unicode, unicode) -> None + # type: (unicode, Type[Index]) -> None logger.debug('[app] adding index to domain: %r', (domain, index)) if domain not in self.domains: raise ExtensionError('domain %s not yet registered' % domain) diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 3a592a257..35bf52ad6 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -364,7 +364,7 @@ class Builder(object): # add all toctree-containing files that may have changed for docname in list(docnames): - for tocdocname in self.env.files_to_rebuild.get(docname, []): + for tocdocname in self.env.files_to_rebuild.get(docname, set()): if tocdocname in self.env.found_docs: docnames.add(tocdocname) docnames.add(self.config.master_doc) diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 4a5c1fa8f..2fd7a72a8 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -816,6 +816,7 @@ class StandaloneHTMLBuilder(Builder): ctx['warn'] = self.warn # current_page_name is backwards compatibility ctx['pagename'] = ctx['current_page_name'] = pagename + ctx['encoding'] = self.config.html_output_encoding default_baseuri = self.get_target_uri(pagename) # in the singlehtml builder, default_baseuri still contains an #anchor # part, which relative_uri doesn't really like... @@ -843,14 +844,11 @@ class StandaloneHTMLBuilder(Builder): return False ctx['hasdoc'] = hasdoc - if self.name != 'htmlhelp': - ctx['encoding'] = encoding = self.config.html_output_encoding - else: - ctx['encoding'] = encoding = self.encoding ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw) self.add_sidebars(pagename, ctx) ctx.update(addctx) + self.update_page_context(pagename, templatename, ctx, event_arg) newtmpl = self.app.emit_firstresult('html-page-context', pagename, templatename, ctx, event_arg) if newtmpl: @@ -869,7 +867,7 @@ class StandaloneHTMLBuilder(Builder): # outfilename's path is in general different from self.outdir ensuredir(path.dirname(outfilename)) try: - with codecs.open(outfilename, 'w', encoding, 'xmlcharrefreplace') as f: # type: ignore # NOQA + with codecs.open(outfilename, 'w', ctx['encoding'], 'xmlcharrefreplace') as f: # type: ignore # NOQA f.write(output) except (IOError, OSError) as err: logger.warning("error writing file %s: %s", outfilename, err) @@ -880,6 +878,9 @@ class StandaloneHTMLBuilder(Builder): ensuredir(path.dirname(source_name)) copyfile(self.env.doc2path(pagename), source_name) + def update_page_context(self, pagename, templatename, ctx, event_arg): + pass + def handle_finish(self): # type: () -> None if self.indexer: diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py index 5f94460c8..852700123 100644 --- a/sphinx/builders/htmlhelp.py +++ b/sphinx/builders/htmlhelp.py @@ -199,6 +199,9 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): return codecs.open(path.join(outdir, basename), mode, self.encoding, 'xmlcharrefreplace') + def update_page_context(self, pagename, templatename, ctx, event_arg): + ctx['encoding'] = self.encoding + def handle_finish(self): self.build_hhx(self.outdir, 
self.config.htmlhelp_basename) diff --git a/sphinx/search/__init__.py b/sphinx/search/__init__.py index 3f47983d3..aff6aec34 100644 --- a/sphinx/search/__init__.py +++ b/sphinx/search/__init__.py @@ -431,7 +431,7 @@ class IndexBuilder(object): # again, stemmer must not remove words from search index if not _filter(stemmed_word) and _filter(word): stemmed_word = word - already_indexed = docname in self._title_mapping.get(stemmed_word, []) + already_indexed = docname in self._title_mapping.get(stemmed_word, set()) if _filter(stemmed_word) and not already_indexed: self._mapping.setdefault(stemmed_word, set()).add(docname) From 402a11aebad096fc37cf34a5c692ed3c49348a17 Mon Sep 17 00:00:00 2001 From: shimizukawa <shimizukawa@gmail.com> Date: Sat, 7 Jan 2017 02:17:58 +0900 Subject: [PATCH 088/190] pytest: remove deprecated test util functions --- tests/test_util_logging.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index 0fc7277a7..b3e65e1e3 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -19,10 +19,10 @@ from sphinx.util.console import colorize from sphinx.util.logging import is_suppressed_warning from sphinx.util.parallel import ParallelTasks -from util import with_app, raises, strip_escseq +import pytest +from util import strip_escseq -@with_app() def test_info_and_warning(app, status, warning): app.verbosity = 3 logging.setup(app, status, warning) @@ -47,7 +47,6 @@ def test_info_and_warning(app, status, warning): assert 'message5' in warning.getvalue() -@with_app() def test_verbosity_filter(app, status, warning): # verbosity = 0: INFO app.verbosity = 0 @@ -110,7 +109,6 @@ def test_verbosity_filter(app, status, warning): assert 'message4' in status.getvalue() -@with_app() def test_nonl_info_log(app, status, warning): logging.setup(app, status, warning) logger = logging.getLogger(__name__) @@ -135,7 +133,6 @@ def test_is_suppressed_warning(): assert is_suppressed_warning("rest", "duplicated_labels", suppress_warnings) is True -@with_app() def test_suppress_warnings(app, status, warning): logging.setup(app, status, warning) logger = logging.getLogger(__name__) @@ -173,7 +170,6 @@ def test_suppress_warnings(app, status, warning): assert app._warncount == 6 -@with_app() def test_warningiserror(app, status, warning): logging.setup(app, status, warning) logger = logging.getLogger(__name__) @@ -184,10 +180,10 @@ def test_warningiserror(app, status, warning): # if True, warning raises SphinxWarning exception app.warningiserror = True - raises(SphinxWarning, logger.warning, 'message') + with pytest.raises(SphinxWarning): + logger.warning('message') -@with_app() def test_warning_location(app, status, warning): logging.setup(app, status, warning) logger = logging.getLogger(__name__) @@ -219,7 +215,6 @@ def test_warning_location(app, status, warning): assert colorize('darkred', 'WARNING: message7') in warning.getvalue() -@with_app() def test_pending_warnings(app, status, warning): logging.setup(app, status, warning) logger = logging.getLogger(__name__) @@ -237,7 +232,6 @@ def test_pending_warnings(app, status, warning): assert 'WARNING: message2\nWARNING: message3' in strip_escseq(warning.getvalue()) -@with_app() def test_colored_logs(app, status, warning): app.verbosity = 3 logging.setup(app, status, warning) @@ -267,7 +261,6 @@ def test_colored_logs(app, status, warning): assert colorize('red', 'message9') in status.getvalue() -@with_app() def test_logging_in_ParallelTasks(app, 
status, warning): logging.setup(app, status, warning) logger = logging.getLogger(__name__) @@ -283,7 +276,6 @@ def test_logging_in_ParallelTasks(app, status, warning): assert 'index.txt: WARNING: message2' in warning.getvalue() -@with_app() def test_output_with_unencodable_char(app, status, warning): class StreamWriter(codecs.StreamWriter): def write(self, object): From 13df6f20ea9ff0a61b990ebb5ffad1ba2e251fa1 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 7 Jan 2017 15:02:19 +0900 Subject: [PATCH 089/190] Fix sphinx-build crashes with -vvvv option --- sphinx/util/logging.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index ecec2649a..eb9c03ff3 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -98,7 +98,7 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): if isinstance(level, int): super(SphinxLoggerAdapter, self).log(level, msg, *args, **kwargs) else: - levelno = LEVEL_NAMES.get(level) + levelno = LEVEL_NAMES[level] super(SphinxLoggerAdapter, self).log(levelno, msg, *args, **kwargs) def verbose(self, msg, *args, **kwargs): @@ -418,7 +418,7 @@ def setup(app, status, warning): info_handler = NewLineStreamHandler(SafeEncodingWriter(status)) # type: ignore info_handler.addFilter(InfoFilter()) - info_handler.setLevel(VERBOSITY_MAP.get(app.verbosity)) + info_handler.setLevel(VERBOSITY_MAP[app.verbosity]) info_handler.setFormatter(ColorizeFormatter()) warning_handler = WarningStreamHandler(SafeEncodingWriter(warning)) # type: ignore From ae24524d5f95cb7c94ffb2404322f1d0c0e4a6e1 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 7 Jan 2017 15:03:06 +0900 Subject: [PATCH 090/190] Drop debug2() (refs: #3304) --- sphinx/application.py | 4 +-- sphinx/ext/autodoc.py | 2 +- sphinx/util/logging.py | 8 ------ tests/test_util_logging.py | 52 ++++++++++++-------------------------- 4 files changed, 19 insertions(+), 47 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 307d63073..d6b781f59 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -425,9 +425,9 @@ class Sphinx(object): def debug2(self, message, *args, **kwargs): # type: (unicode, Any, Any) -> None """Emit a lowlevel debug-level informational message.""" - warnings.warn('app.debug2() is now deprecated. Use sphinx.util.logging instead.', + warnings.warn('app.debug2() is now deprecated. 
Use debug() instead.', RemovedInSphinx20Warning) - logger.debug2(message, *args, **kwargs) + logger.debug(message, *args, **kwargs) def _display_chunk(chunk): # type: (Any) -> unicode diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 3e7f84b8e..01ced26de 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -1762,7 +1762,7 @@ class AutoDirective(Directive): if not self.result: return self.warnings - logger.debug2('[autodoc] output:\n%s', '\n'.join(self.result)) + logger.debug('[autodoc] output:\n%s', '\n'.join(self.result)) # record all filenames as dependencies -- this will at least # partially make automatic invalidation possible diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index eb9c03ff3..b4f8f5796 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -30,7 +30,6 @@ if False: VERBOSE = 15 -DEBUG2 = 5 LEVEL_NAMES = defaultdict(lambda: logging.WARNING) # type: Dict[str, int] LEVEL_NAMES.update({ @@ -41,7 +40,6 @@ LEVEL_NAMES.update({ 'INFO': logging.INFO, 'VERBOSE': VERBOSE, 'DEBUG': logging.DEBUG, - 'DEBUG2': DEBUG2, }) VERBOSITY_MAP = defaultdict(lambda: 0) # type: Dict[int, int] @@ -49,14 +47,12 @@ VERBOSITY_MAP.update({ 0: logging.INFO, 1: VERBOSE, 2: logging.DEBUG, - 3: DEBUG2, }) COLOR_MAP = defaultdict(lambda text: text) # type: Dict[int, unicode] COLOR_MAP.update({ logging.WARNING: 'darkred', logging.DEBUG: 'darkgray', - DEBUG2: 'lightgray', }) @@ -105,10 +101,6 @@ class SphinxLoggerAdapter(logging.LoggerAdapter): # type: (unicode, Any, Any) -> None self.log(VERBOSE, msg, *args, **kwargs) - def debug2(self, msg, *args, **kwargs): - # type: (unicode, Any, Any) -> None - self.log(DEBUG2, msg, *args, **kwargs) - def process(self, msg, kwargs): # type: ignore # type: (unicode, Dict) -> Tuple[unicode, Dict] extra = kwargs.setdefault('extra', {}) diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py index b3e65e1e3..4083ec5bd 100644 --- a/tests/test_util_logging.py +++ b/tests/test_util_logging.py @@ -24,7 +24,7 @@ from util import strip_escseq def test_info_and_warning(app, status, warning): - app.verbosity = 3 + app.verbosity = 2 logging.setup(app, status, warning) logger = logging.getLogger(__name__) @@ -56,7 +56,6 @@ def test_verbosity_filter(app, status, warning): logger.info('message1') logger.verbose('message2') logger.debug('message3') - logger.debug2('message4') assert 'message1' in status.getvalue() assert 'message2' not in status.getvalue() @@ -71,7 +70,6 @@ def test_verbosity_filter(app, status, warning): logger.info('message1') logger.verbose('message2') logger.debug('message3') - logger.debug2('message4') assert 'message1' in status.getvalue() assert 'message2' in status.getvalue() @@ -86,28 +84,12 @@ def test_verbosity_filter(app, status, warning): logger.info('message1') logger.verbose('message2') logger.debug('message3') - logger.debug2('message4') assert 'message1' in status.getvalue() assert 'message2' in status.getvalue() assert 'message3' in status.getvalue() assert 'message4' not in status.getvalue() - # verbosity = 3: DEBUG2 - app.verbosity = 3 - logging.setup(app, status, warning) - logger = logging.getLogger(__name__) - - logger.info('message1') - logger.verbose('message2') - logger.debug('message3') - logger.debug2('message4') - - assert 'message1' in status.getvalue() - assert 'message2' in status.getvalue() - assert 'message3' in status.getvalue() - assert 'message4' in status.getvalue() - def test_nonl_info_log(app, status, warning): logging.setup(app, status, warning) @@ -233,32 
+215,30 @@ def test_pending_warnings(app, status, warning): def test_colored_logs(app, status, warning): - app.verbosity = 3 + app.verbosity = 2 logging.setup(app, status, warning) logger = logging.getLogger(__name__) # default colors - logger.debug2('message1') - logger.debug('message2') - logger.verbose('message3') - logger.info('message4') - logger.warning('message5') - logger.critical('message6') - logger.error('message7') + logger.debug('message1') + logger.verbose('message2') + logger.info('message3') + logger.warning('message4') + logger.critical('message5') + logger.error('message6') - assert colorize('lightgray', 'message1') in status.getvalue() - assert colorize('darkgray', 'message2') in status.getvalue() + assert colorize('darkgray', 'message1') in status.getvalue() + assert 'message2\n' in status.getvalue() # not colored assert 'message3\n' in status.getvalue() # not colored - assert 'message4\n' in status.getvalue() # not colored - assert colorize('darkred', 'WARNING: message5') in warning.getvalue() + assert colorize('darkred', 'WARNING: message4') in warning.getvalue() + assert 'WARNING: message5\n' in warning.getvalue() # not colored assert 'WARNING: message6\n' in warning.getvalue() # not colored - assert 'WARNING: message7\n' in warning.getvalue() # not colored # color specification - logger.debug('message8', color='white') - logger.info('message9', color='red') - assert colorize('white', 'message8') in status.getvalue() - assert colorize('red', 'message9') in status.getvalue() + logger.debug('message7', color='white') + logger.info('message8', color='red') + assert colorize('white', 'message7') in status.getvalue() + assert colorize('red', 'message8') in status.getvalue() def test_logging_in_ParallelTasks(app, status, warning): From 269139ec0cb1d34767164fe58d4454c2a0541424 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 7 Jan 2017 15:10:53 +0900 Subject: [PATCH 091/190] Update docs --- doc/extdev/appapi.rst | 4 ++++ doc/extdev/logging.rst | 1 - 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/extdev/appapi.rst b/doc/extdev/appapi.rst index 26349a981..f6d21c057 100644 --- a/doc/extdev/appapi.rst +++ b/doc/extdev/appapi.rst @@ -415,6 +415,10 @@ The application object also provides support for emitting leveled messages. the build; just raise an exception (:exc:`sphinx.errors.SphinxError` or a custom subclass) to do that. +.. deprecated:: 1.6 + + Please use :ref:`logging-api` instead. + .. automethod:: Sphinx.warn .. automethod:: Sphinx.info diff --git a/doc/extdev/logging.rst b/doc/extdev/logging.rst index 169bf6a5a..60e11469e 100644 --- a/doc/extdev/logging.rst +++ b/doc/extdev/logging.rst @@ -47,7 +47,6 @@ Logging API .. method:: SphinxLoggerAdapter.info(level, msg, *args, **kwargs) .. method:: SphinxLoggerAdapter.verbose(level, msg, *args, **kwargs) .. method:: SphinxLoggerAdapter.debug(level, msg, *args, **kwargs) - .. method:: SphinxLoggerAdapter.debug2(level, msg, *args, **kwargs) Logs a message with specified level on this logger. Basically, the arguments are same as python's logging module. 
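For readers unfamiliar with the ``sphinx.util.logging`` API documented in the patch above, a minimal sketch of the usage these patches converge on — a module-level logger and lazy ``%s`` arguments instead of ``%``-formatting — is given below. The extension module and its ``setup()`` hook are hypothetical placeholders for illustration only and are not part of any patch in this series:

    # illustrative only: the logging style the preceding patches standardize on
    from sphinx.util import logging

    logger = logging.getLogger(__name__)  # module-level logger, as in the diffs above

    def setup(app):
        # arguments are passed separately, so string formatting is deferred
        # until the record is actually emitted
        logger.info('initializing %s', __name__)
        logger.warning('deprecated option used: %s', 'example_option')
        return {'version': '0.1'}
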
From 43c8d981b15b3ae06128d01a1024c903db9edbc3 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 7 Jan 2017 15:40:47 +0900 Subject: [PATCH 092/190] Update CHANGES --- CHANGES | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 21175489b..29eb095d5 100644 --- a/CHANGES +++ b/CHANGES @@ -325,7 +325,7 @@ Bugs fixed * `sphinx.ext.autodoc` crashes if target code imports * from mock modules by `autodoc_mock_imports`. * #1953: ``Sphinx.add_node`` does not add handlers the translator installed by - `html_translator_class` + ``html_translator_class`` * #1797: text builder inserts blank line on top * #2894: quickstart main() doesn't use argv argument * #2874: gettext builder could not extract all text under the ``only`` @@ -1295,7 +1295,7 @@ Features added for the ids defined on the node. Thanks to Olivier Heurtier. * PR#229: Allow registration of other translators. Thanks to Russell Sim. * Add app.set_translator() API to register or override a Docutils translator - class like `html_translator_class`. + class like ``html_translator_class``. * PR#267, #1134: add 'diff' parameter to literalinclude. Thanks to Richard Wall and WAKAYAMA shirou. * PR#272: Added 'bizstyle' theme. Thanks to Shoji KUMAGAI. From 60eb712fcfc7c68c611d7186eebece248e045cd0 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 4 Jan 2017 11:20:04 +0900 Subject: [PATCH 093/190] Remove deprecated pycompat-modules --- sphinx/util/pycompat.py | 42 +---------------------------------------- 1 file changed, 1 insertion(+), 41 deletions(-) diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py index 185772cce..d4be08267 100644 --- a/sphinx/util/pycompat.py +++ b/sphinx/util/pycompat.py @@ -9,16 +9,10 @@ :license: BSD, see LICENSE for details. """ -import io import sys import codecs -import warnings -from six import PY3, class_types, text_type, exec_ -from six.moves import zip_longest -from itertools import product - -from sphinx.deprecation import RemovedInSphinx16Warning +from six import PY3, text_type, exec_ if False: # For type annotation @@ -155,37 +149,3 @@ def execfile_(filepath, _globals, open=open): else: raise exec_(code, _globals) - -# ------------------------------------------------------------------------------ -# Internal module backwards-compatibility - - -class _DeprecationWrapper(object): - def __init__(self, mod, deprecated): - # type: (Any, Dict) -> None - self._mod = mod - self._deprecated = deprecated - - def __getattr__(self, attr): - if attr in self._deprecated: - warnings.warn("sphinx.util.pycompat.%s is deprecated and will be " - "removed in Sphinx 1.6, please use the standard " - "library version instead." 
% attr, - RemovedInSphinx16Warning, stacklevel=2) - return self._deprecated[attr] - return getattr(self._mod, attr) - - -sys.modules[__name__] = _DeprecationWrapper(sys.modules[__name__], dict( # type: ignore - zip_longest = zip_longest, - product = product, - all = all, - any = any, - next = next, - open = open, - class_types = class_types, - base_exception = BaseException, - relpath = __import__('os').path.relpath, - StringIO = io.StringIO, - BytesIO = io.BytesIO, -)) From b88377169ef9fa06d50483efbf5eeda8c1cec88b Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 4 Jan 2017 11:20:37 +0900 Subject: [PATCH 094/190] Remove RemovedInSphinx16Warning --- sphinx/deprecation.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/sphinx/deprecation.py b/sphinx/deprecation.py index 9ea4ab1f3..5d3cb7ffa 100644 --- a/sphinx/deprecation.py +++ b/sphinx/deprecation.py @@ -10,10 +10,6 @@ """ -class RemovedInSphinx16Warning(DeprecationWarning): - pass - - class RemovedInSphinx17Warning(DeprecationWarning): pass From 2d6f4d6af893fab88e99e676ed8854f89321026d Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 7 Jan 2017 16:48:34 +0900 Subject: [PATCH 095/190] Emit deprecation warning for latex_elements["footer"] using warnings module --- sphinx/builders/latex.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index d77fb999a..d4dcc29ec 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -10,6 +10,7 @@ """ import os +import warnings from os import path from six import iteritems @@ -20,6 +21,7 @@ from docutils.utils import new_document from docutils.frontend import OptionParser from sphinx import package_dir, addnodes, highlighting +from sphinx.deprecation import RemovedInSphinx17Warning from sphinx.util import texescape, logging from sphinx.config import string_classes, ENUM from sphinx.errors import SphinxError @@ -250,8 +252,9 @@ def validate_config_values(app): logger.warning("latex_elements['footer'] conflicts with " "latex_elements['postamble'], ignored.") else: - logger.warning("latex_elements['footer'] is deprecated. " - "Use latex_elements['preamble'] instead.") + warnings.warn("latex_elements['footer'] is deprecated. " + "Use latex_elements['preamble'] instead.", + RemovedInSphinx17Warning) app.config.latex_elements['postamble'] = app.config.latex_elements['footer'] From 1a821b89e9952fc257a3e01a5db9c63e00fc6400 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 7 Jan 2017 18:35:57 +0900 Subject: [PATCH 096/190] Drop deprecated function: make_admonition() --- CHANGES | 1 + doc/extdev/tutorial.rst | 20 +++++++++----------- sphinx/util/compat.py | 24 ------------------------ 3 files changed, 10 insertions(+), 35 deletions(-) diff --git a/CHANGES b/CHANGES index 29eb095d5..0492e2117 100644 --- a/CHANGES +++ b/CHANGES @@ -30,6 +30,7 @@ Features removed * LDML format support in `today`, `today_fmt` and `html_last_updated_fmt` * ``:inline:`` option for the directives of sphinx.ext.graphviz extension * sphinx.ext.pngmath extension +* ``sphinx.util.compat.make_admonition()`` Features added -------------- diff --git a/doc/extdev/tutorial.rst b/doc/extdev/tutorial.rst index 10a14fab7..c011dffef 100644 --- a/doc/extdev/tutorial.rst +++ b/doc/extdev/tutorial.rst @@ -246,7 +246,6 @@ todolist directive has neither content nor arguments that need to be handled. 
The ``todo`` directive function looks like this:: - from sphinx.util.compat import make_admonition from sphinx.locale import _ class TodoDirective(Directive): @@ -260,20 +259,20 @@ The ``todo`` directive function looks like this:: targetid = "todo-%d" % env.new_serialno('todo') targetnode = nodes.target('', '', ids=[targetid]) - ad = make_admonition(todo, self.name, [_('Todo')], self.options, - self.content, self.lineno, self.content_offset, - self.block_text, self.state, self.state_machine) + todo_node = todo('\n'.join(self.content)) + todo_node += nodes.title(_('Todo'), _('Todo')) + self.state.nested_parse(self.content, self.content_offset, todo_node) if not hasattr(env, 'todo_all_todos'): env.todo_all_todos = [] env.todo_all_todos.append({ 'docname': env.docname, 'lineno': self.lineno, - 'todo': ad[0].deepcopy(), + 'todo': todo_node.deepcopy(), 'target': targetnode, }) - return [targetnode] + ad + return [targetnode, todo_node] Several important things are covered here. First, as you can see, you can refer to the build environment instance using ``self.state.document.settings.env``. @@ -285,11 +284,10 @@ returns a new unique integer on each call and therefore leads to unique target names. The target node is instantiated without any text (the first two arguments). -An admonition is created using a standard docutils function (wrapped in Sphinx -for docutils cross-version compatibility). The first argument gives the node -class, in our case ``todo``. The third argument gives the admonition title (use -``arguments`` here to let the user specify the title). A list of nodes is -returned from ``make_admonition``. +On creating admonition node, the content body of the directive are parsed using +``self.state.nested_parse``. The first argument gives the content body, and +the second one gives content offset. The third argument gives the parent node +of parsed result, in our case the ``todo`` node. Then, the todo node is added to the environment. 
This is needed to be able to create a list of all todo entries throughout the documentation, in the place diff --git a/sphinx/util/compat.py b/sphinx/util/compat.py index a9348ce75..f47237d11 100644 --- a/sphinx/util/compat.py +++ b/sphinx/util/compat.py @@ -13,9 +13,6 @@ from __future__ import absolute_import import sys import warnings -from docutils import nodes -from docutils.parsers.rst import Directive # noqa - from docutils.parsers.rst import Directive # noqa from docutils import __version__ as _du_version @@ -24,27 +21,6 @@ from sphinx.deprecation import RemovedInSphinx17Warning docutils_version = tuple(int(x) for x in _du_version.split('.')[:2]) -def make_admonition(node_class, name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - warnings.warn('make_admonition is deprecated, use ' - 'docutils.parsers.rst.directives.admonitions.BaseAdmonition ' - 'instead', DeprecationWarning, stacklevel=2) - text = '\n'.join(content) - admonition_node = node_class(text) - if arguments: - title_text = arguments[0] - textnodes, messages = state.inline_text(title_text, lineno) - admonition_node += nodes.title(title_text, '', *textnodes) - admonition_node += messages - if 'class' in options: - classes = options['class'] - else: - classes = ['admonition-' + nodes.make_id(title_text)] - admonition_node['classes'] += classes - state.nested_parse(content, content_offset, admonition_node) - return [admonition_node] - - class _DeprecationWrapper(object): def __init__(self, mod, deprecated): self._mod = mod From 929683df270c18a221e38217e2ab85386f5aa590 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 5 Jan 2017 13:41:17 +0900 Subject: [PATCH 097/190] Sphinx.status_iterator() is now deprecated --- CHANGES | 2 ++ sphinx/application.py | 54 ++++++++++++---------------------- sphinx/builders/__init__.py | 18 ++++++------ sphinx/builders/epub.py | 6 ++-- sphinx/builders/gettext.py | 17 ++++++----- sphinx/builders/html.py | 15 +++++----- sphinx/environment/__init__.py | 13 ++++---- sphinx/ext/viewcode.py | 10 +++---- sphinx/util/__init__.py | 50 +++++++++++++++++++++++++++++-- sphinx/util/console.py | 2 +- tests/test_util.py | 50 ++++++++++++++++++++++++++++++- 11 files changed, 158 insertions(+), 79 deletions(-) diff --git a/CHANGES b/CHANGES index 29eb095d5..671e65b7c 100644 --- a/CHANGES +++ b/CHANGES @@ -49,6 +49,8 @@ Deprecated * ``sphinx.util.compat.docutils_version`` is now deprecated * #2367: ``Sphinx.warn()``, ``Sphinx.info()`` and other logging methods are now deprecated. Please use ``sphinx.util.logging`` (:ref:`logging-api`) instead. +* ``Sphinx.status_iterator()` and ``Sphinx.old_status_iterator()`` is now + deprecated. Please use ``sphinx.util:status_iterator()`` intead. 
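A minimal sketch (not from the patches themselves) of calling the module-level replacement named in the entry above; the signature mirrors ``sphinx.util.status_iterator()`` as introduced further below, ``process()`` is a hypothetical per-item callback, and output goes through ``sphinx.util.logging``, so the progress line only appears once logging has been set up::

    from sphinx.util import status_iterator

    docnames = ['index', 'usage', 'api']   # placeholder document names

    # emits e.g. "writing output... [ 33%] index", rewriting the same
    # terminal line; with length=0 it falls back to old_status_iterator()
    for docname in status_iterator(docnames, 'writing output... ',
                                   "darkgreen", length=len(docnames),
                                   verbosity=0):
        process(docname)
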
Release 1.5.2 (in development) =============================== diff --git a/sphinx/application.py b/sphinx/application.py index d6b781f59..52194294d 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -21,7 +21,7 @@ import traceback from os import path from collections import deque -from six import iteritems, itervalues, text_type +from six import iteritems, itervalues from six.moves import cStringIO from docutils import nodes @@ -42,11 +42,10 @@ from sphinx.roles import XRefRole from sphinx.util import pycompat # noqa: F401 from sphinx.util import import_object from sphinx.util import logging +from sphinx.util import status_iterator, old_status_iterator, display_chunk from sphinx.util.tags import Tags from sphinx.util.osutil import ENOENT -from sphinx.util.console import ( # type: ignore - bold, darkgreen, term_width_line -) +from sphinx.util.console import bold, darkgreen # type: ignore from sphinx.util.i18n import find_catalog_source_files if False: @@ -431,48 +430,31 @@ class Sphinx(object): def _display_chunk(chunk): # type: (Any) -> unicode - if isinstance(chunk, (list, tuple)): - if len(chunk) == 1: - return text_type(chunk[0]) - return '%s .. %s' % (chunk[0], chunk[-1]) - return text_type(chunk) + warnings.warn('app._display_chunk() is now deprecated. ' + 'Use sphinx.util.display_chunk() instead.', + RemovedInSphinx17Warning) + return display_chunk(chunk) def old_status_iterator(self, iterable, summary, colorfunc=darkgreen, - stringify_func=_display_chunk): + stringify_func=display_chunk): # type: (Iterable, unicode, Callable, Callable[[Any], unicode]) -> Iterator - l = 0 - for item in iterable: - if l == 0: - logger.info(bold(summary), nonl=True) - l = 1 - logger.info(colorfunc(stringify_func(item)) + ' ', nonl=True) + warnings.warn('app.old_status_iterator() is now deprecated. ' + 'Use sphinx.util.status_iterator() instead.', + RemovedInSphinx17Warning) + for item in old_status_iterator(iterable, summary, + color="darkgreen", stringify_func=stringify_func): yield item - if l == 1: - logger.info('') # new version with progress info def status_iterator(self, iterable, summary, colorfunc=darkgreen, length=0, stringify_func=_display_chunk): # type: (Iterable, unicode, Callable, int, Callable[[Any], unicode]) -> Iterable - if length == 0: - for item in self.old_status_iterator(iterable, summary, colorfunc, - stringify_func): - yield item - return - l = 0 - summary = bold(summary) - for item in iterable: - l += 1 - s = '%s[%3d%%] %s' % (summary, 100*l/length, - colorfunc(stringify_func(item))) - if self.verbosity: - s += '\n' - else: - s = term_width_line(s) - logger.info(s, nonl=True) + warnings.warn('app.status_iterator() is now deprecated. 
' + 'Use sphinx.util.status_iterator() instead.', + RemovedInSphinx17Warning) + for item in status_iterator(iterable, summary, length=length, verbosity=self.verbosity, + color="darkgreen", stringify_func=stringify_func): yield item - if l > 0: - logger.info('') # ---- general extensibility interface ------------------------------------- diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 35bf52ad6..00aeac771 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -19,10 +19,10 @@ except ImportError: from docutils import nodes -from sphinx.util import i18n, path_stabilize, logging +from sphinx.util import i18n, path_stabilize, logging, status_iterator from sphinx.util.osutil import SEP, relative_uri from sphinx.util.i18n import find_catalog -from sphinx.util.console import bold, darkgreen # type: ignore +from sphinx.util.console import bold # type: ignore from sphinx.util.parallel import ParallelTasks, SerialTasks, make_chunks, \ parallel_available @@ -183,9 +183,9 @@ class Builder(object): return path.relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP) logger.info(bold('building [mo]: ') + message) - for catalog in self.app.status_iterator( - catalogs, 'writing output... ', darkgreen, len(catalogs), - cat2relpath): + for catalog in status_iterator(catalogs, 'writing output... ', "darkgreen", + len(catalogs), self.app.verbosity, + stringify_func=cat2relpath): catalog.write_mo(self.config.language) def compile_all_catalogs(self): @@ -384,8 +384,8 @@ class Builder(object): def _write_serial(self, docnames): # type: (Sequence[unicode]) -> None with logging.pending_warnings(): - for docname in self.app.status_iterator( - docnames, 'writing output... ', darkgreen, len(docnames)): + for docname in status_iterator(docnames, 'writing output... ', "darkgreen", + len(docnames), self.app.verbosity): doctree = self.env.get_and_resolve_doctree(docname, self) self.write_doc_serialized(docname, doctree) self.write_doc(docname, doctree) @@ -406,8 +406,8 @@ class Builder(object): tasks = ParallelTasks(nproc) chunks = make_chunks(docnames, nproc) - for chunk in self.app.status_iterator( - chunks, 'writing output... ', darkgreen, len(chunks)): + for chunk in status_iterator(chunks, 'writing output... ', "darkgreen", + len(chunks), self.app.verbosity): arg = [] for i, docname in enumerate(chunk): doctree = self.env.get_and_resolve_doctree(docname, self) diff --git a/sphinx/builders/epub.py b/sphinx/builders/epub.py index a48f94436..d351be5a2 100644 --- a/sphinx/builders/epub.py +++ b/sphinx/builders/epub.py @@ -30,9 +30,9 @@ from docutils import nodes from sphinx import addnodes from sphinx.builders.html import StandaloneHTMLBuilder from sphinx.util import logging +from sphinx.util import status_iterator from sphinx.util.osutil import ensuredir, copyfile, make_filename, EEXIST from sphinx.util.smartypants import sphinx_smarty_pants as ssp -from sphinx.util.console import brown # type: ignore if False: # For type annotation @@ -470,8 +470,8 @@ class EpubBuilder(StandaloneHTMLBuilder): converting the format and resizing the image if necessary/possible. """ ensuredir(path.join(self.outdir, self.imagedir)) - for src in self.app.status_iterator(self.images, 'copying images... ', - brown, len(self.images)): + for src in status_iterator(self.images, 'copying images... 
', "brown", + len(self.images), self.app.verbosity): dest = self.images[src] try: img = Image.open(path.join(self.srcdir, src)) diff --git a/sphinx/builders/gettext.py b/sphinx/builders/gettext.py index 6993210f3..0c255abf2 100644 --- a/sphinx/builders/gettext.py +++ b/sphinx/builders/gettext.py @@ -21,12 +21,12 @@ from uuid import uuid4 from six import iteritems from sphinx.builders import Builder -from sphinx.util import split_index_msg, logging +from sphinx.util import split_index_msg, logging, status_iterator from sphinx.util.tags import Tags from sphinx.util.nodes import extract_messages, traverse_translatable_index from sphinx.util.osutil import safe_relpath, ensuredir, canon_path from sphinx.util.i18n import find_catalog -from sphinx.util.console import darkgreen, purple, bold # type: ignore +from sphinx.util.console import bold # type: ignore from sphinx.locale import pairindextypes if False: @@ -224,8 +224,8 @@ class MessageCatalogBuilder(I18nBuilder): extract_translations = self.templates.environment.extract_translations - for template in self.app.status_iterator( - files, 'reading templates... ', purple, len(files)): + for template in status_iterator(files, 'reading templates... ', "purple", + len(files), self.app.verbosity): with open(template, 'r', encoding='utf-8') as f: # type: ignore context = f.read() for line, meth, msg in extract_translations(context): @@ -247,10 +247,11 @@ class MessageCatalogBuilder(I18nBuilder): ctime = datetime.fromtimestamp( # type: ignore timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'), ) - for textdomain, catalog in self.app.status_iterator( - iteritems(self.catalogs), "writing message catalogs... ", - darkgreen, len(self.catalogs), - lambda textdomain__: textdomain__[0]): + for textdomain, catalog in status_iterator(iteritems(self.catalogs), + "writing message catalogs... ", + "darkgreen", len(self.catalogs), + self.app.verbosity, + lambda textdomain__: textdomain__[0]): # noop if config.gettext_compact is set ensuredir(path.join(self.outdir, path.dirname(textdomain))) diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 2fd7a72a8..a07248d82 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -28,7 +28,7 @@ from docutils.frontend import OptionParser from docutils.readers.doctree import Reader as DoctreeReader from sphinx import package_dir, __display_version__ -from sphinx.util import jsonimpl, logging +from sphinx.util import jsonimpl, logging, status_iterator from sphinx.util.i18n import format_date from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \ movefile, copyfile @@ -42,7 +42,7 @@ from sphinx.theming import Theme from sphinx.builders import Builder from sphinx.application import ENV_PICKLE_FILENAME from sphinx.highlighting import PygmentsBridge -from sphinx.util.console import bold, darkgreen, brown # type: ignore +from sphinx.util.console import bold, darkgreen # type: ignore from sphinx.writers.html import HTMLWriter, HTMLTranslator, \ SmartyPantsHTMLTranslator @@ -584,8 +584,8 @@ class StandaloneHTMLBuilder(Builder): # copy image files if self.images: ensuredir(path.join(self.outdir, self.imagedir)) - for src in self.app.status_iterator(self.images, 'copying images... ', - brown, len(self.images)): + for src in status_iterator(self.images, 'copying images... 
', "brown", + len(self.images), self.app.verbosity): dest = self.images[src] try: copyfile(path.join(self.srcdir, src), @@ -601,10 +601,9 @@ class StandaloneHTMLBuilder(Builder): # copy downloadable files if self.env.dlfiles: ensuredir(path.join(self.outdir, '_downloads')) - for src in self.app.status_iterator(self.env.dlfiles, - 'copying downloadable files... ', - brown, len(self.env.dlfiles), - stringify_func=to_relpath): + for src in status_iterator(self.env.dlfiles, 'copying downloadable files... ', + "brown", len(self.env.dlfiles), self.app.verbosity, + stringify_func=to_relpath): dest = self.env.dlfiles[src][1] try: copyfile(path.join(self.srcdir, src), diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 1fb9ec19e..dee67c96d 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -33,14 +33,15 @@ from docutils.frontend import OptionParser from sphinx import addnodes from sphinx.io import SphinxStandaloneReader, SphinxDummyWriter, SphinxFileInput -from sphinx.util import get_matching_docs, docname_join, FilenameUniqDict, logging +from sphinx.util import logging +from sphinx.util import get_matching_docs, docname_join, FilenameUniqDict, status_iterator from sphinx.util.nodes import clean_astext, WarningStream, is_translatable, \ process_only_nodes from sphinx.util.osutil import SEP, getcwd, fs_encoding, ensuredir from sphinx.util.images import guess_mimetype from sphinx.util.i18n import find_catalog_files, get_image_filename_for_language, \ search_image_for_language -from sphinx.util.console import bold, purple # type: ignore +from sphinx.util.console import bold # type: ignore from sphinx.util.docutils import sphinx_domains from sphinx.util.matching import compile_matchers from sphinx.util.parallel import ParallelTasks, parallel_available, make_chunks @@ -623,8 +624,8 @@ class BuildEnvironment(object): def _read_serial(self, docnames, app): # type: (List[unicode], Sphinx) -> None - for docname in app.status_iterator(docnames, 'reading sources... ', - purple, len(docnames)): + for docname in status_iterator(docnames, 'reading sources... ', "purple", + len(docnames), self.app.verbosity): # remove all inventory entries for that file app.emit('env-purge-doc', self, docname) self.clear_doc(docname) @@ -661,8 +662,8 @@ class BuildEnvironment(object): chunks = make_chunks(docnames, nproc) warnings = [] # type: List[Tuple] - for chunk in app.status_iterator( - chunks, 'reading sources... ', purple, len(chunks)): + for chunk in status_iterator(chunks, 'reading sources... ', "purple", + len(chunks), self.app.verbosity): tasks.add_task(read_process, chunk, merge) # make sure all threads have finished diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py index c639c6c55..c6b666a0a 100644 --- a/sphinx/ext/viewcode.py +++ b/sphinx/ext/viewcode.py @@ -19,9 +19,8 @@ import sphinx from sphinx import addnodes from sphinx.locale import _ from sphinx.pycode import ModuleAnalyzer -from sphinx.util import get_full_modname, logging +from sphinx.util import get_full_modname, logging, status_iterator from sphinx.util.nodes import make_refnode -from sphinx.util.console import blue # type: ignore if False: # For type annotation @@ -147,9 +146,10 @@ def collect_pages(app): # app.builder.info(' (%d module code pages)' % # len(env._viewcode_modules), nonl=1) - for modname, entry in app.status_iterator( - iteritems(env._viewcode_modules), 'highlighting module code... 
', # type:ignore - blue, len(env._viewcode_modules), lambda x: x[0]): # type:ignore + for modname, entry in status_iterator(iteritems(env._viewcode_modules), # type: ignore + 'highlighting module code... ', "blue", + len(env._viewcode_modules), # type: ignore + app.verbosity, lambda x: x[0]): if not entry: continue code, tags, used, refname = entry diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index 11e48b4a1..ff5e35fa4 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -29,7 +29,7 @@ from docutils.utils import relative_path from sphinx.errors import PycodeError, SphinxParallelError, ExtensionError from sphinx.util import logging -from sphinx.util.console import strip_colors +from sphinx.util.console import strip_colors, colorize, bold, term_width_line # type: ignore from sphinx.util.fileutil import copy_asset_file from sphinx.util.osutil import fs_encoding @@ -45,7 +45,7 @@ from sphinx.util.matching import patfilter # noqa if False: # For type annotation - from typing import Any, Callable, Iterable, Pattern, Sequence, Tuple # NOQA + from typing import Any, Callable, Iterable, Iterator, Pattern, Sequence, Tuple # NOQA logger = logging.getLogger(__name__) @@ -537,3 +537,49 @@ def split_docinfo(text): return '', result[0] else: return result[1:] + + +def display_chunk(chunk): + # type: (Any) -> unicode + if isinstance(chunk, (list, tuple)): + if len(chunk) == 1: + return text_type(chunk[0]) + return '%s .. %s' % (chunk[0], chunk[-1]) + return text_type(chunk) + + +def old_status_iterator(iterable, summary, color="darkgreen", stringify_func=display_chunk): + # type: (Iterable, unicode, str, Callable[[Any], unicode]) -> Iterator + l = 0 + for item in iterable: + if l == 0: + logger.info(bold(summary), nonl=True) + l = 1 + logger.info(stringify_func(item), color=color, nonl=True) + logger.info(" ", nonl=True) + yield item + if l == 1: + logger.info('') + + +# new version with progress info +def status_iterator(iterable, summary, color="darkgreen", length=0, verbosity=0, + stringify_func=display_chunk): + # type: (Iterable, unicode, str, int, int, Callable[[Any], unicode]) -> Iterable + if length == 0: + for item in old_status_iterator(iterable, summary, color, stringify_func): + yield item + return + l = 0 + summary = bold(summary) + for item in iterable: + l += 1 + s = '%s[%3d%%] %s' % (summary, 100 * l / length, colorize(color, stringify_func(item))) + if verbosity: + s += '\n' + else: + s = term_width_line(s) + logger.info(s, nonl=True) + yield item + if l > 0: + logger.info('') diff --git a/sphinx/util/console.py b/sphinx/util/console.py index 6dc4b88ca..f4e03775f 100644 --- a/sphinx/util/console.py +++ b/sphinx/util/console.py @@ -83,7 +83,7 @@ def coloron(): def colorize(name, text): - # type: (str, str) -> str + # type: (str, unicode) -> unicode return codes.get(name, '') + text + codes.get('reset', '') diff --git a/tests/test_util.py b/tests/test_util.py index d97329668..08a4f03c7 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -8,7 +8,14 @@ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" -from sphinx.util import encode_uri, split_docinfo + +import pytest +from mock import patch + +from sphinx.util import display_chunk, encode_uri, split_docinfo, status_iterator +from sphinx.util import logging + +from util import with_app, strip_escseq def test_encode_uri(): @@ -46,3 +53,44 @@ def test_splitdocinfo(): docinfo, content = split_docinfo(source) assert docinfo == ":multiline: one\n\ttwo\n\tthree\n" assert content == '\nHello world.\n' + + +def test_display_chunk(): + assert display_chunk('hello') == 'hello' + assert display_chunk(['hello']) == 'hello' + assert display_chunk(['hello', 'sphinx', 'world']) == 'hello .. world' + assert display_chunk(('hello',)) == 'hello' + assert display_chunk(('hello', 'sphinx', 'world')) == 'hello .. world' + + +@pytest.mark.sphinx('dummy') +@patch('sphinx.util.console._tw', 40) # terminal width = 40 +def test_status_iterator(app, status, warning): + logging.setup(app, status, warning) + + # test for old_status_iterator + status.truncate(0) + yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ')) + output = strip_escseq(status.getvalue()) + assert 'testing ... hello sphinx world \n' in output + assert yields == ['hello', 'sphinx', 'world'] + + # test for status_iterator (verbosity=0) + status.truncate(0) + yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ', + length=3, verbosity=0)) + output = strip_escseq(status.getvalue()) + assert 'testing ... [ 33%] hello \r' in output + assert 'testing ... [ 66%] sphinx \r' in output + assert 'testing ... [100%] world \r\n' in output + assert yields == ['hello', 'sphinx', 'world'] + + # test for status_iterator (verbosity=1) + status.truncate(0) + yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ', + length=3, verbosity=1)) + output = strip_escseq(status.getvalue()) + assert 'testing ... [ 33%] hello\n' in output + assert 'testing ... [ 66%] sphinx\n' in output + assert 'testing ... [100%] world\n\n' in output + assert yields == ['hello', 'sphinx', 'world'] From 792286e219b707c83a12d02f8a9c8dae04978925 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 7 Jan 2017 20:35:35 +0900 Subject: [PATCH 098/190] Update CHANGES --- CHANGES | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 29eb095d5..eeae43baf 100644 --- a/CHANGES +++ b/CHANGES @@ -732,7 +732,8 @@ Incompatible changes ``"MMMM dd, YYYY"`` is default format for `today_fmt` and `html_last_updated_fmt`. However strftime format like ``"%B %d, %Y"`` is also supported for backward compatibility until Sphinx-1.5. Later format will be disabled from Sphinx-1.5. -* #2327: `latex_use_parts` is deprecated now. Use `latex_toplevel_sectioning` instead. +* #2327: ``latex_use_parts`` is deprecated now. Use `latex_toplevel_sectioning` + instead. * #2337: Use ``\url{URL}`` macro instead of ``\href{URL}{URL}`` in LaTeX writer. * #1498: manpage writer: don't make whole of item in definition list bold if it includes strong node. * #582: Remove hint message from quick search box for html output. From 9e9902cd5dcc616d359c8e3e4cf45c207300499d Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Sun, 8 Jan 2017 18:01:34 +0100 Subject: [PATCH 099/190] Deprecate use of notice environment in LaTeX source. 
--- sphinx/texinputs/sphinx.sty | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/sphinx/texinputs/sphinx.sty b/sphinx/texinputs/sphinx.sty index 249e9ed9d..3180fa52d 100644 --- a/sphinx/texinputs/sphinx.sty +++ b/sphinx/texinputs/sphinx.sty @@ -786,10 +786,7 @@ \csname\@backslashchar color@#2\endcsname } % the main dispatch for all types of notices -\newenvironment{sphinxadmonition}{\begin{notice}}{\end{notice}} -% use of ``notice'' is for backwards compatibility and will be removed in -% future release; sphinxadmonition environment will be defined directly. -\newenvironment{notice}[2]{% #1=type, #2=heading +\newenvironment{sphinxadmonition}[2]{% #1=type, #2=heading % can't use #1 directly in definition of end part \def\spx@noticetype {#1}% % set parameters of heavybox/lightbox @@ -800,6 +797,16 @@ \begin{sphinx#1}{#2}} % in end part, need to go around a LaTeX's "feature" {\edef\spx@temp{\noexpand\end{sphinx\spx@noticetype}}\spx@temp} +% use of ``notice'' is for backwards compatibility and will be removed in +% Sphinx 1.7. +\newenvironment{notice} + {\AtEndDocument{\typeout +{**** DEPRECATION WARNING:^^J +This document was probably built with a Sphinx extension using ``notice''^^J +environment. At Sphinx 1.7, ``notice'' environment will be removed. Please^^J +report to extension author to use ``sphinxadmonition'' instead.^^J% +****}}% + \begin{sphinxadmonition}}{\end{sphinxadmonition}} % Allow the release number to be specified independently of the % \date{}. This allows the date to reflect the document's date and From ecd7b621488c3721ca1bbbecfff0150c7365de54 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Mon, 9 Jan 2017 18:14:53 +0900 Subject: [PATCH 100/190] Update type annotations --- sphinx/application.py | 2 +- sphinx/builders/__init__.py | 2 +- sphinx/builders/html.py | 2 +- sphinx/domains/__init__.py | 4 ++-- sphinx/domains/python.py | 4 ++-- sphinx/ext/intersphinx.py | 2 +- sphinx/util/parallel.py | 2 +- sphinx/util/requests.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index d6b781f59..168e48983 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -564,7 +564,7 @@ class Sphinx(object): def emit(self, event, *args): # type: (unicode, Any) -> List try: - logger.debug2('[app] emitting event: %r%s', event, repr(args)[:100]) + logger.debug('[app] emitting event: %r%s', event, repr(args)[:100]) except Exception: # not every object likes to be repr()'d (think # random stuff coming via autodoc) diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 35bf52ad6..191646a94 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -391,7 +391,7 @@ class Builder(object): self.write_doc(docname, doctree) def _write_parallel(self, docnames, nproc): - # type: (Iterable[unicode], int) -> None + # type: (Sequence[unicode], int) -> None def write_process(docs): # type: (List[Tuple[unicode, nodes.Node]]) -> None for docname, doctree in docs: diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 2fd7a72a8..d9512b5df 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -732,7 +732,7 @@ class StandaloneHTMLBuilder(Builder): reference.append(node) def load_indexer(self, docnames): - # type: (Set[unicode]) -> None + # type: (Iterable[unicode]) -> None keep = set(self.env.all_docs) - set(docnames) try: searchindexfn = path.join(self.outdir, self.searchindex_filename) diff --git 
a/sphinx/domains/__init__.py b/sphinx/domains/__init__.py index a90ee84aa..2389e342c 100644 --- a/sphinx/domains/__init__.py +++ b/sphinx/domains/__init__.py @@ -79,7 +79,7 @@ class Index(object): self.domain = domain def generate(self, docnames=None): - # type: (List[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA + # type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA """Return entries for the index given by *name*. If *docnames* is given, restrict to entries referring to these docnames. @@ -107,7 +107,7 @@ class Index(object): Qualifier and description are not rendered e.g. in LaTeX output. """ - return [] + return tuple() class Domain(object): diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py index 886b1f863..2efc6db0b 100644 --- a/sphinx/domains/python.py +++ b/sphinx/domains/python.py @@ -27,7 +27,7 @@ from sphinx.util.docfields import Field, GroupedField, TypedField if False: # For type annotation - from typing import Any, Iterator, Tuple, Union # NOQA + from typing import Any, Iterable, Iterator, Tuple, Union # NOQA from sphinx.application import Sphinx # NOQA from sphinx.builders import Builder # NOQA from sphinx.environment import BuildEnvironment # NOQA @@ -582,7 +582,7 @@ class PythonModuleIndex(Index): shortname = l_('modules') def generate(self, docnames=None): - # type: (List[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA + # type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA content = {} # type: Dict[unicode, List] # list of prefixes to ignore ignores = None # type: List[unicode] diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py index cc321e386..af39722ce 100644 --- a/sphinx/ext/intersphinx.py +++ b/sphinx/ext/intersphinx.py @@ -272,7 +272,7 @@ def load_mappings(app): if isinstance(value, (list, tuple)): # new format - name, (uri, inv) = key, value + name, (uri, inv) = key, value # type: ignore if not isinstance(name, string_types): logger.warning('intersphinx identifier %r is not string. Ignored', name) continue diff --git a/sphinx/util/parallel.py b/sphinx/util/parallel.py index 9246841e5..1d97a511c 100644 --- a/sphinx/util/parallel.py +++ b/sphinx/util/parallel.py @@ -88,7 +88,7 @@ class ParallelTasks(object): failed = False except BaseException as err: failed = True - errmsg = traceback.format_exception_only(err.__class__, err)[0].strip() + errmsg = traceback.format_exception_only(err.__class__, err)[0].strip() # type: ignore # NOQA ret = (errmsg, traceback.format_exc()) logging.convert_serializable(collector.logs) pipe.send((failed, collector.logs, ret)) diff --git a/sphinx/util/requests.py b/sphinx/util/requests.py index 3f6ff3879..03e815c2c 100644 --- a/sphinx/util/requests.py +++ b/sphinx/util/requests.py @@ -22,7 +22,7 @@ try: except ImportError: # python-requests package in Debian jessie does not provide ``requests.packages.urllib3``. # So try to import the exceptions from urllib3 package. 
- from urllib3.exceptions import SSLError, InsecureRequestWarning + from urllib3.exceptions import SSLError, InsecureRequestWarning # type: ignore # try to load requests[security] try: From de3d03a7b5f0ffd9e596a2da918b612e97c54885 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 10 Jan 2017 01:06:57 +0900 Subject: [PATCH 101/190] Fix testcase --- tests/test_intl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_intl.py b/tests/test_intl.py index 5fb3cea24..035a9c879 100644 --- a/tests/test_intl.py +++ b/tests/test_intl.py @@ -21,7 +21,7 @@ from six import string_types import pytest from util import tempdir, rootdir, path, assert_re_search, \ - assert_not_re_search, assert_startswith, assert_node, etree_parse + assert_not_re_search, assert_startswith, assert_node, etree_parse, strip_escseq sphinx_intl = pytest.mark.sphinx( From afc04d5f1c12967950337ef7e0ef1f2fb4dab64d Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Mon, 9 Jan 2017 17:24:57 +0100 Subject: [PATCH 102/190] Update CHANGES about deprecated latex environment ``notice`` modified: CHANGES --- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index 9c1c684ec..1e2fa314c 100644 --- a/CHANGES +++ b/CHANGES @@ -49,6 +49,9 @@ Deprecated * ``sphinx.util.compat.docutils_version`` is now deprecated * #2367: ``Sphinx.warn()``, ``Sphinx.info()`` and other logging methods are now deprecated. Please use ``sphinx.util.logging`` (:ref:`logging-api`) instead. +* #3318: ``notice`` is now deprecated as LaTeX environment name and will be + removed at Sphinx 1.7. Extension authors please use ``sphinxadmonition`` + instead (as Sphinx does since 1.5.) Release 1.5.2 (in development) =============================== From 2402fd7ebe2ff64de32544c559d30b2b00024339 Mon Sep 17 00:00:00 2001 From: Gerald Baier <gerald.baier@tum.de> Date: Wed, 11 Jan 2017 11:23:34 +0100 Subject: [PATCH 103/190] apidoc takes extension options --- sphinx/apidoc.py | 10 ++++++++++ tests/test_apidoc.py | 18 ++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/sphinx/apidoc.py b/sphinx/apidoc.py index e48a527a5..89d59da16 100644 --- a/sphinx/apidoc.py +++ b/sphinx/apidoc.py @@ -346,6 +346,12 @@ Note: By default this script will not overwrite already created files.""") 'defaults to --doc-version') parser.add_option('--version', action='store_true', dest='show_version', help='Show version information and exit') + from sphinx.quickstart import EXTENSIONS + group = parser.add_option_group('Extension options') + for ext in EXTENSIONS: + group.add_option('--ext-' + ext, action='store_true', + dest='ext_' + ext, default=False, + help='enable %s extension' % ext) (opts, args) = parser.parse_args(argv[1:]) @@ -404,6 +410,10 @@ Note: By default this script will not overwrite already created files.""") module_path = rootpath, append_syspath = opts.append_syspath, ) + enabled_exts = {'ext_'+ext: getattr(opts, 'ext_'+ext) + for ext in EXTENSIONS if getattr(opts, 'ext_'+ext)} + d.update(enabled_exts) + if isinstance(opts.header, binary_type): d['project'] = d['project'].decode('utf-8') if isinstance(opts.author, binary_type): diff --git a/tests/test_apidoc.py b/tests/test_apidoc.py index d44868aeb..49788123d 100644 --- a/tests/test_apidoc.py +++ b/tests/test_apidoc.py @@ -145,3 +145,21 @@ def test_multibyte_parameters(make_app, apidoc): app.build() print(app._status.getvalue()) print(app._warning.getvalue()) + + +@pytest.mark.apidoc( + coderoot=(rootdir / 'root'), + 
options=['--ext-mathjax'], +) +def test_extension_parsed(make_app, apidoc): + outdir = apidoc.outdir + assert (outdir / 'conf.py').isfile() + + with open(outdir / 'conf.py') as f: + rst = f.read() + assert "sphinx.ext.mathjax" in rst + + app = make_app('text', srcdir=outdir) + app.build() + print(app._status.getvalue()) + print(app._warning.getvalue()) From 318e49fbd835721b5429df3c66fbfdccc03b5fc9 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 12 Jan 2017 14:15:11 +0900 Subject: [PATCH 104/190] Fix flake8 violations --- sphinx/builders/latex.py | 6 +++--- sphinx/builders/qthelp.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index d77fb999a..cafe51a42 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -88,7 +88,7 @@ class LaTeXBuilder(Builder): 'document %s', docname) continue self.document_data.append(entry) # type: ignore - if docname.endswith(SEP+'index'): + if docname.endswith(SEP + 'index'): docname = docname[:-5] self.titles.append((docname, entry[2])) @@ -207,7 +207,7 @@ class LaTeXBuilder(Builder): if self.images: logger.info(bold('copying images...'), nonl=1) for src, dest in iteritems(self.images): - logger.info(' '+src, nonl=1) + logger.info(' ' + src, nonl=1) copy_asset_file(path.join(self.srcdir, src), path.join(self.outdir, dest)) logger.info('') @@ -225,7 +225,7 @@ class LaTeXBuilder(Builder): if self.config.latex_additional_files: logger.info(bold('copying additional files...'), nonl=1) for filename in self.config.latex_additional_files: - logger.info(' '+filename, nonl=1) + logger.info(' ' + filename, nonl=1) copy_asset_file(path.join(self.confdir, filename), self.outdir) logger.info('') diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py index 4223e2ec9..27178676f 100644 --- a/sphinx/builders/qthelp.py +++ b/sphinx/builders/qthelp.py @@ -203,7 +203,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): nspace = nspace.lower() # write the project file - with codecs.open(path.join(outdir, outname + '.qhp'), 'w', 'utf-8') as f: # type: ignore + with codecs.open(path.join(outdir, outname + '.qhp'), 'w', 'utf-8') as f: # type: ignore # NOQA f.write(project_template % { # type: ignore 'outname': htmlescape(outname), 'title': htmlescape(self.config.html_title), From d9ae4ed81eb894921b23b0bb0771e0efde7350c8 Mon Sep 17 00:00:00 2001 From: Gerald Baier <gerald.baier@tum.de> Date: Thu, 12 Jan 2017 08:52:32 +0100 Subject: [PATCH 105/190] implementing PR comments --- sphinx/apidoc.py | 4 ++-- tests/test_apidoc.py | 5 ----- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/sphinx/apidoc.py b/sphinx/apidoc.py index 89d59da16..87a9f3fb3 100644 --- a/sphinx/apidoc.py +++ b/sphinx/apidoc.py @@ -410,8 +410,8 @@ Note: By default this script will not overwrite already created files.""") module_path = rootpath, append_syspath = opts.append_syspath, ) - enabled_exts = {'ext_'+ext: getattr(opts, 'ext_'+ext) - for ext in EXTENSIONS if getattr(opts, 'ext_'+ext)} + enabled_exts = {'ext_' + ext: getattr(opts, 'ext_' + ext) + for ext in EXTENSIONS if getattr(opts, 'ext_' + ext)} d.update(enabled_exts) if isinstance(opts.header, binary_type): diff --git a/tests/test_apidoc.py b/tests/test_apidoc.py index 49788123d..7e6c4fd0a 100644 --- a/tests/test_apidoc.py +++ b/tests/test_apidoc.py @@ -158,8 +158,3 @@ def test_extension_parsed(make_app, apidoc): with open(outdir / 'conf.py') as f: rst = f.read() assert "sphinx.ext.mathjax" in rst - - app = 
make_app('text', srcdir=outdir) - app.build() - print(app._status.getvalue()) - print(app._warning.getvalue()) From a2cddfeac990b0ab472b304f86a8986ec41c9019 Mon Sep 17 00:00:00 2001 From: Gerald Baier <gerald.baier@tum.de> Date: Thu, 12 Jan 2017 15:15:40 +0100 Subject: [PATCH 106/190] moved import to top of file --- sphinx/apidoc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/apidoc.py b/sphinx/apidoc.py index 87a9f3fb3..82b1e7db6 100644 --- a/sphinx/apidoc.py +++ b/sphinx/apidoc.py @@ -25,6 +25,7 @@ from fnmatch import fnmatch from sphinx.util.osutil import FileAvoidWrite, walk from sphinx import __display_version__ +from sphinx.quickstart import EXTENSIONS if False: # For type annotation @@ -346,7 +347,6 @@ Note: By default this script will not overwrite already created files.""") 'defaults to --doc-version') parser.add_option('--version', action='store_true', dest='show_version', help='Show version information and exit') - from sphinx.quickstart import EXTENSIONS group = parser.add_option_group('Extension options') for ext in EXTENSIONS: group.add_option('--ext-' + ext, action='store_true', From 160b9a5fd08cf32ae313a7565303a92a434b424d Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Fri, 13 Jan 2017 22:36:17 +0100 Subject: [PATCH 107/190] Added pyversion option for doctest (issue 3303) --- sphinx/ext/doctest.py | 38 +++++++++++++++++++++++++++- tests/roots/test-doctest/doctest.txt | 16 +++++++++++- 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index c355415f7..b71fa2569 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -15,6 +15,7 @@ import re import sys import time import codecs +import platform from os import path import doctest @@ -23,6 +24,8 @@ from six import itervalues, StringIO, binary_type, text_type, PY2 from docutils import nodes from docutils.parsers.rst import Directive, directives +from distutils.version import StrictVersion as V + import sphinx from sphinx.builders import Builder from sphinx.util import force_decode, logging @@ -103,10 +106,40 @@ class TestDirective(Directive): for option in option_strings: if (option[0] not in '+-' or option[1:] not in doctest.OPTIONFLAGS_BY_NAME): # type: ignore - # XXX warn? + self.state.document.reporter.warning( + "missing '+' or '-' in '%s' option." 
% option, + line=self.lineno) continue flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]] # type: ignore node['options'][flag] = (option[0] == '+') + if self.name == 'doctest' and 'pyversion' in self.options: + try: + option = self.options['pyversion'] + option_strings = option.split() + # :pyversion: >= 3.6 --> op='>=', version='3.6' + operand, version = [item.strip() for item in option_strings] + operands = ('<=', '<', '==', '>=', '>') + if operand not in operands: + self.state.document.reporter.warning( + "'%s' is not a valid pyversion operand.\n" + "Avaliable operands: %s" % (operand, operands), + line=self.lineno) + else: + rv = V(platform.python_version()) # Running version + sv = V(version) # Specified version + skip = ((operand == '<=' and not (rv <= sv)) or + (operand == '<' and not (rv < sv)) or + (operand == '==' and not (rv == sv)) or + (operand == '>=' and not (rv >= sv)) or + (operand == '>' and not (rv > sv))) + if skip: + flag = doctest.OPTIONFLAGS_BY_NAME['SKIP'] + node['options'][flag] = True + except ValueError: + self.state.document.reporter.warning( + "'%s' is not a valid pyversion value" % option, + line=self.lineno) + return [node] @@ -122,12 +155,14 @@ class DoctestDirective(TestDirective): option_spec = { 'hide': directives.flag, 'options': directives.unchanged, + 'pyversion': directives.unchanged_required, } class TestcodeDirective(TestDirective): option_spec = { 'hide': directives.flag, + 'pyversion': directives.unchanged_required, } @@ -135,6 +170,7 @@ class TestoutputDirective(TestDirective): option_spec = { 'hide': directives.flag, 'options': directives.unchanged, + 'pyversion': directives.unchanged_required, } diff --git a/tests/roots/test-doctest/doctest.txt b/tests/roots/test-doctest/doctest.txt index 053601f3c..e45bc2721 100644 --- a/tests/roots/test-doctest/doctest.txt +++ b/tests/roots/test-doctest/doctest.txt @@ -69,7 +69,7 @@ Special directives >>> squared(2) 4 -* options for testcode/testoutput blocks +* options for doctest/testcode/testoutput blocks .. testcode:: :hide: @@ -82,6 +82,20 @@ Special directives Output text. + .. doctest:: + :pyversion: >= 2.0 + + >>> a = 3 + >>> a + 3 + + .. doctest:: + :pyversion: < 2.0 + + >>> a = 3 + >>> a + 4 + * grouping .. testsetup:: group1 From e3cb2bbcf18d301400835d0778c81a15cfd0da99 Mon Sep 17 00:00:00 2001 From: Jakob Lykke Andersen <Jakob@caput.dk> Date: Sun, 15 Jan 2017 00:52:08 +0900 Subject: [PATCH 108/190] C++, at option tparam-line-spec for templates. Make it possible to render template parameters on separate lines. --- doc/domains.rst | 14 ++++++ sphinx/domains/cpp.py | 97 +++++++++++++++++++--------------------- tests/test_domain_cpp.py | 2 +- 3 files changed, 62 insertions(+), 51 deletions(-) diff --git a/doc/domains.rst b/doc/domains.rst index 43395eac3..5236b3205 100644 --- a/doc/domains.rst +++ b/doc/domains.rst @@ -545,6 +545,10 @@ The C++ Domain The C++ domain (name **cpp**) supports documenting C++ projects. + +Directives +~~~~~~~~~~ + The following directives are available. All declarations can start with a visibility statement (``public``, ``private`` or ``protected``). @@ -740,6 +744,16 @@ a visibility statement (``public``, ``private`` or ``protected``). Holder of elements, to which it can provide access via :cpp:concept:`Iterator` s. +Options +....... + +Some directives support options: + +- ``:noindex:``, see :ref:`basic-domain-markup`. +- ``:tparam-line-spec:``, for templated declarations. + If specified, each template parameter will be rendered on a separate line. 
+ + Constrained Templates ~~~~~~~~~~~~~~~~~~~~~ diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index 181ed3f78..39430345e 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -15,7 +15,7 @@ from copy import deepcopy from six import iteritems, text_type from docutils import nodes -from docutils.parsers.rst import Directive +from docutils.parsers.rst import Directive, directives from sphinx import addnodes from sphinx.roles import XRefRole @@ -53,13 +53,17 @@ logger = logging.getLogger(__name__) the index. All of the versions should work as permalinks. - Tagnames + Signature Nodes and Tagnames ---------------------------------------------------------------------------- - Each desc_signature node will have the attribute 'sphinx_cpp_tagname' set to - - 'templateParams', if the line is on the form 'template<...>', - - 'templateIntroduction, if the line is on the form 'conceptName{...}' + Each signature is in a desc_signature node, where all children are + desc_signature_line nodes. Each of these lines will have the attribute + 'sphinx_cpp_tagname' set to one of the following (prioritized): - 'declarator', if the line contains the name of the declared object. + - 'templateParams', if the line starts a template parameter list, + - 'templateParams', if the line has template parameters + Note: such lines might get a new tag in the future. + - 'templateIntroduction, if the line is on the form 'conceptName{...}' No other desc_signature nodes should exist (so far). @@ -892,6 +896,7 @@ class ASTTemplateParams(ASTBase): # type: (Any) -> None assert params is not None self.params = params + self.isNested = False # whether it's a template template param def get_id_v2(self): # type: () -> unicode @@ -910,17 +915,30 @@ class ASTTemplateParams(ASTBase): res.append(u"> ") return ''.join(res) - def describe_signature(self, signode, mode, env, symbol): + def describe_signature(self, parentNode, mode, env, symbol, lineSpec=None): # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None - signode.sphinx_cpp_tagname = 'templateParams' - signode += nodes.Text("template<") + # 'lineSpec' is defaulted becuase of template template parameters + def makeLine(parentNode=parentNode): + signode = addnodes.desc_signature_line() + parentNode += signode + signode.sphinx_cpp_tagname = 'templateParams' + return signode + if self.isNested: + lineNode = parentNode + else: + lineNode = makeLine() + lineNode += nodes.Text("template<") first = True for param in self.params: if not first: - signode += nodes.Text(", ") + lineNode += nodes.Text(", ") first = False - param.describe_signature(signode, mode, env, symbol) - signode += nodes.Text(">") + if lineSpec: + lineNode = makeLine() + param.describe_signature(lineNode, mode, env, symbol) + if lineSpec and not first: + lineNode = makeLine() + lineNode += nodes.Text(">") class ASTTemplateIntroductionParameter(ASTBase): @@ -1005,8 +1023,11 @@ class ASTTemplateIntroduction(ASTBase): res.append('} ') return ''.join(res) - def describe_signature(self, signode, mode, env, symbol): + def describe_signature(self, parentNode, mode, env, symbol, lineSpec): # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None + # Note: 'lineSpec' has no effect on template introductions. 
+ signode = addnodes.desc_signature_line() + parentNode += signode signode.sphinx_cpp_tagname = 'templateIntroduction' self.concept.describe_signature(signode, 'markType', env, symbol) signode += nodes.Text('{') @@ -1043,13 +1064,11 @@ class ASTTemplateDeclarationPrefix(ASTBase): res.append(text_type(t)) return u''.join(res) - def describe_signature(self, signode, mode, env, symbol): + def describe_signature(self, signode, mode, env, symbol, lineSpec): # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None _verify_description_mode(mode) for t in self.templates: - templateNode = addnodes.desc_signature_line() - t.describe_signature(templateNode, 'lastIsName', env, symbol) - signode += templateNode + t.describe_signature(signode, 'lastIsName', env, symbol, lineSpec) class ASTOperatorBuildIn(ASTBase): @@ -2722,7 +2741,7 @@ class ASTDeclaration(ASTBase): res.append(text_type(self.declaration)) return u''.join(res) - def describe_signature(self, signode, mode, env): + def describe_signature(self, signode, mode, env, options): # type: (addnodes.desc_signature, unicode, BuildEnvironment) -> None _verify_description_mode(mode) # The caller of the domain added a desc_signature node. @@ -2736,7 +2755,8 @@ class ASTDeclaration(ASTBase): assert self.symbol if self.templatePrefix: self.templatePrefix.describe_signature(signode, mode, env, - symbol=self.symbol) + symbol=self.symbol, + lineSpec=options.get('tparam-line-spec')) signode += mainDeclNode if self.visibility and self.visibility != "public": mainDeclNode += addnodes.desc_annotation(self.visibility + " ", @@ -4170,6 +4190,7 @@ class DefinitionParser(object): if self.skip_word('template'): # declare a tenplate template parameter nestedParams = self._parse_template_parameter_list() + nestedParams.isNested = True else: nestedParams = None self.skip_ws() @@ -4420,6 +4441,9 @@ class CPPObject(ObjectDescription): names=('returns', 'return')), ] + option_spec = dict(ObjectDescription.option_spec) + option_spec['tparam-line-spec'] = directives.flag + def warn(self, msg): # type: (unicode) -> None self.state_machine.reporter.warning(msg, line=self.lineno) @@ -4517,9 +4541,9 @@ class CPPObject(ObjectDescription): # type: (Any) -> Any raise NotImplementedError() - def describe_signature(self, signode, ast, parentScope): - # type: (addnodes.desc_signature, Any, Any) -> None - raise NotImplementedError() + def describe_signature(self, signode, ast, options): # type: ignore + # type: (addnodes.desc_signature, Any) -> None + ast.describe_signature(signode, 'lastIsName', self.env, options) def handle_signature(self, sig, signode): # type: (unicode, addnodes.desc_signature) -> Any @@ -4552,7 +4576,8 @@ class CPPObject(ObjectDescription): if ast.objectType == 'enumerator': self._add_enumerator_to_parent(ast) - self.describe_signature(signode, ast) + self.options['tparam-line-spec'] = 'tparam-line-spec' in self.options + self.describe_signature(signode, ast, self.options) return ast def before_content(self): @@ -4576,10 +4601,6 @@ class CPPTypeObject(CPPObject): # type: (Any) -> Any return parser.parse_declaration("type") - def describe_signature(self, signode, ast): # type: ignore - # type: (addnodes.desc_signature, Any) -> None - ast.describe_signature(signode, 'lastIsName', self.env) - class CPPConceptObject(CPPObject): def get_index_text(self, name): @@ -4590,10 +4611,6 @@ class CPPConceptObject(CPPObject): # type: (Any) -> Any return parser.parse_declaration("concept") - def describe_signature(self, signode, ast): # type: ignore - # 
type: (addnodes.desc_signature, Any) -> None - ast.describe_signature(signode, 'lastIsName', self.env) - class CPPMemberObject(CPPObject): def get_index_text(self, name): @@ -4604,10 +4621,6 @@ class CPPMemberObject(CPPObject): # type: (Any) -> Any return parser.parse_declaration("member") - def describe_signature(self, signode, ast): # type: ignore - # type: (addnodes.desc_signature, Any) -> None - ast.describe_signature(signode, 'lastIsName', self.env) - class CPPFunctionObject(CPPObject): def get_index_text(self, name): @@ -4618,10 +4631,6 @@ class CPPFunctionObject(CPPObject): # type: (Any) -> Any return parser.parse_declaration("function") - def describe_signature(self, signode, ast): # type: ignore - # type: (addnodes.desc_signature, Any) -> None - ast.describe_signature(signode, 'lastIsName', self.env) - class CPPClassObject(CPPObject): def get_index_text(self, name): @@ -4632,10 +4641,6 @@ class CPPClassObject(CPPObject): # type: (Any) -> Any return parser.parse_declaration("class") - def describe_signature(self, signode, ast): # type: ignore - # type: (addnodes.desc_signature, Any) -> None - ast.describe_signature(signode, 'lastIsName', self.env) - class CPPEnumObject(CPPObject): def get_index_text(self, name): @@ -4656,10 +4661,6 @@ class CPPEnumObject(CPPObject): assert False return ast - def describe_signature(self, signode, ast): # type: ignore - # type: (addnodes.desc_signature, Any) -> None - ast.describe_signature(signode, 'lastIsName', self.env) - class CPPEnumeratorObject(CPPObject): def get_index_text(self, name): @@ -4670,10 +4671,6 @@ class CPPEnumeratorObject(CPPObject): # type: (Any) -> Any return parser.parse_declaration("enumerator") - def describe_signature(self, signode, ast): # type: ignore - # type: (addnodes.desc_signature, Any) -> None - ast.describe_signature(signode, 'lastIsName', self.env) - class CPPNamespaceObject(Directive): """ diff --git a/tests/test_domain_cpp.py b/tests/test_domain_cpp.py index 58e6093ba..64356f0aa 100644 --- a/tests/test_domain_cpp.py +++ b/tests/test_domain_cpp.py @@ -56,7 +56,7 @@ def check(name, input, idv1output=None, idv2output=None, output=None): parentNode = addnodes.desc() signode = addnodes.desc_signature(input, '') parentNode += signode - ast.describe_signature(signode, 'lastIsName', symbol) + ast.describe_signature(signode, 'lastIsName', symbol, options={}) if idv2output: idv2output = "_CPPv2" + idv2output From 8c894fbdd95db2eed9eb249cdf86ee38bcf812db Mon Sep 17 00:00:00 2001 From: Jakob Lykke Andersen <Jakob@caput.dk> Date: Sun, 15 Jan 2017 01:00:01 +0900 Subject: [PATCH 109/190] Update changes --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 91b60217f..51cdad3c5 100644 --- a/CHANGES +++ b/CHANGES @@ -38,6 +38,8 @@ Features added * #3136: Add ``:name:`` option to the directives in ``sphinx.ext.graphviz`` * #2336: Add ``imported_members`` option to ``sphinx-autogen`` command to document imported members. +* C++, add ``:tparam-line-spec:`` option to templated declarations. + When specified, each template parameter will be rendered on a separate line. 
Bugs fixed ---------- From 19298dec337d20526702109e3eeb7629dccd1d50 Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Sat, 14 Jan 2017 18:57:19 +0100 Subject: [PATCH 110/190] Added pyversion option for doctest (issue 3303) --- sphinx/ext/doctest.py | 58 +++++++++++++++++++++++---------------- tests/test_ext_doctest.py | 19 +++++++++++++ 2 files changed, 53 insertions(+), 24 deletions(-) diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index b71fa2569..f645283a2 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -20,12 +20,11 @@ from os import path import doctest from six import itervalues, StringIO, binary_type, text_type, PY2 +from distutils.version import StrictVersion from docutils import nodes from docutils.parsers.rst import Directive, directives -from distutils.version import StrictVersion as V - import sphinx from sphinx.builders import Builder from sphinx.util import force_decode, logging @@ -107,8 +106,8 @@ class TestDirective(Directive): if (option[0] not in '+-' or option[1:] not in doctest.OPTIONFLAGS_BY_NAME): # type: ignore self.state.document.reporter.warning( - "missing '+' or '-' in '%s' option." % option, - line=self.lineno) + "missing '+' or '-' in '%s' option." % option, + line=self.lineno) continue flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]] # type: ignore node['options'][flag] = (option[0] == '+') @@ -116,32 +115,43 @@ class TestDirective(Directive): try: option = self.options['pyversion'] option_strings = option.split() - # :pyversion: >= 3.6 --> op='>=', version='3.6' + # :pyversion: >= 3.6 --> operand='>=', version='3.6' operand, version = [item.strip() for item in option_strings] - operands = ('<=', '<', '==', '>=', '>') - if operand not in operands: - self.state.document.reporter.warning( - "'%s' is not a valid pyversion operand.\n" - "Avaliable operands: %s" % (operand, operands), - line=self.lineno) - else: - rv = V(platform.python_version()) # Running version - sv = V(version) # Specified version - skip = ((operand == '<=' and not (rv <= sv)) or - (operand == '<' and not (rv < sv)) or - (operand == '==' and not (rv == sv)) or - (operand == '>=' and not (rv >= sv)) or - (operand == '>' and not (rv > sv))) - if skip: - flag = doctest.OPTIONFLAGS_BY_NAME['SKIP'] - node['options'][flag] = True + if not self.proper_pyversion(operand, version): + flag = doctest.OPTIONFLAGS_BY_NAME['SKIP'] + node['options'][flag] = True # Skip the test except ValueError: self.state.document.reporter.warning( - "'%s' is not a valid pyversion value" % option, - line=self.lineno) + "'%s' is not a valid pyversion value" % option, + line=self.lineno) return [node] + def proper_pyversion(self, operand, version): + """Compare `version` to the Python version, relying on `operand`. + + This function is meant to be used to evaluate the doctest :pyversion: + option. For instance, if the doctest directive provides the option + :pyversion: >= 3.3, then we have to check if the running Python version + is greather or equal to 3.3. In that case, operand will be the string + '>=' and version the string '3.3'. proper_pyversion() will return True + if the running Python version is >= 3.3, False otherwise. 
+ """ + operands = ('<=', '<', '==', '>=', '>') + if operand not in operands: + self.document.reporter.warning( + "'%s' is not a valid pyversion operand.\n" + "Avaliable operands: %s" % (operand, operands), + line=self.lineno) + return True # Be defensive, making the doctest to be executed + rv = StrictVersion(platform.python_version()) # Running version + sv = StrictVersion(version) # Specified version + return ((operand == '<=' and (rv <= sv)) or + (operand == '<' and (rv < sv)) or + (operand == '==' and (rv == sv)) or + (operand == '>=' and (rv >= sv)) or + (operand == '>' and (rv > sv))) + class TestsetupDirective(TestDirective): option_spec = {} # type: Dict diff --git a/tests/test_ext_doctest.py b/tests/test_ext_doctest.py index 6b17f2ed7..34ca3d027 100644 --- a/tests/test_ext_doctest.py +++ b/tests/test_ext_doctest.py @@ -8,7 +8,9 @@ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ +import platform import pytest +from sphinx.ext.doctest import TestDirective as _TestDirective cleanup_called = 0 @@ -25,6 +27,23 @@ def test_build(app, status, warning): assert cleanup_called == 3, 'testcleanup did not get executed enough times' +def test_pyversion(monkeypatch): + def python_version(): + return '3.3' + monkeypatch.setattr(platform, 'python_version', python_version) + td = _TestDirective(*([None] * 9)) + assert td.proper_pyversion('<', '3.4') is True + assert td.proper_pyversion('<', '3.2') is False + assert td.proper_pyversion('<=', '3.4') is True + assert td.proper_pyversion('<=', '3.3') is True + assert td.proper_pyversion('==', '3.3') is True + assert td.proper_pyversion('>=', '3.4') is False + assert td.proper_pyversion('>=', '3.2') is True + assert td.proper_pyversion('>', '3.4') is False + assert td.proper_pyversion('>', '3.2') is True + assert td.proper_pyversion('>', '3.3a0') is True + + def cleanup_call(): global cleanup_called cleanup_called += 1 From 960f889a2ccce408216bb3e16afdc7309fba20a6 Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Sun, 15 Jan 2017 10:27:34 +0100 Subject: [PATCH 111/190] Added pyversion option for doctest (issue 3303) Code, tests and documentation. --- doc/ext/doctest.rst | 13 ++++++++++++- sphinx/ext/doctest.py | 2 +- tests/test_ext_doctest.py | 4 +--- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/doc/ext/doctest.rst b/doc/ext/doctest.rst index 818b86007..c1cba088a 100644 --- a/doc/ext/doctest.rst +++ b/doc/ext/doctest.rst @@ -63,7 +63,7 @@ a comma-separated list of group names. default set of flags is specified by the :confval:`doctest_default_flags` configuration variable. - This directive supports two options: + This directive supports three options: * ``hide``, a flag option, hides the doctest block in other builders. By default it is shown as a highlighted doctest block. @@ -73,6 +73,17 @@ a comma-separated list of group names. explicit flags per example, with doctest comments, but they will show up in other builders too.) + * ``pyversion``, a string option, can be used to specify the required Python + version for the example to be tested. For instance, in the following case + the example will be tested only for Python versions greather than 3.3:: + + .. doctest:: + :pyversion: > 3.3 + + The supported operands are ``<``, ``<=``, ``==``, ``>=``, and ``>``. + + .. versionadded:: 1.5 + Note that like with standard doctests, you have to use ``<BLANKLINE>`` to signal a blank line in the expected output. 
The ``<BLANKLINE>`` is removed when building presentation output (HTML, LaTeX etc.). diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index f645283a2..273305884 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -133,7 +133,7 @@ class TestDirective(Directive): This function is meant to be used to evaluate the doctest :pyversion: option. For instance, if the doctest directive provides the option :pyversion: >= 3.3, then we have to check if the running Python version - is greather or equal to 3.3. In that case, operand will be the string + is greather or equal than 3.3. In that case, operand will be the string '>=' and version the string '3.3'. proper_pyversion() will return True if the running Python version is >= 3.3, False otherwise. """ diff --git a/tests/test_ext_doctest.py b/tests/test_ext_doctest.py index 34ca3d027..887fa4601 100644 --- a/tests/test_ext_doctest.py +++ b/tests/test_ext_doctest.py @@ -28,9 +28,7 @@ def test_build(app, status, warning): def test_pyversion(monkeypatch): - def python_version(): - return '3.3' - monkeypatch.setattr(platform, 'python_version', python_version) + monkeypatch.setattr(platform, 'python_version', lambda: '3.3') td = _TestDirective(*([None] * 9)) assert td.proper_pyversion('<', '3.4') is True assert td.proper_pyversion('<', '3.2') is False From 44732c6aea2bb38370a61a61478a07ab30c459f2 Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Sun, 15 Jan 2017 21:07:44 +0100 Subject: [PATCH 112/190] compare_version() replaces proper_pyversion() --- doc/ext/doctest.rst | 4 +-- sphinx/ext/doctest.py | 59 +++++++++++++++++++-------------------- tests/test_ext_doctest.py | 28 +++++++++---------- 3 files changed, 44 insertions(+), 47 deletions(-) diff --git a/doc/ext/doctest.rst b/doc/ext/doctest.rst index c1cba088a..9ee9c1cff 100644 --- a/doc/ext/doctest.rst +++ b/doc/ext/doctest.rst @@ -79,9 +79,9 @@ a comma-separated list of group names. .. doctest:: :pyversion: > 3.3 - + The supported operands are ``<``, ``<=``, ``==``, ``>=``, and ``>``. - + .. versionadded:: 1.5 Note that like with standard doctests, you have to use ``<BLANKLINE>`` to diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index 273305884..93aff532b 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -56,6 +56,29 @@ else: return text +def compare_version(ver1, ver2, operand): + """Compare `ver1` to `ver2`, relying on `operand`. 
+ + Some examples: + + >>> compare_version('3.3', '3.5', '<=') + True + >>> compare_version('3.3', '3.2', '<=') + False + >>> compare_version('3.3a0', '3.3', '<=') + True + """ + if operand not in ('<=', '<', '==', '>=', '>'): + raise ValueError("'%s' is not a valid operand.") + v1 = StrictVersion(ver1) + v2 = StrictVersion(ver2) + return ((operand == '<=' and (v1 <= v2)) or + (operand == '<' and (v1 < v2)) or + (operand == '==' and (v1 == v2)) or + (operand == '>=' and (v1 >= v2)) or + (operand == '>' and (v1 > v2))) + + # set up the necessary directives class TestDirective(Directive): @@ -114,44 +137,18 @@ class TestDirective(Directive): if self.name == 'doctest' and 'pyversion' in self.options: try: option = self.options['pyversion'] - option_strings = option.split() - # :pyversion: >= 3.6 --> operand='>=', version='3.6' - operand, version = [item.strip() for item in option_strings] - if not self.proper_pyversion(operand, version): + # :pyversion: >= 3.6 --> operand='>=', option_version='3.6' + operand, option_version = [item.strip() for item in option.split()] + running_version = platform.python_version() + if not compare_version(running_version, option_version, operand): flag = doctest.OPTIONFLAGS_BY_NAME['SKIP'] node['options'][flag] = True # Skip the test except ValueError: self.state.document.reporter.warning( - "'%s' is not a valid pyversion value" % option, + "'%s' is not a valid pyversion option" % option, line=self.lineno) - return [node] - def proper_pyversion(self, operand, version): - """Compare `version` to the Python version, relying on `operand`. - - This function is meant to be used to evaluate the doctest :pyversion: - option. For instance, if the doctest directive provides the option - :pyversion: >= 3.3, then we have to check if the running Python version - is greather or equal than 3.3. In that case, operand will be the string - '>=' and version the string '3.3'. proper_pyversion() will return True - if the running Python version is >= 3.3, False otherwise. 
- """ - operands = ('<=', '<', '==', '>=', '>') - if operand not in operands: - self.document.reporter.warning( - "'%s' is not a valid pyversion operand.\n" - "Avaliable operands: %s" % (operand, operands), - line=self.lineno) - return True # Be defensive, making the doctest to be executed - rv = StrictVersion(platform.python_version()) # Running version - sv = StrictVersion(version) # Specified version - return ((operand == '<=' and (rv <= sv)) or - (operand == '<' and (rv < sv)) or - (operand == '==' and (rv == sv)) or - (operand == '>=' and (rv >= sv)) or - (operand == '>' and (rv > sv))) - class TestsetupDirective(TestDirective): option_spec = {} # type: Dict diff --git a/tests/test_ext_doctest.py b/tests/test_ext_doctest.py index 887fa4601..213663a2b 100644 --- a/tests/test_ext_doctest.py +++ b/tests/test_ext_doctest.py @@ -10,7 +10,7 @@ """ import platform import pytest -from sphinx.ext.doctest import TestDirective as _TestDirective +from sphinx.ext.doctest import compare_version cleanup_called = 0 @@ -27,19 +27,19 @@ def test_build(app, status, warning): assert cleanup_called == 3, 'testcleanup did not get executed enough times' -def test_pyversion(monkeypatch): - monkeypatch.setattr(platform, 'python_version', lambda: '3.3') - td = _TestDirective(*([None] * 9)) - assert td.proper_pyversion('<', '3.4') is True - assert td.proper_pyversion('<', '3.2') is False - assert td.proper_pyversion('<=', '3.4') is True - assert td.proper_pyversion('<=', '3.3') is True - assert td.proper_pyversion('==', '3.3') is True - assert td.proper_pyversion('>=', '3.4') is False - assert td.proper_pyversion('>=', '3.2') is True - assert td.proper_pyversion('>', '3.4') is False - assert td.proper_pyversion('>', '3.2') is True - assert td.proper_pyversion('>', '3.3a0') is True +def test_compare_version(): + assert compare_version('3.3', '3.4', '<') is True + assert compare_version('3.3', '3.2', '<') is False + assert compare_version('3.3', '3.4', '<=') is True + assert compare_version('3.3', '3.2', '<=') is False + assert compare_version('3.3', '3.3', '==') is True + assert compare_version('3.3', '3.4', '==') is False + assert compare_version('3.3', '3.2', '>=') is True + assert compare_version('3.3', '3.4', '>=') is False + assert compare_version('3.3', '3.2', '>') is True + assert compare_version('3.3', '3.4', '>') is False + with pytest.raises(ValueError): + compare_version('3.3', '3.4', '+') def cleanup_call(): From e7166bbc08ff0d8572763364685993b7ec48f6e1 Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Sun, 15 Jan 2017 21:13:34 +0100 Subject: [PATCH 113/190] Removed useless import --- tests/test_ext_doctest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_ext_doctest.py b/tests/test_ext_doctest.py index 213663a2b..10f51a133 100644 --- a/tests/test_ext_doctest.py +++ b/tests/test_ext_doctest.py @@ -8,7 +8,6 @@ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" -import platform import pytest from sphinx.ext.doctest import compare_version From df1c46974de497ba66323f3be2f6b61474127b0f Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Sun, 15 Jan 2017 22:09:15 +0100 Subject: [PATCH 114/190] Comparison made by distutils.version.LooseVersion --- doc/ext/doctest.rst | 4 +++- sphinx/ext/doctest.py | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/ext/doctest.rst b/doc/ext/doctest.rst index 9ee9c1cff..95336df8b 100644 --- a/doc/ext/doctest.rst +++ b/doc/ext/doctest.rst @@ -80,7 +80,9 @@ a comma-separated list of group names. .. doctest:: :pyversion: > 3.3 - The supported operands are ``<``, ``<=``, ``==``, ``>=``, and ``>``. + The supported operands are ``<``, ``<=``, ``==``, ``>=``, ``>``, and + comparison is performed by `distutils.version.LooseVersion + <https://www.python.org/dev/peps/pep-0386/#distutils>`__. .. versionadded:: 1.5 diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index 93aff532b..3e029c19e 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -20,7 +20,7 @@ from os import path import doctest from six import itervalues, StringIO, binary_type, text_type, PY2 -from distutils.version import StrictVersion +from distutils.version import LooseVersion from docutils import nodes from docutils.parsers.rst import Directive, directives @@ -70,8 +70,8 @@ def compare_version(ver1, ver2, operand): """ if operand not in ('<=', '<', '==', '>=', '>'): raise ValueError("'%s' is not a valid operand.") - v1 = StrictVersion(ver1) - v2 = StrictVersion(ver2) + v1 = LooseVersion(ver1) + v2 = LooseVersion(ver2) return ((operand == '<=' and (v1 <= v2)) or (operand == '<' and (v1 < v2)) or (operand == '==' and (v1 == v2)) or From e32eb4495985c588deacc7e30ce4721f89a6d629 Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Mon, 16 Jan 2017 10:53:46 +0100 Subject: [PATCH 115/190] Fixed flake8 style check --- doc/ext/doctest.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/ext/doctest.rst b/doc/ext/doctest.rst index 95336df8b..47331dea2 100644 --- a/doc/ext/doctest.rst +++ b/doc/ext/doctest.rst @@ -79,7 +79,7 @@ a comma-separated list of group names. .. doctest:: :pyversion: > 3.3 - + The supported operands are ``<``, ``<=``, ``==``, ``>=``, ``>``, and comparison is performed by `distutils.version.LooseVersion <https://www.python.org/dev/peps/pep-0386/#distutils>`__. 
From 7bf4f76c3befbd52b2b0397527023d455a649fa6 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 17 Nov 2016 20:24:58 +0900 Subject: [PATCH 116/190] Add __iter__(), add() and filter() to Config class --- sphinx/application.py | 4 ++-- sphinx/builders/html.py | 4 +--- sphinx/config.py | 20 +++++++++++++++++++- sphinx/environment/__init__.py | 6 ++---- sphinx/ext/ifconfig.py | 2 +- 5 files changed, 25 insertions(+), 11 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 6ea7b059f..c1ffe505d 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -582,11 +582,11 @@ class Sphinx(object): # type: (unicode, Any, Union[bool, unicode], Any) -> None logger.debug('[app] adding config value: %r', (name, default, rebuild) + ((types,) if types else ())) # type: ignore - if name in self.config.values: + if name in self.config: raise ExtensionError('Config value %r already present' % name) if rebuild in (False, True): rebuild = rebuild and 'env' or '' - self.config.values[name] = (default, rebuild, types) + self.config.add(name, default, rebuild, types) def add_event(self, name): # type: (unicode) -> None diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index f7fe53cac..c76bc8543 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -190,9 +190,7 @@ class StandaloneHTMLBuilder(Builder): def get_outdated_docs(self): # type: ignore # type: () -> Iterator[unicode] - cfgdict = dict((name, self.config[name]) - for (name, desc) in iteritems(self.config.values) - if desc[1] == 'html') + cfgdict = dict((confval.name, confval.value) for confval in self.config.filter('html')) self.config_hash = get_stable_hash(cfgdict) self.tags_hash = get_stable_hash(sorted(self.tags)) # type: ignore old_config_hash = old_tags_hash = '' diff --git a/sphinx/config.py b/sphinx/config.py index c55660a5c..7297fb047 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -13,6 +13,7 @@ import re from os import path, getenv from six import PY2, PY3, iteritems, string_types, binary_type, text_type, integer_types +from typing import Any, NamedTuple from sphinx.errors import ConfigError from sphinx.locale import l_ @@ -23,7 +24,7 @@ from sphinx.util.pycompat import execfile_, NoneType if False: # For type annotation - from typing import Any, Callable, Tuple # NOQA + from typing import Any, Callable, Generator, Iterator, Tuple # NOQA from sphinx.util.tags import Tags # NOQA logger = logging.getLogger(__name__) @@ -43,6 +44,10 @@ CONFIG_PERMITTED_TYPE_WARNING = "The config value `{name}' has type `{current.__ CONFIG_TYPE_WARNING = "The config value `{name}' has type `{current.__name__}', " \ "defaults to `{default.__name__}'." +ConfigValue = NamedTuple('ConfigValue', [('name', str), + ('value', Any), + ('rebuild', str)]) + class ENUM(object): """represents the config value should be a one of candidates. 
@@ -307,3 +312,16 @@ class Config(object): def __contains__(self, name): # type: (unicode) -> bool return name in self.values + + def __iter__(self): + # type: () -> Iterator[ConfigValue] + for name, value in iteritems(self.values): + yield ConfigValue(name, getattr(self, name), value[1]) # type: ignore + + def add(self, name, default, rebuild, types): + # type: (str, Any, str, Any) -> None + self.values[name] = (default, rebuild, types) + + def filter(self, rebuild): + # type: (str) -> Iterator[ConfigValue] + return (value for value in self if value.rebuild == rebuild) # type: ignore diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 8de0a9960..5d139e738 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -533,10 +533,8 @@ class BuildEnvironment(object): else: # check if a config value was changed that affects how # doctrees are read - for key, descr in iteritems(config.values): - if descr[1] != 'env': - continue - if self.config[key] != config[key]: + for confval in config.filter('env'): + if self.config[confval.name] != confval.value: msg = '[config changed] ' config_changed = True break diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py index 18504d94e..e0ecda026 100644 --- a/sphinx/ext/ifconfig.py +++ b/sphinx/ext/ifconfig.py @@ -57,7 +57,7 @@ class IfConfig(Directive): def process_ifconfig_nodes(app, doctree, docname): # type: (Sphinx, nodes.Node, unicode) -> None - ns = dict((k, app.config[k]) for k in app.config.values) + ns = dict((confval.name, confval.value) for confval in app.config) ns.update(app.config.__dict__.copy()) ns['builder'] = app.builder.name for node in doctree.traverse(ifconfig): From 491620ee5685e5c509a7b334892ca008bcc4b4a1 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 18 Jan 2017 12:49:17 +0900 Subject: [PATCH 117/190] Fix mypy violations --- sphinx/config.py | 10 +++++----- sphinx/domains/cpp.py | 10 +++++----- sphinx/ext/ifconfig.py | 2 +- sphinx/util/__init__.py | 4 ++-- sphinx/writers/latex.py | 5 +++-- 5 files changed, 16 insertions(+), 15 deletions(-) diff --git a/sphinx/config.py b/sphinx/config.py index 7297fb047..fb73c344c 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -13,7 +13,7 @@ import re from os import path, getenv from six import PY2, PY3, iteritems, string_types, binary_type, text_type, integer_types -from typing import Any, NamedTuple +from typing import Any, NamedTuple, Union from sphinx.errors import ConfigError from sphinx.locale import l_ @@ -24,7 +24,7 @@ from sphinx.util.pycompat import execfile_, NoneType if False: # For type annotation - from typing import Any, Callable, Generator, Iterator, Tuple # NOQA + from typing import Any, Callable, Iterable, Iterator, Tuple # NOQA from sphinx.util.tags import Tags # NOQA logger = logging.getLogger(__name__) @@ -46,7 +46,7 @@ CONFIG_TYPE_WARNING = "The config value `{name}' has type `{current.__name__}', ConfigValue = NamedTuple('ConfigValue', [('name', str), ('value', Any), - ('rebuild', str)]) + ('rebuild', Union[bool, unicode])]) class ENUM(object): @@ -314,12 +314,12 @@ class Config(object): return name in self.values def __iter__(self): - # type: () -> Iterator[ConfigValue] + # type: () -> Iterable[ConfigValue] for name, value in iteritems(self.values): yield ConfigValue(name, getattr(self, name), value[1]) # type: ignore def add(self, name, default, rebuild, types): - # type: (str, Any, str, Any) -> None + # type: (unicode, Any, Union[bool, unicode], Any) -> None 
self.values[name] = (default, rebuild, types) def filter(self, rebuild): diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index 0b1e03f5a..aa26df405 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -916,7 +916,7 @@ class ASTTemplateParams(ASTBase): return ''.join(res) def describe_signature(self, parentNode, mode, env, symbol, lineSpec=None): - # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None # 'lineSpec' is defaulted becuase of template template parameters def makeLine(parentNode=parentNode): signode = addnodes.desc_signature_line() @@ -1024,7 +1024,7 @@ class ASTTemplateIntroduction(ASTBase): return ''.join(res) def describe_signature(self, parentNode, mode, env, symbol, lineSpec): - # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None # Note: 'lineSpec' has no effect on template introductions. signode = addnodes.desc_signature_line() parentNode += signode @@ -1065,7 +1065,7 @@ class ASTTemplateDeclarationPrefix(ASTBase): return u''.join(res) def describe_signature(self, signode, mode, env, symbol, lineSpec): - # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None _verify_description_mode(mode) for t in self.templates: t.describe_signature(signode, 'lastIsName', env, symbol, lineSpec) @@ -2742,7 +2742,7 @@ class ASTDeclaration(ASTBase): return u''.join(res) def describe_signature(self, signode, mode, env, options): - # type: (addnodes.desc_signature, unicode, BuildEnvironment) -> None + # type: (addnodes.desc_signature, unicode, BuildEnvironment, Dict) -> None _verify_description_mode(mode) # The caller of the domain added a desc_signature node. 
# Always enable multiline: @@ -4545,7 +4545,7 @@ class CPPObject(ObjectDescription): raise NotImplementedError() def describe_signature(self, signode, ast, options): # type: ignore - # type: (addnodes.desc_signature, Any) -> None + # type: (addnodes.desc_signature, Any, Dict) -> None ast.describe_signature(signode, 'lastIsName', self.env, options) def handle_signature(self, sig, signode): diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py index e0ecda026..297e476f5 100644 --- a/sphinx/ext/ifconfig.py +++ b/sphinx/ext/ifconfig.py @@ -57,7 +57,7 @@ class IfConfig(Directive): def process_ifconfig_nodes(app, doctree, docname): # type: (Sphinx, nodes.Node, unicode) -> None - ns = dict((confval.name, confval.value) for confval in app.config) + ns = dict((confval.name, confval.value) for confval in app.config) # type: ignore ns.update(app.config.__dict__.copy()) ns['builder'] = app.builder.name for node in doctree.traverse(ifconfig): diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index f2cb2f24b..a5dee1025 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -45,7 +45,7 @@ from sphinx.util.matching import patfilter # noqa if False: # For type annotation - from typing import Any, Callable, Iterable, Iterator, Pattern, Sequence, Tuple # NOQA + from typing import Any, Callable, Iterable, Iterator, Pattern, Sequence, Tuple, Union # NOQA logger = logging.getLogger(__name__) @@ -565,7 +565,7 @@ def old_status_iterator(iterable, summary, color="darkgreen", stringify_func=dis # new version with progress info def status_iterator(iterable, summary, color="darkgreen", length=0, verbosity=0, stringify_func=display_chunk): - # type: (Iterable, unicode, str, int, int, Callable[[Any], unicode]) -> Iterable + # type: (Iterable, unicode, str, int, int, Callable[[Any], unicode]) -> Iterable # NOQA if length == 0: for item in old_status_iterator(iterable, summary, color, stringify_func): yield item diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 72977e288..56182755b 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -1575,8 +1575,9 @@ class LaTeXTranslator(nodes.NodeVisitor): def visit_image(self, node): # type: (nodes.Node) -> None attrs = node.attributes - pre = [] # in reverse order - post = [] + pre = [] # type: List[unicode] + # in reverse order + post = [] # type: List[unicode] if self.in_parsed_literal: pre = ['\\begingroup\\sphinxunactivateextrasandspace\\relax '] post = ['\\endgroup '] From a616c745132b27157912afd3b6ec91472cebf8b5 Mon Sep 17 00:00:00 2001 From: Henrik Alsing Friberg <haf@mosek.com> Date: Thu, 19 Jan 2017 13:45:07 +0100 Subject: [PATCH 118/190] Set 'document' attribute of new node in DocFieldTransformer --- sphinx/util/docfields.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sphinx/util/docfields.py b/sphinx/util/docfields.py index 6bf38ebed..81b8347c6 100644 --- a/sphinx/util/docfields.py +++ b/sphinx/util/docfields.py @@ -299,6 +299,7 @@ class DocFieldTransformer(object): translatable_content = nodes.inline(fieldbody.rawsource, translatable=True) + translatable_content.document = fieldbody.parent.document translatable_content.source = fieldbody.parent.source translatable_content.line = fieldbody.parent.line translatable_content += content From 60278c76133c03a3265fcfa71a2cb3bff4058513 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Fri, 20 Jan 2017 00:01:23 +0900 Subject: [PATCH 119/190] Fix #3353: Fix NameError --- sphinx/config.py | 3 +++ 1 file changed, 3 insertions(+) diff 
--git a/sphinx/config.py b/sphinx/config.py index fb73c344c..f5867f40f 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -44,6 +44,9 @@ CONFIG_PERMITTED_TYPE_WARNING = "The config value `{name}' has type `{current.__ CONFIG_TYPE_WARNING = "The config value `{name}' has type `{current.__name__}', " \ "defaults to `{default.__name__}'." +if PY3: + unicode = str # special alias for static typing... + ConfigValue = NamedTuple('ConfigValue', [('name', str), ('value', Any), ('rebuild', Union[bool, unicode])]) From b15868e11b1a3f4d83b1dc0aa64c1ace884d81e3 Mon Sep 17 00:00:00 2001 From: Segev Finer <segev208@gmail.com> Date: Thu, 19 Jan 2017 21:50:38 +0200 Subject: [PATCH 120/190] Allow sphinx.js in a user locale dir to override sphinx.js from Sphinx This is done by simply changing the search order. --- sphinx/builders/html.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index c76bc8543..1d001f3e4 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -143,13 +143,14 @@ class StandaloneHTMLBuilder(Builder): def _get_translations_js(self): # type: () -> unicode - candidates = [path.join(package_dir, 'locale', self.config.language, + candidates = [path.join(dir, self.config.language, + 'LC_MESSAGES', 'sphinx.js') + for dir in self.config.locale_dirs] + \ + [path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js'), path.join(sys.prefix, 'share/sphinx/locale', - self.config.language, 'sphinx.js')] + \ - [path.join(dir, self.config.language, - 'LC_MESSAGES', 'sphinx.js') - for dir in self.config.locale_dirs] + self.config.language, 'sphinx.js')] + for jsfile in candidates: if path.isfile(jsfile): return jsfile From 035ba39c2d8a1bd85106b6450b967b7828e37326 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 21 Jan 2017 21:14:09 +0900 Subject: [PATCH 121/190] Fix mypy violations --- sphinx/builders/gettext.py | 4 ++-- sphinx/builders/linkcheck.py | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/sphinx/builders/gettext.py b/sphinx/builders/gettext.py index 0c255abf2..32ecb3572 100644 --- a/sphinx/builders/gettext.py +++ b/sphinx/builders/gettext.py @@ -224,7 +224,7 @@ class MessageCatalogBuilder(I18nBuilder): extract_translations = self.templates.environment.extract_translations - for template in status_iterator(files, 'reading templates... ', "purple", + for template in status_iterator(files, 'reading templates... ', "purple", # type: ignore # NOQA len(files), self.app.verbosity): with open(template, 'r', encoding='utf-8') as f: # type: ignore context = f.read() @@ -247,7 +247,7 @@ class MessageCatalogBuilder(I18nBuilder): ctime = datetime.fromtimestamp( # type: ignore timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'), ) - for textdomain, catalog in status_iterator(iteritems(self.catalogs), + for textdomain, catalog in status_iterator(iteritems(self.catalogs), # type: ignore "writing message catalogs... 
", "darkgreen", len(self.catalogs), self.app.verbosity, diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py index c0d3aa13b..c7bca9de2 100644 --- a/sphinx/builders/linkcheck.py +++ b/sphinx/builders/linkcheck.py @@ -58,7 +58,6 @@ class AnchorCheckParser(html_parser.HTMLParser): self.found = False def handle_starttag(self, tag, attrs): - # type: (Any, Dict[unicode, unicode]) -> None for key, value in attrs: if key in ('id', 'name') and value == self.search_anchor: self.found = True From 0b4ebbeebb9708b9d5bfa33d58348f82bdf410a2 Mon Sep 17 00:00:00 2001 From: Timotheus Kampik <timotheus.kampik@signavio.com> Date: Sun, 22 Jan 2017 22:05:35 +0100 Subject: [PATCH 122/190] Improve templating options documentation #2788 -- Move `parents`, `prev` & `next` to *documents that are created from source files* section. It doesnt make any sens to use them for auto-generated files like index and search, does it? Add documentation for `body`, `display_toc`, `metatags` and `title`. --- doc/templating.rst | 64 ++++++++++++++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 22 deletions(-) diff --git a/doc/templating.rst b/doc/templating.rst index b9f9410de..41acea91b 100644 --- a/doc/templating.rst +++ b/doc/templating.rst @@ -291,31 +291,12 @@ in the future. The value of :confval:`master_doc`, for usage with :func:`pathto`. -.. data:: next - - The next document for the navigation. This variable is either false or has - two attributes `link` and `title`. The title contains HTML markup. For - example, to generate a link to the next page, you can use this snippet:: - - {% if next %} - <a href="{{ next.link|e }}">{{ next.title }}</a> - {% endif %} - .. data:: pagename The "page name" of the current file, i.e. either the document name if the file is generated from a reST source, or the equivalent hierarchical name relative to the output directory (``[directory/]filename_without_extension``). -.. data:: parents - - A list of parent documents for navigation, structured like the :data:`next` - item. - -.. data:: prev - - Like :data:`next`, but for the previous page. - .. data:: project The value of :confval:`project`. @@ -369,16 +350,58 @@ In documents that are created from source files (as opposed to automatically-generated files like the module index, or documents that already are in HTML form), these variables are also available: +.. data:: body + + A string containing the content of the page in HTML form as produced by the HTML builder, + before the theme is applied. + +.. data:: display_toc + + A boolean that is True if the toc contains more than one entry. + .. data:: meta Document metadata (a dictionary), see :ref:`metadata`. +.. data:: metatags + + A string containing the page's HTML :dudir:`meta` tags. + +.. data:: next + + The next document for the navigation. This variable is either false or has + two attributes `link` and `title`. The title contains HTML markup. For + example, to generate a link to the next page, you can use this snippet:: + + {% if next %} + <a href="{{ next.link|e }}">{{ next.title }}</a> + {% endif %} + + +.. data:: page_source_suffix + + The suffix of the file that was rendered. Since we support a list of :confval:`source_suffix`, + this will allow you to properly link to the original source file. + +.. data:: parents + + A list of parent documents for navigation, structured like the :data:`next` + item. + +.. data:: prev + + Like :data:`next`, but for the previous page. + .. 
data:: sourcename The name of the copied source file for the current document. This is only nonempty if the :confval:`html_copy_source` value is ``True``. This has empty value on creating automatically-generated files. +.. data:: title + + The page title. + .. data:: toc The local table of contents for the current page, rendered as HTML bullet @@ -401,7 +424,4 @@ are in HTML form), these variables are also available: * ``includehidden`` (``False`` by default): if true, the TOC tree will also contain hidden entries. -.. data:: page_source_suffix - The suffix of the file that was rendered. Since we support a list of :confval:`source_suffix`, - this will allow you to properly link to the original source file. From c2f78b6870ad3815b608961dcf6aaa260f6b3cf6 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Mon, 23 Jan 2017 21:52:31 +0900 Subject: [PATCH 123/190] ``BuildEnvironment.set_warnfunc()`` is now deprecated --- CHANGES | 1 + sphinx/application.py | 2 -- sphinx/environment/__init__.py | 17 ++++------------- 3 files changed, 5 insertions(+), 15 deletions(-) diff --git a/CHANGES b/CHANGES index f495c0563..4729e4190 100644 --- a/CHANGES +++ b/CHANGES @@ -57,6 +57,7 @@ Deprecated instead (as Sphinx does since 1.5.) * ``Sphinx.status_iterator()` and ``Sphinx.old_status_iterator()`` is now deprecated. Please use ``sphinx.util:status_iterator()`` intead. +* ``BuildEnvironment.set_warnfunc()`` is now deprecated Release 1.5.3 (in development) ============================== diff --git a/sphinx/application.py b/sphinx/application.py index c1ffe505d..415a385d2 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -292,7 +292,6 @@ class Sphinx(object): # type: (bool) -> None if freshenv: self.env = BuildEnvironment(self.srcdir, self.doctreedir, self.config) - self.env.set_warnfunc(self.warn) self.env.find_files(self.config, self.buildername) for domain in self.domains.keys(): self.env.domains[domain] = self.domains[domain](self.env) @@ -301,7 +300,6 @@ class Sphinx(object): logger.info(bold('loading pickled environment... 
'), nonl=True) self.env = BuildEnvironment.frompickle( self.srcdir, self.config, path.join(self.doctreedir, ENV_PICKLE_FILENAME)) - self.env.set_warnfunc(self.warn) self.env.init_managers() self.env.domains = {} for domain in self.domains.keys(): diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 5d139e738..c8a05a22b 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -16,6 +16,7 @@ import time import types import codecs import fnmatch +import warnings from os import path from glob import glob from collections import defaultdict @@ -49,6 +50,7 @@ from sphinx.util.websupport import is_commentable from sphinx.errors import SphinxError, ExtensionError from sphinx.versioning import add_uids, merge_doctrees from sphinx.transforms import SphinxContentsFilter +from sphinx.deprecation import RemovedInSphinx20Warning from sphinx.environment.managers.indexentries import IndexEntries from sphinx.environment.managers.toctree import Toctree @@ -122,8 +124,6 @@ class BuildEnvironment(object): def topickle(self, filename): # type: (unicode) -> None # remove unpicklable attributes - warnfunc = self._warnfunc - self.set_warnfunc(None) values = self.config.values del self.config.values domains = self.domains @@ -142,7 +142,6 @@ class BuildEnvironment(object): self.attach_managers(managers) self.domains = domains self.config.values = values - self.set_warnfunc(warnfunc) # --------- ENVIRONMENT INITIALIZATION ------------------------------------- @@ -272,8 +271,8 @@ class BuildEnvironment(object): def set_warnfunc(self, func): # type: (Callable) -> None - self._warnfunc = func - self.settings['warning_stream'] = WarningStream(func) + warnings.warn('env.set_warnfunc() is now deprecated. Use sphinx.util.logging instead.', + RemovedInSphinx20Warning) def set_versioning_method(self, method, compare): # type: (unicode, bool) -> None @@ -639,12 +638,9 @@ class BuildEnvironment(object): def read_process(docs): # type: (List[unicode]) -> BuildEnvironment self.app = app - self.warnings = [] # type: List[Tuple] - self.set_warnfunc(lambda *args, **kwargs: self.warnings.append((args, kwargs))) for docname in docs: self.read_doc(docname, app) # allow pickling self to send it back - self.set_warnfunc(None) del self.app del self.domains del self.config.values @@ -653,13 +649,11 @@ class BuildEnvironment(object): def merge(docs, otherenv): # type: (List[unicode], BuildEnvironment) -> None - warnings.extend(otherenv.warnings) self.merge_info_from(docs, otherenv, app) tasks = ParallelTasks(nproc) chunks = make_chunks(docnames, nproc) - warnings = [] # type: List[Tuple] for chunk in status_iterator(chunks, 'reading sources... 
', "purple", len(chunks), self.app.verbosity): tasks.add_task(read_process, chunk, merge) @@ -668,9 +662,6 @@ class BuildEnvironment(object): logger.info(bold('waiting for workers...')) tasks.join() - for warning, kwargs in warnings: - self._warnfunc(*warning, **kwargs) - def check_dependents(self, already): # type: (Set[unicode]) -> Iterator[unicode] to_rewrite = (self.toctree.assign_section_numbers() + # type: ignore From ea9177c8030b31a076ffaba1097f818c73466934 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 24 Jan 2017 00:36:11 +0900 Subject: [PATCH 124/190] Fix mypy violations --- sphinx/builders/html.py | 10 +++++----- sphinx/environment/managers/toctree.py | 2 +- sphinx/util/__init__.py | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index c76bc8543..33a525ff3 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -48,7 +48,7 @@ from sphinx.writers.html import HTMLWriter, HTMLTranslator, \ if False: # For type annotation - from typing import Any, Iterable, Iterator, Tuple, Union # NOQA + from typing import Any, Iterable, Iterator, Type, Tuple, Union # NOQA from sphinx.domains import Domain, Index # NOQA from sphinx.application import Sphinx # NOQA @@ -105,7 +105,7 @@ class StandaloneHTMLBuilder(Builder): css_files = [] # type: List[unicode] imgpath = None # type: unicode - domain_indices = [] # type: List[Tuple[unicode, Index, unicode, bool]] + domain_indices = [] # type: List[Tuple[unicode, Type[Index], List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool]] # NOQA default_sidebars = ['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'] @@ -295,7 +295,7 @@ class StandaloneHTMLBuilder(Builder): domain = None # type: Domain domain = self.env.domains[domain_name] for indexcls in domain.indices: - indexname = '%s-%s' % (domain.name, indexcls.name) + indexname = '%s-%s' % (domain.name, indexcls.name) # type: unicode if isinstance(indices_config, list): if indexname not in indices_config: continue @@ -326,10 +326,10 @@ class StandaloneHTMLBuilder(Builder): self.relations = self.env.collect_relations() - rellinks = [] + rellinks = [] # type: List[Tuple[unicode, unicode, unicode, unicode]] if self.use_index: rellinks.append(('genindex', _('General Index'), 'I', _('index'))) - for indexname, indexcls, content, collapse in self.domain_indices: # type: ignore + for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append((indexname, indexcls.localname, diff --git a/sphinx/environment/managers/toctree.py b/sphinx/environment/managers/toctree.py index c272e891d..0cd011ae0 100644 --- a/sphinx/environment/managers/toctree.py +++ b/sphinx/environment/managers/toctree.py @@ -72,7 +72,7 @@ class Toctree(EnvironmentManager): self.numbered_toctrees.add(docname) for subfn, fnset in other.files_to_rebuild.items(): - self.files_to_rebuild.setdefault(subfn, set()).update(fnset & docnames) + self.files_to_rebuild.setdefault(subfn, set()).update(fnset & set(docnames)) def process_doc(self, docname, doctree): # type: (unicode, nodes.Node) -> None diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index a5dee1025..8b03272cd 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -149,7 +149,7 @@ class FilenameUniqDict(dict): def merge_other(self, docnames, other): # type: (List[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None for filename, (docs, unique) in 
other.items(): - for doc in docs & docnames: + for doc in docs & set(docnames): self.add_file(doc, filename) def __getstate__(self): From 69e64cfe26e3df11e046071b5c43928828d71809 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 24 Jan 2017 01:49:00 +0900 Subject: [PATCH 125/190] Index.generate() raises NotImplementedError by default --- CHANGES | 2 ++ sphinx/domains/__init__.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 4729e4190..4fd15bf89 100644 --- a/CHANGES +++ b/CHANGES @@ -8,6 +8,8 @@ Incompatible changes members by default. Thanks to Luc Saffre. * LaTeX ``\includegraphics`` command isn't overloaded: only ``\sphinxincludegraphics`` has the custom code to fit image to available width if oversized. +* The subclasses of ``sphinx.domains.Index`` should override ``generate()`` + method. The default implementation raises NotImplmentedError Features removed ---------------- diff --git a/sphinx/domains/__init__.py b/sphinx/domains/__init__.py index 2389e342c..a0fcafcc6 100644 --- a/sphinx/domains/__init__.py +++ b/sphinx/domains/__init__.py @@ -107,7 +107,7 @@ class Index(object): Qualifier and description are not rendered e.g. in LaTeX output. """ - return tuple() + raise NotImplementedError class Domain(object): From 2ffb43e9b78692316a9c3b68744d56466fc4d8de Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 24 Jan 2017 02:13:37 +0900 Subject: [PATCH 126/190] Fix mypy violations --- sphinx/builders/epub.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/builders/epub.py b/sphinx/builders/epub.py index d351be5a2..d61f4868f 100644 --- a/sphinx/builders/epub.py +++ b/sphinx/builders/epub.py @@ -569,7 +569,7 @@ class EpubBuilder(StandaloneHTMLBuilder): f.write(self.container_template) # type: ignore def content_metadata(self, files, spine, guide): - # type: (List[unicode], Any, Any) -> Dict[unicode, Any] + # type: (List[unicode], List[unicode], List[unicode]) -> Dict[unicode, Any] """Create a dictionary with all metadata for the content.opf file properly escaped. 
""" From fe243bfa805cdf1544ad9fa39ceb0146fd930ff8 Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Tue, 24 Jan 2017 13:33:13 +0100 Subject: [PATCH 127/190] Added author name and changes (Sphinx 1.5.3) --- AUTHORS | 1 + CHANGES | 2 ++ 2 files changed, 3 insertions(+) diff --git a/AUTHORS b/AUTHORS index 24897985e..7c060cde8 100644 --- a/AUTHORS +++ b/AUTHORS @@ -20,6 +20,7 @@ Other contributors, listed alphabetically, are: * Jakob Lykke Andersen -- Rewritten C++ domain * Henrique Bastos -- SVG support for graphviz extension * Daniel Bültmann -- todo extension +* Marco Buttu -- doctest extension (pyversion option) * Jean-François Burnol -- LaTeX improvements * Etienne Desautels -- apidoc module * Michael Droettboom -- inheritance_diagram extension diff --git a/CHANGES b/CHANGES index 4fd15bf89..70f5aeeb8 100644 --- a/CHANGES +++ b/CHANGES @@ -73,6 +73,8 @@ Deprecated Features added -------------- +* #3303: Added ``:pyversion:`` option to the doctest directive + Bugs fixed ---------- From 840f152c93003e22883124f2c02e0acfa27d3504 Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Tue, 24 Jan 2017 14:06:59 +0100 Subject: [PATCH 128/190] Preserved alphabetically order in author list --- AUTHORS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index 7c060cde8..8c225990d 100644 --- a/AUTHORS +++ b/AUTHORS @@ -20,8 +20,8 @@ Other contributors, listed alphabetically, are: * Jakob Lykke Andersen -- Rewritten C++ domain * Henrique Bastos -- SVG support for graphviz extension * Daniel Bültmann -- todo extension -* Marco Buttu -- doctest extension (pyversion option) * Jean-François Burnol -- LaTeX improvements +* Marco Buttu -- doctest extension (pyversion option) * Etienne Desautels -- apidoc module * Michael Droettboom -- inheritance_diagram extension * Charles Duffy -- original graphviz extension From b52118290f964258644a76a97300838c72a0784d Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Wed, 25 Jan 2017 12:26:18 +0100 Subject: [PATCH 129/190] In case of wrong option, warn the right reason. For instance, in case of `:options: +SKIPO` the message was "missing '+' or '-' in +SKIPO option`. Now it will be "SKIPO is not a valid option." --- sphinx/ext/doctest.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index 3e029c19e..b8e24fe1a 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -126,12 +126,17 @@ class TestDirective(Directive): # parse doctest-like output comparison flags option_strings = self.options['options'].replace(',', ' ').split() for option in option_strings: - if (option[0] not in '+-' or option[1:] not in - doctest.OPTIONFLAGS_BY_NAME): # type: ignore + on_or_off, option_name = option[0], option[1:] + if on_or_off not in '+-': # type: ignore self.state.document.reporter.warning( "missing '+' or '-' in '%s' option." % option, line=self.lineno) continue + if option_name not in doctest.OPTIONFLAGS_BY_NAME: # type: ignore + self.state.document.reporter.warning( + "'%s' is not a valid option." % option_name, + line=self.lineno) + continue flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]] # type: ignore node['options'][flag] = (option[0] == '+') if self.name == 'doctest' and 'pyversion' in self.options: From 882bebf90a1821e38203bbb2e2a45adf085a9514 Mon Sep 17 00:00:00 2001 From: Marco Buttu <marco.buttu@gmail.com> Date: Thu, 26 Jan 2017 11:20:24 +0100 Subject: [PATCH 130/190] From version 1.5 to 1.6. 
Warnings wrapped with locale. --- CHANGES | 3 +-- doc/ext/doctest.rst | 2 +- sphinx/ext/doctest.py | 11 ++++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index 70f5aeeb8..e1507cbdd 100644 --- a/CHANGES +++ b/CHANGES @@ -42,6 +42,7 @@ Features added imported members. * C++, add ``:tparam-line-spec:`` option to templated declarations. When specified, each template parameter will be rendered on a separate line. +* #3303: Add ``:pyversion:`` option to the doctest directive. Bugs fixed ---------- @@ -73,8 +74,6 @@ Deprecated Features added -------------- -* #3303: Added ``:pyversion:`` option to the doctest directive - Bugs fixed ---------- diff --git a/doc/ext/doctest.rst b/doc/ext/doctest.rst index 47331dea2..d1cb3c31d 100644 --- a/doc/ext/doctest.rst +++ b/doc/ext/doctest.rst @@ -84,7 +84,7 @@ a comma-separated list of group names. comparison is performed by `distutils.version.LooseVersion <https://www.python.org/dev/peps/pep-0386/#distutils>`__. - .. versionadded:: 1.5 + .. versionadded:: 1.6 Note that like with standard doctests, you have to use ``<BLANKLINE>`` to signal a blank line in the expected output. The ``<BLANKLINE>`` is removed diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index b8e24fe1a..cd6397fb1 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -31,6 +31,7 @@ from sphinx.util import force_decode, logging from sphinx.util.nodes import set_source_info from sphinx.util.console import bold # type: ignore from sphinx.util.osutil import fs_encoding +from sphinx.locale import _ if False: # For type annotation @@ -126,15 +127,15 @@ class TestDirective(Directive): # parse doctest-like output comparison flags option_strings = self.options['options'].replace(',', ' ').split() for option in option_strings: - on_or_off, option_name = option[0], option[1:] - if on_or_off not in '+-': # type: ignore + prefix, option_name = option[0], option[1:] + if prefix not in '+-': # type: ignore self.state.document.reporter.warning( - "missing '+' or '-' in '%s' option." % option, + _("missing '+' or '-' in '%s' option.") % option, line=self.lineno) continue if option_name not in doctest.OPTIONFLAGS_BY_NAME: # type: ignore self.state.document.reporter.warning( - "'%s' is not a valid option." % option_name, + _("'%s' is not a valid option.") % option_name, line=self.lineno) continue flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]] # type: ignore @@ -150,7 +151,7 @@ class TestDirective(Directive): node['options'][flag] = True # Skip the test except ValueError: self.state.document.reporter.warning( - "'%s' is not a valid pyversion option" % option, + _("'%s' is not a valid pyversion option") % option, line=self.lineno) return [node] From c7fca4806c2aaf48ab1a36153f95a464969663a2 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 28 Jan 2017 19:59:26 +0900 Subject: [PATCH 131/190] Update CHANGES for PR#3359 --- CHANGES | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES b/CHANGES index 4fd15bf89..493e35c55 100644 --- a/CHANGES +++ b/CHANGES @@ -42,6 +42,7 @@ Features added imported members. * C++, add ``:tparam-line-spec:`` option to templated declarations. When specified, each template parameter will be rendered on a separate line. 
+* #3359: Allow sphinx.js in a user locale dir to override sphinx.js from Sphinx Bugs fixed ---------- From d1405e4774b3c691f046b50737b678616c972f9a Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 17 Dec 2016 20:40:48 +0900 Subject: [PATCH 132/190] Add sphinx.environment.collectors --- sphinx/application.py | 7 + sphinx/environment/__init__.py | 92 +------------- sphinx/environment/collectors/__init__.py | 51 ++++++++ sphinx/environment/collectors/asset.py | 148 ++++++++++++++++++++++ sphinx/util/__init__.py | 2 +- tests/test_build_html.py | 2 +- 6 files changed, 209 insertions(+), 93 deletions(-) create mode 100644 sphinx/environment/collectors/__init__.py create mode 100644 sphinx/environment/collectors/asset.py diff --git a/sphinx/application.py b/sphinx/application.py index 415a385d2..7e6878a7d 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -55,6 +55,7 @@ if False: from docutils.transform import Transform # NOQA from sphinx.builders import Builder # NOQA from sphinx.domains import Domain, Index # NOQA + from sphinx.environment.collectors import EnvironmentCollector # NOQA # List of all known core events. Maps name to arguments description. events = { @@ -96,6 +97,7 @@ builtin_extensions = ( 'sphinx.domains.python', 'sphinx.domains.rst', 'sphinx.domains.std', + 'sphinx.environment.collectors.asset', 'sphinx.directives', 'sphinx.directives.code', 'sphinx.directives.other', @@ -833,6 +835,11 @@ class Sphinx(object): type='app', subtype='add_source_parser') self._additional_source_parsers[suffix] = parser + def add_env_collector(self, collector): + # type: (Type[EnvironmentCollector]) -> None + logger.debug('[app] adding environment collector: %r', collector) + collector().enable(self) + class TemplateBridge(object): """ diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index c8a05a22b..da31e3148 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -18,7 +18,6 @@ import codecs import fnmatch import warnings from os import path -from glob import glob from collections import defaultdict from six import iteritems, itervalues, class_types, next @@ -39,9 +38,7 @@ from sphinx.util import get_matching_docs, docname_join, FilenameUniqDict, statu from sphinx.util.nodes import clean_astext, WarningStream, is_translatable, \ process_only_nodes from sphinx.util.osutil import SEP, getcwd, fs_encoding, ensuredir -from sphinx.util.images import guess_mimetype -from sphinx.util.i18n import find_catalog_files, get_image_filename_for_language, \ - search_image_for_language +from sphinx.util.i18n import find_catalog_files from sphinx.util.console import bold # type: ignore from sphinx.util.docutils import sphinx_domains from sphinx.util.matching import compile_matchers @@ -319,8 +316,6 @@ class BuildEnvironment(object): self.dependencies.pop(docname, None) self.titles.pop(docname, None) self.longtitles.pop(docname, None) - self.images.purge_doc(docname) - self.dlfiles.purge_doc(docname) for version, changes in self.versionchanges.items(): new = [change for change in changes if change[1] != docname] @@ -350,9 +345,6 @@ class BuildEnvironment(object): self.titles[docname] = other.titles[docname] self.longtitles[docname] = other.longtitles[docname] - self.images.merge_other(docnames, other.images) - self.dlfiles.merge_other(docnames, other.dlfiles) - for version, changes in other.versionchanges.items(): self.versionchanges.setdefault(version, []).extend( change for change in changes if change[1] in 
docnames) @@ -737,8 +729,6 @@ class BuildEnvironment(object): # post-processing self.process_dependencies(docname, doctree) - self.process_images(docname, doctree) - self.process_downloads(docname, doctree) self.process_metadata(docname, doctree) self.create_title_from(docname, doctree) for manager in itervalues(self.managers): @@ -884,86 +874,6 @@ class BuildEnvironment(object): path.normpath(path.join(cwd, dep))) self.dependencies[docname].add(relpath) - def process_downloads(self, docname, doctree): - # type: (unicode, nodes.Node) -> None - """Process downloadable file paths. """ - for node in doctree.traverse(addnodes.download_reference): - targetname = node['reftarget'] - rel_filename, filename = self.relfn2path(targetname, docname) - self.dependencies[docname].add(rel_filename) - if not os.access(filename, os.R_OK): - logger.warning('download file not readable: %s', filename, - location=node) - continue - uniquename = self.dlfiles.add_file(docname, filename) - node['filename'] = uniquename - - def process_images(self, docname, doctree): - # type: (unicode, nodes.Node) -> None - """Process and rewrite image URIs.""" - def collect_candidates(imgpath, candidates): - globbed = {} # type: Dict[unicode, List[unicode]] - for filename in glob(imgpath): - new_imgpath = relative_path(path.join(self.srcdir, 'dummy'), - filename) - try: - mimetype = guess_mimetype(filename) - if mimetype not in candidates: - globbed.setdefault(mimetype, []).append(new_imgpath) - except (OSError, IOError) as err: - logger.warning('image file %s not readable: %s', filename, err, - location=node) - for key, files in iteritems(globbed): - candidates[key] = sorted(files, key=len)[0] # select by similarity - - for node in doctree.traverse(nodes.image): - # Map the mimetype to the corresponding image. The writer may - # choose the best image from these candidates. The special key * is - # set if there is only single candidate to be used by a writer. - # The special key ? is set for nonlocal URIs. - node['candidates'] = candidates = {} - imguri = node['uri'] - if imguri.startswith('data:'): - logger.warning('image data URI found. some builders might not support', - location=node, type='image', subtype='data_uri') - candidates['?'] = imguri - continue - elif imguri.find('://') != -1: - logger.warning('nonlocal image URI found: %s', imguri, - location=node, type='image', subtype='nonlocal_uri') - candidates['?'] = imguri - continue - rel_imgpath, full_imgpath = self.relfn2path(imguri, docname) - if self.config.language: - # substitute figures (ex. 
foo.png -> foo.en.png) - i18n_full_imgpath = search_image_for_language(full_imgpath, self) - if i18n_full_imgpath != full_imgpath: - full_imgpath = i18n_full_imgpath - rel_imgpath = relative_path(path.join(self.srcdir, 'dummy'), - i18n_full_imgpath) - # set imgpath as default URI - node['uri'] = rel_imgpath - if rel_imgpath.endswith(os.extsep + '*'): - if self.config.language: - # Search language-specific figures at first - i18n_imguri = get_image_filename_for_language(imguri, self) - _, full_i18n_imgpath = self.relfn2path(i18n_imguri, docname) - collect_candidates(full_i18n_imgpath, candidates) - - collect_candidates(full_imgpath, candidates) - else: - candidates['*'] = rel_imgpath - - # map image paths to unique image names (so that they can be put - # into a single directory) - for imgpath in itervalues(candidates): - self.dependencies[docname].add(imgpath) - if not os.access(path.join(self.srcdir, imgpath), os.R_OK): - logger.warning('image file not readable: %s', imgpath, - location=node) - continue - self.images.add_file(docname, imgpath) - def process_metadata(self, docname, doctree): # type: (unicode, nodes.Node) -> None """Process the docinfo part of the doctree as metadata. diff --git a/sphinx/environment/collectors/__init__.py b/sphinx/environment/collectors/__init__.py new file mode 100644 index 000000000..444ba7665 --- /dev/null +++ b/sphinx/environment/collectors/__init__.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +""" + sphinx.environment.collectors + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The data collector components for sphinx.environment. + + :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from six import itervalues + +if False: + # For type annotation + from docutils import nodes # NOQA + from sphinx.sphinx import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + + +class EnvironmentCollector(object): + """Base class of data collector for sphinx.environment.""" + + listener_ids = None # type: Dict[unicode, int] + + def enable(self, app): + # type: (Sphinx) -> None + assert self.listener_ids is None + self.listener_ids = {} + self.listener_ids['doctree-read'] = app.connect('doctree-read', self.process_doc) + self.listener_ids['env-merge-info'] = app.connect('env-merge-info', self.merge_other) + self.listener_ids['env-purge-doc'] = app.connect('env-purge-doc', self.clear_doc) + + def disable(self, app): + # type: (Sphinx) -> None + assert self.listener_ids is not None + for listener_id in itervalues(self.listener_ids): + app.disconnect(listener_id) + self.listener_ids = None + + def clear_doc(self, app, env, docname): + # type: (Sphinx, BuildEnvironment, unicode) -> None + raise NotImplementedError + + def merge_other(self, app, env, docnames, other): + # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None + raise NotImplementedError + + def process_doc(self, app, doctree): + # type: (Sphinx, nodes.Node) -> None + raise NotImplementedError diff --git a/sphinx/environment/collectors/asset.py b/sphinx/environment/collectors/asset.py new file mode 100644 index 000000000..54283790c --- /dev/null +++ b/sphinx/environment/collectors/asset.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +""" + sphinx.environment.collectors.asset + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The image collector for sphinx.environment. + + :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import os +from os import path +from glob import glob + +from six import iteritems, itervalues + +from docutils import nodes +from docutils.utils import relative_path + +from sphinx import addnodes +from sphinx.environment.collectors import EnvironmentCollector +from sphinx.util import logging +from sphinx.util.i18n import get_image_filename_for_language, search_image_for_language +from sphinx.util.images import guess_mimetype + +if False: + # For type annotation + from typing import Tuple # NOQA + from docutils import nodes # NOQA + from sphinx.sphinx import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + +logger = logging.getLogger(__name__) + + +class ImageCollector(EnvironmentCollector): + """Image files collector for sphinx.environment.""" + + def clear_doc(self, app, env, docname): + # type: (Sphinx, BuildEnvironment, unicode) -> None + env.images.purge_doc(docname) + + def merge_other(self, app, env, docnames, other): + # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None + env.images.merge_other(docnames, other.images) + + def process_doc(self, app, doctree): + # type: (Sphinx, nodes.Node) -> None + """Process and rewrite image URIs.""" + docname = app.env.docname + + for node in doctree.traverse(nodes.image): + # Map the mimetype to the corresponding image. The writer may + # choose the best image from these candidates. The special key * is + # set if there is only single candidate to be used by a writer. + # The special key ? is set for nonlocal URIs. + candidates = {} # type: Dict[unicode, unicode] + node['candidates'] = candidates + imguri = node['uri'] + if imguri.startswith('data:'): + logger.warning('image data URI found. some builders might not support', + location=node, type='image', subtype='data_uri') + candidates['?'] = imguri + continue + elif imguri.find('://') != -1: + logger.warning('nonlocal image URI found: %s' % imguri, + location=node, + type='image', subtype='nonlocal_uri') + candidates['?'] = imguri + continue + rel_imgpath, full_imgpath = app.env.relfn2path(imguri, docname) + if app.config.language: + # substitute figures (ex. 
foo.png -> foo.en.png) + i18n_full_imgpath = search_image_for_language(full_imgpath, app.env) + if i18n_full_imgpath != full_imgpath: + full_imgpath = i18n_full_imgpath + rel_imgpath = relative_path(path.join(app.srcdir, 'dummy'), + i18n_full_imgpath) + # set imgpath as default URI + node['uri'] = rel_imgpath + if rel_imgpath.endswith(os.extsep + '*'): + if app.config.language: + # Search language-specific figures at first + i18n_imguri = get_image_filename_for_language(imguri, app.env) + _, full_i18n_imgpath = app.env.relfn2path(i18n_imguri, docname) + self.collect_candidates(app.env, full_i18n_imgpath, candidates, node) + + self.collect_candidates(app.env, full_imgpath, candidates, node) + else: + candidates['*'] = rel_imgpath + + # map image paths to unique image names (so that they can be put + # into a single directory) + for imgpath in itervalues(candidates): + app.env.dependencies[docname].add(imgpath) + if not os.access(path.join(app.srcdir, imgpath), os.R_OK): + logger.warning('image file not readable: %s' % imgpath, + location=node) + continue + app.env.images.add_file(docname, imgpath) + + def collect_candidates(self, env, imgpath, candidates, node): + # type: (BuildEnvironment, unicode, Dict[unicode, unicode], nodes.Node) -> None + globbed = {} # type: Dict[unicode, List[unicode]] + for filename in glob(imgpath): + new_imgpath = relative_path(path.join(env.srcdir, 'dummy'), + filename) + try: + mimetype = guess_mimetype(filename) + if mimetype not in candidates: + globbed.setdefault(mimetype, []).append(new_imgpath) + except (OSError, IOError) as err: + logger.warning('image file %s not readable: %s' % (filename, err), + location=node) + for key, files in iteritems(globbed): + candidates[key] = sorted(files, key=len)[0] # select by similarity + + +class DownloadFileCollector(EnvironmentCollector): + """Download files collector for sphinx.environment.""" + + def clear_doc(self, app, env, docname): + # type: (Sphinx, BuildEnvironment, unicode) -> None + env.dlfiles.purge_doc(docname) + + def merge_other(self, app, env, docnames, other): + # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None + env.dlfiles.merge_other(docnames, other.dlfiles) + + def process_doc(self, app, doctree): + # type: (Sphinx, nodes.Node) -> None + """Process downloadable file paths. 
""" + for node in doctree.traverse(addnodes.download_reference): + targetname = node['reftarget'] + rel_filename, filename = app.env.relfn2path(targetname, app.env.docname) + app.env.dependencies[app.env.docname].add(rel_filename) + if not os.access(filename, os.R_OK): + logger.warning('download file not readable: %s' % filename, + location=node) + continue + node['filename'] = app.env.dlfiles.add_file(app.env.docname, filename) + + +def setup(app): + # type: (Sphinx) -> None + app.add_env_collector(ImageCollector) + app.add_env_collector(DownloadFileCollector) diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index 8b03272cd..5fb42b9d5 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -147,7 +147,7 @@ class FilenameUniqDict(dict): self._existing.discard(unique) def merge_other(self, docnames, other): - # type: (List[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None + # type: (Set[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None for filename, (docs, unique) in other.items(): for doc in docs & set(docnames): self.add_file(doc, filename) diff --git a/tests/test_build_html.py b/tests/test_build_html.py index 98a11d1fc..351c2f846 100644 --- a/tests/test_build_html.py +++ b/tests/test_build_html.py @@ -30,10 +30,10 @@ ENV_WARNINGS = """\ WARNING: Explicit markup ends without a blank line; unexpected unindent. %(root)s/index.rst:\\d+: WARNING: Encoding 'utf-8-sig' used for reading included \ file u'%(root)s/wrongenc.inc' seems to be wrong, try giving an :encoding: option +%(root)s/index.rst:\\d+: WARNING: invalid single index entry u'' %(root)s/index.rst:\\d+: WARNING: image file not readable: foo.png %(root)s/index.rst:\\d+: WARNING: nonlocal image URI found: http://www.python.org/logo.png %(root)s/index.rst:\\d+: WARNING: download file not readable: %(root)s/nonexisting.png -%(root)s/index.rst:\\d+: WARNING: invalid single index entry u'' %(root)s/undecodable.rst:\\d+: WARNING: undecodable source characters, replacing \ with "\\?": b?'here: >>>(\\\\|/)xbb<<<((\\\\|/)r)?' """ From 08766abf5200fc4ee348a1f96ab93affc83c61cf Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 18 Dec 2016 00:12:54 +0900 Subject: [PATCH 133/190] Add DependenciesCollector --- sphinx/application.py | 4 +- sphinx/environment/__init__.py | 25 +------- sphinx/environment/collectors/dependencies.py | 59 +++++++++++++++++++ 3 files changed, 64 insertions(+), 24 deletions(-) create mode 100644 sphinx/environment/collectors/dependencies.py diff --git a/sphinx/application.py b/sphinx/application.py index 7e6878a7d..448b03672 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -97,12 +97,14 @@ builtin_extensions = ( 'sphinx.domains.python', 'sphinx.domains.rst', 'sphinx.domains.std', - 'sphinx.environment.collectors.asset', 'sphinx.directives', 'sphinx.directives.code', 'sphinx.directives.other', 'sphinx.directives.patches', 'sphinx.roles', + # collectors should be loaded by specific order + 'sphinx.environment.collectors.dependencies', + 'sphinx.environment.collectors.asset', ) # type: Tuple[unicode, ...] 
CONFIG_FILENAME = 'conf.py' diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index da31e3148..3b45c38b7 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -26,7 +26,7 @@ from six.moves import cPickle as pickle from docutils import nodes from docutils.io import NullOutput from docutils.core import Publisher -from docutils.utils import Reporter, relative_path, get_source_line +from docutils.utils import Reporter, get_source_line from docutils.parsers.rst import roles from docutils.parsers.rst.languages import en as english from docutils.frontend import OptionParser @@ -37,7 +37,7 @@ from sphinx.util import logging from sphinx.util import get_matching_docs, docname_join, FilenameUniqDict, status_iterator from sphinx.util.nodes import clean_astext, WarningStream, is_translatable, \ process_only_nodes -from sphinx.util.osutil import SEP, getcwd, fs_encoding, ensuredir +from sphinx.util.osutil import SEP, ensuredir from sphinx.util.i18n import find_catalog_files from sphinx.util.console import bold # type: ignore from sphinx.util.docutils import sphinx_domains @@ -313,7 +313,6 @@ class BuildEnvironment(object): self.all_docs.pop(docname, None) self.reread_always.discard(docname) self.metadata.pop(docname, None) - self.dependencies.pop(docname, None) self.titles.pop(docname, None) self.longtitles.pop(docname, None) @@ -340,8 +339,6 @@ class BuildEnvironment(object): if docname in other.reread_always: self.reread_always.add(docname) self.metadata[docname] = other.metadata[docname] - if docname in other.dependencies: - self.dependencies[docname] = other.dependencies[docname] self.titles[docname] = other.titles[docname] self.longtitles[docname] = other.longtitles[docname] @@ -728,7 +725,6 @@ class BuildEnvironment(object): doctree = pub.document # post-processing - self.process_dependencies(docname, doctree) self.process_metadata(docname, doctree) self.create_title_from(docname, doctree) for manager in itervalues(self.managers): @@ -857,23 +853,6 @@ class BuildEnvironment(object): # post-processing of read doctrees - def process_dependencies(self, docname, doctree): - # type: (unicode, nodes.Node) -> None - """Process docutils-generated dependency info.""" - cwd = getcwd() - frompath = path.join(path.normpath(self.srcdir), 'dummy') - deps = doctree.settings.record_dependencies - if not deps: - return - for dep in deps.list: - # the dependency path is relative to the working dir, so get - # one relative to the srcdir - if isinstance(dep, bytes): - dep = dep.decode(fs_encoding) - relpath = relative_path(frompath, - path.normpath(path.join(cwd, dep))) - self.dependencies[docname].add(relpath) - def process_metadata(self, docname, doctree): # type: (unicode, nodes.Node) -> None """Process the docinfo part of the doctree as metadata. diff --git a/sphinx/environment/collectors/dependencies.py b/sphinx/environment/collectors/dependencies.py new file mode 100644 index 000000000..b4d35caf7 --- /dev/null +++ b/sphinx/environment/collectors/dependencies.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +""" + sphinx.environment.collectors.dependencies + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The dependencies collector components for sphinx.environment. + + :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from os import path + +from docutils.utils import relative_path + +from sphinx.util.osutil import getcwd, fs_encoding +from sphinx.environment.collectors import EnvironmentCollector + +if False: + # For type annotation + from docutils import nodes # NOQA + from sphinx.sphinx import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + + +class DependenciesCollector(EnvironmentCollector): + """dependencies collector for sphinx.environment.""" + + def clear_doc(self, app, env, docname): + # type: (Sphinx, BuildEnvironment, unicode) -> None + env.dependencies.pop(docname, None) + + def merge_other(self, app, env, docnames, other): + # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None + for docname in docnames: + if docname in other.dependencies: + env.dependencies[docname] = other.dependencies[docname] + + def process_doc(self, app, doctree): + # type: (Sphinx, nodes.Node) -> None + """Process docutils-generated dependency info.""" + cwd = getcwd() + frompath = path.join(path.normpath(app.srcdir), 'dummy') + deps = doctree.settings.record_dependencies + if not deps: + return + for dep in deps.list: + # the dependency path is relative to the working dir, so get + # one relative to the srcdir + if isinstance(dep, bytes): + dep = dep.decode(fs_encoding) + relpath = relative_path(frompath, + path.normpath(path.join(cwd, dep))) + app.env.dependencies[app.env.docname].add(relpath) + + +def setup(app): + # type: (Sphinx) -> None + app.add_env_collector(DependenciesCollector) From fc2a78434d595fd18d72d16413e54941ca851598 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 18 Dec 2016 00:22:37 +0900 Subject: [PATCH 134/190] Add MetadataCollector --- sphinx/application.py | 1 + sphinx/environment/__init__.py | 42 +------------ sphinx/environment/collectors/metadata.py | 72 +++++++++++++++++++++++ 3 files changed, 75 insertions(+), 40 deletions(-) create mode 100644 sphinx/environment/collectors/metadata.py diff --git a/sphinx/application.py b/sphinx/application.py index 448b03672..6ada00b6d 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -105,6 +105,7 @@ builtin_extensions = ( # collectors should be loaded by specific order 'sphinx.environment.collectors.dependencies', 'sphinx.environment.collectors.asset', + 'sphinx.environment.collectors.metadata', ) # type: Tuple[unicode, ...] 
CONFIG_FILENAME = 'conf.py' diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 3b45c38b7..105c4e208 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -186,8 +186,8 @@ class BuildEnvironment(object): # next build # File metadata - self.metadata = {} # type: Dict[unicode, Dict[unicode, Any]] - # docname -> dict of metadata items + self.metadata = defaultdict(dict) # type: Dict[unicode, Dict[unicode, Any]] + # docname -> dict of metadata items # TOC inventory self.titles = {} # type: Dict[unicode, nodes.Node] @@ -312,7 +312,6 @@ class BuildEnvironment(object): if docname in self.all_docs: self.all_docs.pop(docname, None) self.reread_always.discard(docname) - self.metadata.pop(docname, None) self.titles.pop(docname, None) self.longtitles.pop(docname, None) @@ -338,7 +337,6 @@ class BuildEnvironment(object): self.all_docs[docname] = other.all_docs[docname] if docname in other.reread_always: self.reread_always.add(docname) - self.metadata[docname] = other.metadata[docname] self.titles[docname] = other.titles[docname] self.longtitles[docname] = other.longtitles[docname] @@ -725,7 +723,6 @@ class BuildEnvironment(object): doctree = pub.document # post-processing - self.process_metadata(docname, doctree) self.create_title_from(docname, doctree) for manager in itervalues(self.managers): manager.process_doc(docname, doctree) @@ -853,41 +850,6 @@ class BuildEnvironment(object): # post-processing of read doctrees - def process_metadata(self, docname, doctree): - # type: (unicode, nodes.Node) -> None - """Process the docinfo part of the doctree as metadata. - - Keep processing minimal -- just return what docutils says. - """ - self.metadata[docname] = {} - md = self.metadata[docname] - try: - docinfo = doctree[0] - except IndexError: - # probably an empty document - return - if docinfo.__class__ is not nodes.docinfo: - # nothing to see here - return - for node in docinfo: - # nodes are multiply inherited... - if isinstance(node, nodes.authors): - md['authors'] = [author.astext() for author in node] - elif isinstance(node, nodes.TextElement): # e.g. author - md[node.__class__.__name__] = node.astext() - else: - name, body = node - md[name.astext()] = body.astext() - for name, value in md.items(): - if name in ('tocdepth',): - try: - value = int(value) - except ValueError: - value = 0 - md[name] = value - - del doctree[0] - def create_title_from(self, docname, document): # type: (unicode, nodes.Node) -> None """Add a title node to the document (just copy the first section title), diff --git a/sphinx/environment/collectors/metadata.py b/sphinx/environment/collectors/metadata.py new file mode 100644 index 000000000..7a15cc614 --- /dev/null +++ b/sphinx/environment/collectors/metadata.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +""" + sphinx.environment.collectors.metadata + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The metadata collector components for sphinx.environment. + + :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from docutils import nodes + +from sphinx.environment.collectors import EnvironmentCollector + +if False: + # For type annotation + from docutils import nodes # NOQA + from sphinx.sphinx import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + + +class MetadataCollector(EnvironmentCollector): + """metadata collector for sphinx.environment.""" + + def clear_doc(self, app, env, docname): + # type: (Sphinx, BuildEnvironment, unicode) -> None + env.metadata.pop(docname, None) + + def merge_other(self, app, env, docnames, other): + # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None + for docname in docnames: + env.metadata[docname] = other.metadata[docname] + + def process_doc(self, app, doctree): + # type: (Sphinx, nodes.Node) -> None + """Process the docinfo part of the doctree as metadata. + + Keep processing minimal -- just return what docutils says. + """ + md = app.env.metadata[app.env.docname] + try: + docinfo = doctree[0] + except IndexError: + # probably an empty document + return + if docinfo.__class__ is not nodes.docinfo: + # nothing to see here + return + for node in docinfo: + # nodes are multiply inherited... + if isinstance(node, nodes.authors): + md['authors'] = [author.astext() for author in node] + elif isinstance(node, nodes.TextElement): # e.g. author + md[node.__class__.__name__] = node.astext() + else: + name, body = node + md[name.astext()] = body.astext() + for name, value in md.items(): + if name in ('tocdepth',): + try: + value = int(value) + except ValueError: + value = 0 + md[name] = value + + del doctree[0] + + +def setup(app): + # type: (Sphinx) -> None + app.add_env_collector(MetadataCollector) From 43b52c85a0f874d3e5a9608014533f058b6120c2 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 18 Dec 2016 00:36:28 +0900 Subject: [PATCH 135/190] Add TitleCollector --- sphinx/application.py | 1 + sphinx/environment/__init__.py | 32 ------------- sphinx/environment/collectors/title.py | 65 ++++++++++++++++++++++++++ 3 files changed, 66 insertions(+), 32 deletions(-) create mode 100644 sphinx/environment/collectors/title.py diff --git a/sphinx/application.py b/sphinx/application.py index 6ada00b6d..a8049b933 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -106,6 +106,7 @@ builtin_extensions = ( 'sphinx.environment.collectors.dependencies', 'sphinx.environment.collectors.asset', 'sphinx.environment.collectors.metadata', + 'sphinx.environment.collectors.title', ) # type: Tuple[unicode, ...] 
CONFIG_FILENAME = 'conf.py' diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 105c4e208..fd975c4ea 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -46,7 +46,6 @@ from sphinx.util.parallel import ParallelTasks, parallel_available, make_chunks from sphinx.util.websupport import is_commentable from sphinx.errors import SphinxError, ExtensionError from sphinx.versioning import add_uids, merge_doctrees -from sphinx.transforms import SphinxContentsFilter from sphinx.deprecation import RemovedInSphinx20Warning from sphinx.environment.managers.indexentries import IndexEntries from sphinx.environment.managers.toctree import Toctree @@ -312,8 +311,6 @@ class BuildEnvironment(object): if docname in self.all_docs: self.all_docs.pop(docname, None) self.reread_always.discard(docname) - self.titles.pop(docname, None) - self.longtitles.pop(docname, None) for version, changes in self.versionchanges.items(): new = [change for change in changes if change[1] != docname] @@ -337,8 +334,6 @@ class BuildEnvironment(object): self.all_docs[docname] = other.all_docs[docname] if docname in other.reread_always: self.reread_always.add(docname) - self.titles[docname] = other.titles[docname] - self.longtitles[docname] = other.longtitles[docname] for version, changes in other.versionchanges.items(): self.versionchanges.setdefault(version, []).extend( @@ -723,7 +718,6 @@ class BuildEnvironment(object): doctree = pub.document # post-processing - self.create_title_from(docname, doctree) for manager in itervalues(self.managers): manager.process_doc(docname, doctree) for domain in itervalues(self.domains): @@ -848,32 +842,6 @@ class BuildEnvironment(object): self.ref_context.get('py:module'), self.temp_data.get('object'), node.astext())) - # post-processing of read doctrees - - def create_title_from(self, docname, document): - # type: (unicode, nodes.Node) -> None - """Add a title node to the document (just copy the first section title), - and store that title in the environment. - """ - titlenode = nodes.title() - longtitlenode = titlenode - # explicit title set with title directive; use this only for - # the <title> tag in HTML output - if 'title' in document: - longtitlenode = nodes.title() - longtitlenode += nodes.Text(document['title']) - # look for first section title and use that as the title - for node in document.traverse(nodes.section): - visitor = SphinxContentsFilter(document) - node[0].walkabout(visitor) - titlenode += visitor.get_entry_text() - break - else: - # document has no title - titlenode += nodes.Text('<no title>') - self.titles[docname] = titlenode - self.longtitles[docname] = longtitlenode - def note_toctree(self, docname, toctreenode): # type: (unicode, addnodes.toctree) -> None """Note a TOC tree directive in a document and gather information about diff --git a/sphinx/environment/collectors/title.py b/sphinx/environment/collectors/title.py new file mode 100644 index 000000000..a5316fe94 --- /dev/null +++ b/sphinx/environment/collectors/title.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +""" + sphinx.environment.collectors.title + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The title collector components for sphinx.environment. + + :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from docutils import nodes + +from sphinx.environment.collectors import EnvironmentCollector +from sphinx.transforms import SphinxContentsFilter + +if False: + # For type annotation + from docutils import nodes # NOQA + from sphinx.sphinx import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + + +class TitleCollector(EnvironmentCollector): + """title collector for sphinx.environment.""" + + def clear_doc(self, app, env, docname): + # type: (Sphinx, BuildEnvironment, unicode) -> None + env.titles.pop(docname, None) + env.longtitles.pop(docname, None) + + def merge_other(self, app, env, docnames, other): + # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None + for docname in docnames: + env.titles[docname] = other.titles[docname] + env.longtitles[docname] = other.longtitles[docname] + + def process_doc(self, app, doctree): + # type: (Sphinx, nodes.Node) -> None + """Add a title node to the document (just copy the first section title), + and store that title in the environment. + """ + titlenode = nodes.title() + longtitlenode = titlenode + # explicit title set with title directive; use this only for + # the <title> tag in HTML output + if 'title' in doctree: + longtitlenode = nodes.title() + longtitlenode += nodes.Text(doctree['title']) + # look for first section title and use that as the title + for node in doctree.traverse(nodes.section): + visitor = SphinxContentsFilter(doctree) + node[0].walkabout(visitor) + titlenode += visitor.get_entry_text() + break + else: + # document has no title + titlenode += nodes.Text('<no title>') + app.env.titles[app.env.docname] = titlenode + app.env.longtitles[app.env.docname] = longtitlenode + + +def setup(app): + # type: (Sphinx) -> None + app.add_env_collector(TitleCollector) From b2c76f44b6f0a8ed8a7832804f993aab7037532d Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 10 Jan 2017 02:36:37 +0900 Subject: [PATCH 136/190] Add TocTree adapter --- sphinx/builders/html.py | 10 +- sphinx/environment/__init__.py | 30 ++- sphinx/environment/adapters/__init__.py | 10 + sphinx/environment/adapters/toctree.py | 324 ++++++++++++++++++++++++ sphinx/environment/managers/toctree.py | 284 +-------------------- sphinx/ext/autosummary/__init__.py | 3 +- 6 files changed, 369 insertions(+), 292 deletions(-) create mode 100644 sphinx/environment/adapters/__init__.py create mode 100644 sphinx/environment/adapters/toctree.py diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 3a074a1cc..5c3d9601e 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -45,6 +45,7 @@ from sphinx.highlighting import PygmentsBridge from sphinx.util.console import bold, darkgreen # type: ignore from sphinx.writers.html import HTMLWriter, HTMLTranslator, \ SmartyPantsHTMLTranslator +from sphinx.environment.adapters.toctree import TocTree if False: # For type annotation @@ -439,7 +440,7 @@ class StandaloneHTMLBuilder(Builder): meta = self.env.metadata.get(docname) # local TOC and global TOC tree - self_toc = self.env.get_toc_for(docname, self) + self_toc = TocTree(self.env).get_toc_for(docname, self) toc = self.render_partial(self_toc)['fragment'] return dict( @@ -763,7 +764,7 @@ class StandaloneHTMLBuilder(Builder): # type: (unicode, bool, Any) -> unicode if 'includehidden' not in kwds: kwds['includehidden'] = False - return self.render_partial(self.env.get_toctree_for( + return self.render_partial(TocTree(self.env).get_toctree_for( docname, self, collapse, **kwds))['fragment'] def 
get_outfilename(self, pagename): @@ -1010,7 +1011,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): # type: (unicode, bool, Any) -> unicode if 'includehidden' not in kwds: kwds['includehidden'] = False - toctree = self.env.get_toctree_for(docname, self, collapse, **kwds) + toctree = TocTree(self.env).get_toctree_for(docname, self, collapse, **kwds) self.fix_refuris(toctree) return self.render_partial(toctree)['fragment'] @@ -1066,7 +1067,8 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder): def get_doc_context(self, docname, body, metatags): # type: (unicode, unicode, Dict) -> Dict # no relation links... - toc = self.env.get_toctree_for(self.config.master_doc, self, False) # type: Any + toc = TocTree(self.env).get_toctree_for(self.config.master_doc, + self, False) # if there is no toctree, toc is None if toc: self.fix_refuris(toc) diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index fd975c4ea..92a722aae 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -47,6 +47,7 @@ from sphinx.util.websupport import is_commentable from sphinx.errors import SphinxError, ExtensionError from sphinx.versioning import add_uids, merge_doctrees from sphinx.deprecation import RemovedInSphinx20Warning +from sphinx.environment.adapters.toctree import TocTree from sphinx.environment.managers.indexentries import IndexEntries from sphinx.environment.managers.toctree import Toctree @@ -847,17 +848,26 @@ class BuildEnvironment(object): """Note a TOC tree directive in a document and gather information about file relations from it. """ - self.toctree.note_toctree(docname, toctreenode) # type: ignore + warnings.warn('env.note_toctree() is deprecated. ' + 'Use sphinx.environment.adapters.toctre.TocTree instead.', + RemovedInSphinx20Warning) + TocTree(self).note(docname, toctreenode) def get_toc_for(self, docname, builder): - # type: (unicode, Builder) -> addnodes.toctree + # type: (unicode, Builder) -> Dict[unicode, nodes.Node] """Return a TOC nodetree -- for use on the same page only!""" - return self.toctree.get_toc_for(docname, builder) # type: ignore + warnings.warn('env.get_toc_for() is deprecated. ' + 'Use sphinx.environment.adapters.toctre.TocTree instead.', + RemovedInSphinx20Warning) + return TocTree(self).get_toc_for(docname, builder) def get_toctree_for(self, docname, builder, collapse, **kwds): # type: (unicode, Builder, bool, Any) -> addnodes.toctree """Return the global TOC nodetree.""" - return self.toctree.get_toctree_for(docname, builder, collapse, **kwds) # type: ignore + warnings.warn('env.get_toctree_for() is deprecated. ' + 'Use sphinx.environment.adapters.toctre.TocTree instead.', + RemovedInSphinx20Warning) + return TocTree(self).get_toctree_for(docname, builder, collapse, **kwds) def get_domain(self, domainname): # type: (unicode) -> Domain @@ -897,9 +907,9 @@ class BuildEnvironment(object): # now, resolve all toctree nodes for toctreenode in doctree.traverse(addnodes.toctree): - result = self.resolve_toctree(docname, builder, toctreenode, - prune=prune_toctrees, - includehidden=includehidden) + result = TocTree(self).resolve(docname, builder, toctreenode, + prune=prune_toctrees, + includehidden=includehidden) if result is None: toctreenode.replace_self([]) else: @@ -921,9 +931,9 @@ class BuildEnvironment(object): If *collapse* is True, all branches not containing docname will be collapsed. 
""" - return self.toctree.resolve_toctree(docname, builder, toctree, prune, # type: ignore - maxdepth, titles_only, collapse, - includehidden) + return TocTree(self).resolve(docname, builder, toctree, prune, + maxdepth, titles_only, collapse, + includehidden) def resolve_references(self, doctree, fromdocname, builder): # type: (nodes.Node, unicode, Builder) -> None diff --git a/sphinx/environment/adapters/__init__.py b/sphinx/environment/adapters/__init__.py new file mode 100644 index 000000000..12a6fa490 --- /dev/null +++ b/sphinx/environment/adapters/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +""" + sphinx.environment.adapters + ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Sphinx environment adapters + + :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" diff --git a/sphinx/environment/adapters/toctree.py b/sphinx/environment/adapters/toctree.py new file mode 100644 index 000000000..1ab3e229f --- /dev/null +++ b/sphinx/environment/adapters/toctree.py @@ -0,0 +1,324 @@ +# -*- coding: utf-8 -*- +""" + sphinx.environment.adapters.toctree + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Toctree adapter for sphinx.environment. + + :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from six import iteritems + +from docutils import nodes + +from sphinx import addnodes +from sphinx.util import url_re, logging +from sphinx.util.nodes import clean_astext, process_only_nodes + +if False: + # For type annotation + from typing import Any # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + +logger = logging.getLogger(__name__) + + +class TocTree(object): + def __init__(self, env): + # type: (BuildEnvironment) -> None + self.env = env + + def note(self, docname, toctreenode): + # type: (unicode, addnodes.toctree) -> None + """Note a TOC tree directive in a document and gather information about + file relations from it. + """ + if toctreenode['glob']: + self.env.glob_toctrees.add(docname) + if toctreenode.get('numbered'): + self.env.numbered_toctrees.add(docname) + includefiles = toctreenode['includefiles'] + for includefile in includefiles: + # note that if the included file is rebuilt, this one must be + # too (since the TOC of the included file could have changed) + self.env.files_to_rebuild.setdefault(includefile, set()).add(docname) + self.env.toctree_includes.setdefault(docname, []).extend(includefiles) + + def resolve(self, docname, builder, toctree, prune=True, maxdepth=0, + titles_only=False, collapse=False, includehidden=False): + # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node + """Resolve a *toctree* node into individual bullet lists with titles + as items, returning None (if no containing titles are found) or + a new node. + + If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0, + to the value of the *maxdepth* option on the *toctree* node. + If *titles_only* is True, only toplevel document titles will be in the + resulting tree. + If *collapse* is True, all branches not containing docname will + be collapsed. + """ + if toctree.get('hidden', False) and not includehidden: + return None + + # For reading the following two helper function, it is useful to keep + # in mind the node structure of a toctree (using HTML-like node names + # for brevity): + # + # <ul> + # <li> + # <p><a></p> + # <p><a></p> + # ... + # <ul> + # ... 
+ # </ul> + # </li> + # </ul> + # + # The transformation is made in two passes in order to avoid + # interactions between marking and pruning the tree (see bug #1046). + + toctree_ancestors = self.get_toctree_ancestors(docname) + + def _toctree_add_classes(node, depth): + """Add 'toctree-l%d' and 'current' classes to the toctree.""" + for subnode in node.children: + if isinstance(subnode, (addnodes.compact_paragraph, + nodes.list_item)): + # for <p> and <li>, indicate the depth level and recurse + subnode['classes'].append('toctree-l%d' % (depth - 1)) + _toctree_add_classes(subnode, depth) + elif isinstance(subnode, nodes.bullet_list): + # for <ul>, just recurse + _toctree_add_classes(subnode, depth + 1) + elif isinstance(subnode, nodes.reference): + # for <a>, identify which entries point to the current + # document and therefore may not be collapsed + if subnode['refuri'] == docname: + if not subnode['anchorname']: + # give the whole branch a 'current' class + # (useful for styling it differently) + branchnode = subnode + while branchnode: + branchnode['classes'].append('current') + branchnode = branchnode.parent + # mark the list_item as "on current page" + if subnode.parent.parent.get('iscurrent'): + # but only if it's not already done + return + while subnode: + subnode['iscurrent'] = True + subnode = subnode.parent + + def _entries_from_toctree(toctreenode, parents, + separate=False, subtree=False): + """Return TOC entries for a toctree node.""" + refs = [(e[0], e[1]) for e in toctreenode['entries']] + entries = [] + for (title, ref) in refs: + try: + refdoc = None + if url_re.match(ref): + if title is None: + title = ref + reference = nodes.reference('', '', internal=False, + refuri=ref, anchorname='', + *[nodes.Text(title)]) + para = addnodes.compact_paragraph('', '', reference) + item = nodes.list_item('', para) + toc = nodes.bullet_list('', item) + elif ref == 'self': + # 'self' refers to the document from which this + # toctree originates + ref = toctreenode['parent'] + if not title: + title = clean_astext(self.env.titles[ref]) + reference = nodes.reference('', '', internal=True, + refuri=ref, + anchorname='', + *[nodes.Text(title)]) + para = addnodes.compact_paragraph('', '', reference) + item = nodes.list_item('', para) + # don't show subitems + toc = nodes.bullet_list('', item) + else: + if ref in parents: + logger.warning('circular toctree references ' + 'detected, ignoring: %s <- %s', + ref, ' <- '.join(parents), + location=ref) + continue + refdoc = ref + toc = self.env.tocs[ref].deepcopy() + maxdepth = self.env.metadata[ref].get('tocdepth', 0) + if ref not in toctree_ancestors or (prune and maxdepth > 0): + self._toctree_prune(toc, 2, maxdepth, collapse) + process_only_nodes(toc, builder.tags) + if title and toc.children and len(toc.children) == 1: + child = toc.children[0] + for refnode in child.traverse(nodes.reference): + if refnode['refuri'] == ref and \ + not refnode['anchorname']: + refnode.children = [nodes.Text(title)] + if not toc.children: + # empty toc means: no titles will show up in the toctree + logger.warning('toctree contains reference to document %r that ' + 'doesn\'t have a title: no link will be generated', + ref, location=toctreenode) + except KeyError: + # this is raised if the included file does not exist + logger.warning('toctree contains reference to nonexisting document %r', + ref, location=toctreenode) + else: + # if titles_only is given, only keep the main title and + # sub-toctrees + if titles_only: + # delete everything but the toplevel 
title(s) + # and toctrees + for toplevel in toc: + # nodes with length 1 don't have any children anyway + if len(toplevel) > 1: + subtrees = toplevel.traverse(addnodes.toctree) + if subtrees: + toplevel[1][:] = subtrees + else: + toplevel.pop(1) + # resolve all sub-toctrees + for subtocnode in toc.traverse(addnodes.toctree): + if not (subtocnode.get('hidden', False) and + not includehidden): + i = subtocnode.parent.index(subtocnode) + 1 + for item in _entries_from_toctree( + subtocnode, [refdoc] + parents, + subtree=True): + subtocnode.parent.insert(i, item) + i += 1 + subtocnode.parent.remove(subtocnode) + if separate: + entries.append(toc) + else: + entries.extend(toc.children) + if not subtree and not separate: + ret = nodes.bullet_list() + ret += entries + return [ret] + return entries + + maxdepth = maxdepth or toctree.get('maxdepth', -1) + if not titles_only and toctree.get('titlesonly', False): + titles_only = True + if not includehidden and toctree.get('includehidden', False): + includehidden = True + + # NOTE: previously, this was separate=True, but that leads to artificial + # separation when two or more toctree entries form a logical unit, so + # separating mode is no longer used -- it's kept here for history's sake + tocentries = _entries_from_toctree(toctree, [], separate=False) + if not tocentries: + return None + + newnode = addnodes.compact_paragraph('', '') + caption = toctree.attributes.get('caption') + if caption: + caption_node = nodes.caption(caption, '', *[nodes.Text(caption)]) + caption_node.line = toctree.line + caption_node.source = toctree.source + caption_node.rawsource = toctree['rawcaption'] + if hasattr(toctree, 'uid'): + # move uid to caption_node to translate it + caption_node.uid = toctree.uid + del toctree.uid + newnode += caption_node + newnode.extend(tocentries) + newnode['toctree'] = True + + # prune the tree to maxdepth, also set toc depth and current classes + _toctree_add_classes(newnode, 1) + self._toctree_prune(newnode, 1, prune and maxdepth or 0, collapse) + + if len(newnode[-1]) == 0: # No titles found + return None + + # set the target paths in the toctrees (they are not known at TOC + # generation time) + for refnode in newnode.traverse(nodes.reference): + if not url_re.match(refnode['refuri']): + refnode['refuri'] = builder.get_relative_uri( + docname, refnode['refuri']) + refnode['anchorname'] + return newnode + + def get_toctree_ancestors(self, docname): + # type: (unicode) -> List[unicode] + parent = {} + for p, children in iteritems(self.env.toctree_includes): + for child in children: + parent[child] = p + ancestors = [] # type: List[unicode] + d = docname + while d in parent and d not in ancestors: + ancestors.append(d) + d = parent[d] + return ancestors + + def _toctree_prune(self, node, depth, maxdepth, collapse=False): + # type: (nodes.Node, int, int, bool) -> None + """Utility: Cut a TOC at a specified depth.""" + for subnode in node.children[:]: + if isinstance(subnode, (addnodes.compact_paragraph, + nodes.list_item)): + # for <p> and <li>, just recurse + self._toctree_prune(subnode, depth, maxdepth, collapse) + elif isinstance(subnode, nodes.bullet_list): + # for <ul>, determine if the depth is too large or if the + # entry is to be collapsed + if maxdepth > 0 and depth > maxdepth: + subnode.parent.replace(subnode, []) + else: + # cull sub-entries whose parents aren't 'current' + if (collapse and depth > 1 and + 'iscurrent' not in subnode.parent): + subnode.parent.remove(subnode) + else: + # recurse on visible children + 
self._toctree_prune(subnode, depth + 1, maxdepth, collapse) + + def get_toc_for(self, docname, builder): + # type: (unicode, Builder) -> Dict[unicode, nodes.Node] + """Return a TOC nodetree -- for use on the same page only!""" + tocdepth = self.env.metadata[docname].get('tocdepth', 0) + try: + toc = self.env.tocs[docname].deepcopy() + self._toctree_prune(toc, 2, tocdepth) + except KeyError: + # the document does not exist anymore: return a dummy node that + # renders to nothing + return nodes.paragraph() + process_only_nodes(toc, builder.tags) + for node in toc.traverse(nodes.reference): + node['refuri'] = node['anchorname'] or '#' + return toc + + def get_toctree_for(self, docname, builder, collapse, **kwds): + # type: (unicode, Builder, bool, Any) -> nodes.Node + """Return the global TOC nodetree.""" + doctree = self.env.get_doctree(self.env.config.master_doc) + toctrees = [] + if 'includehidden' not in kwds: + kwds['includehidden'] = True + if 'maxdepth' not in kwds: + kwds['maxdepth'] = 0 + kwds['collapse'] = collapse + for toctreenode in doctree.traverse(addnodes.toctree): + toctree = self.resolve(docname, builder, toctreenode, prune=True, **kwds) + if toctree: + toctrees.append(toctree) + if not toctrees: + return None + result = toctrees[0] + for toctree in toctrees[1:]: + result.extend(toctree.children) + return result diff --git a/sphinx/environment/managers/toctree.py b/sphinx/environment/managers/toctree.py index 0cd011ae0..bc0d9f999 100644 --- a/sphinx/environment/managers/toctree.py +++ b/sphinx/environment/managers/toctree.py @@ -15,8 +15,8 @@ from docutils import nodes from sphinx import addnodes from sphinx.util import url_re, logging -from sphinx.util.nodes import clean_astext, process_only_nodes from sphinx.transforms import SphinxContentsFilter +from sphinx.environment.adapters.toctree import TocTree as TocTreeAdapter from sphinx.environment.managers import EnvironmentManager if False: @@ -149,293 +149,23 @@ class Toctree(EnvironmentManager): """Note a TOC tree directive in a document and gather information about file relations from it. 
""" - if toctreenode['glob']: - self.glob_toctrees.add(docname) - if toctreenode.get('numbered'): - self.numbered_toctrees.add(docname) - includefiles = toctreenode['includefiles'] - for includefile in includefiles: - # note that if the included file is rebuilt, this one must be - # too (since the TOC of the included file could have changed) - self.files_to_rebuild.setdefault(includefile, set()).add(docname) - self.toctree_includes.setdefault(docname, []).extend(includefiles) + TocTreeAdapter(self.env).note(docname, toctreenode) def get_toc_for(self, docname, builder): - # type: (unicode, Builder) -> None + # type: (unicode, Builder) -> Dict[unicode, nodes.Node] """Return a TOC nodetree -- for use on the same page only!""" - tocdepth = self.env.metadata[docname].get('tocdepth', 0) - try: - toc = self.tocs[docname].deepcopy() - self._toctree_prune(toc, 2, tocdepth) - except KeyError: - # the document does not exist anymore: return a dummy node that - # renders to nothing - return nodes.paragraph() - process_only_nodes(toc, builder.tags) - for node in toc.traverse(nodes.reference): - node['refuri'] = node['anchorname'] or '#' - return toc + return TocTreeAdapter(self.env).get_toc_for(docname, builder) def get_toctree_for(self, docname, builder, collapse, **kwds): # type: (unicode, Builder, bool, Any) -> nodes.Node """Return the global TOC nodetree.""" - doctree = self.env.get_doctree(self.env.config.master_doc) - toctrees = [] - if 'includehidden' not in kwds: - kwds['includehidden'] = True - if 'maxdepth' not in kwds: - kwds['maxdepth'] = 0 - kwds['collapse'] = collapse - for toctreenode in doctree.traverse(addnodes.toctree): - toctree = self.env.resolve_toctree(docname, builder, toctreenode, - prune=True, **kwds) - if toctree: - toctrees.append(toctree) - if not toctrees: - return None - result = toctrees[0] - for toctree in toctrees[1:]: - result.extend(toctree.children) - return result + return TocTreeAdapter(self.env).get_toctree_for(docname, builder, collapse, **kwds) def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0, titles_only=False, collapse=False, includehidden=False): # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node - """Resolve a *toctree* node into individual bullet lists with titles - as items, returning None (if no containing titles are found) or - a new node. - - If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0, - to the value of the *maxdepth* option on the *toctree* node. - If *titles_only* is True, only toplevel document titles will be in the - resulting tree. - If *collapse* is True, all branches not containing docname will - be collapsed. - """ - if toctree.get('hidden', False) and not includehidden: - return None - - # For reading the following two helper function, it is useful to keep - # in mind the node structure of a toctree (using HTML-like node names - # for brevity): - # - # <ul> - # <li> - # <p><a></p> - # <p><a></p> - # ... - # <ul> - # ... - # </ul> - # </li> - # </ul> - # - # The transformation is made in two passes in order to avoid - # interactions between marking and pruning the tree (see bug #1046). 
- - toctree_ancestors = self.get_toctree_ancestors(docname) - - def _toctree_add_classes(node, depth): - """Add 'toctree-l%d' and 'current' classes to the toctree.""" - for subnode in node.children: - if isinstance(subnode, (addnodes.compact_paragraph, - nodes.list_item)): - # for <p> and <li>, indicate the depth level and recurse - subnode['classes'].append('toctree-l%d' % (depth - 1)) - _toctree_add_classes(subnode, depth) - elif isinstance(subnode, nodes.bullet_list): - # for <ul>, just recurse - _toctree_add_classes(subnode, depth + 1) - elif isinstance(subnode, nodes.reference): - # for <a>, identify which entries point to the current - # document and therefore may not be collapsed - if subnode['refuri'] == docname: - if not subnode['anchorname']: - # give the whole branch a 'current' class - # (useful for styling it differently) - branchnode = subnode - while branchnode: - branchnode['classes'].append('current') - branchnode = branchnode.parent - # mark the list_item as "on current page" - if subnode.parent.parent.get('iscurrent'): - # but only if it's not already done - return - while subnode: - subnode['iscurrent'] = True - subnode = subnode.parent - - def _entries_from_toctree(toctreenode, parents, - separate=False, subtree=False): - """Return TOC entries for a toctree node.""" - refs = [(e[0], e[1]) for e in toctreenode['entries']] - entries = [] - for (title, ref) in refs: - try: - refdoc = None - if url_re.match(ref): - if title is None: - title = ref - reference = nodes.reference('', '', internal=False, - refuri=ref, anchorname='', - *[nodes.Text(title)]) - para = addnodes.compact_paragraph('', '', reference) - item = nodes.list_item('', para) - toc = nodes.bullet_list('', item) - elif ref == 'self': - # 'self' refers to the document from which this - # toctree originates - ref = toctreenode['parent'] - if not title: - title = clean_astext(self.env.titles[ref]) - reference = nodes.reference('', '', internal=True, - refuri=ref, - anchorname='', - *[nodes.Text(title)]) - para = addnodes.compact_paragraph('', '', reference) - item = nodes.list_item('', para) - # don't show subitems - toc = nodes.bullet_list('', item) - else: - if ref in parents: - logger.warning('circular toctree references ' - 'detected, ignoring: %s <- %s', - ref, ' <- '.join(parents), - location=ref) - continue - refdoc = ref - toc = self.tocs[ref].deepcopy() - maxdepth = self.env.metadata[ref].get('tocdepth', 0) - if ref not in toctree_ancestors or (prune and maxdepth > 0): - self._toctree_prune(toc, 2, maxdepth, collapse) - process_only_nodes(toc, builder.tags) - if title and toc.children and len(toc.children) == 1: - child = toc.children[0] - for refnode in child.traverse(nodes.reference): - if refnode['refuri'] == ref and \ - not refnode['anchorname']: - refnode.children = [nodes.Text(title)] - if not toc.children: - # empty toc means: no titles will show up in the toctree - logger.warning('toctree contains reference to document %r that ' - 'doesn\'t have a title: no link will be generated', - ref, location=toctreenode) - except KeyError: - # this is raised if the included file does not exist - logger.warning('toctree contains reference to nonexisting document %r', - ref, location=toctreenode) - else: - # if titles_only is given, only keep the main title and - # sub-toctrees - if titles_only: - # delete everything but the toplevel title(s) - # and toctrees - for toplevel in toc: - # nodes with length 1 don't have any children anyway - if len(toplevel) > 1: - subtrees = toplevel.traverse(addnodes.toctree) - 
if subtrees: - toplevel[1][:] = subtrees - else: - toplevel.pop(1) - # resolve all sub-toctrees - for subtocnode in toc.traverse(addnodes.toctree): - if not (subtocnode.get('hidden', False) and - not includehidden): - i = subtocnode.parent.index(subtocnode) + 1 - for item in _entries_from_toctree( - subtocnode, [refdoc] + parents, - subtree=True): - subtocnode.parent.insert(i, item) - i += 1 - subtocnode.parent.remove(subtocnode) - if separate: - entries.append(toc) - else: - entries.extend(toc.children) - if not subtree and not separate: - ret = nodes.bullet_list() - ret += entries - return [ret] - return entries - - maxdepth = maxdepth or toctree.get('maxdepth', -1) - if not titles_only and toctree.get('titlesonly', False): - titles_only = True - if not includehidden and toctree.get('includehidden', False): - includehidden = True - - # NOTE: previously, this was separate=True, but that leads to artificial - # separation when two or more toctree entries form a logical unit, so - # separating mode is no longer used -- it's kept here for history's sake - tocentries = _entries_from_toctree(toctree, [], separate=False) - if not tocentries: - return None - - newnode = addnodes.compact_paragraph('', '') - caption = toctree.attributes.get('caption') - if caption: - caption_node = nodes.caption(caption, '', *[nodes.Text(caption)]) - caption_node.line = toctree.line - caption_node.source = toctree.source - caption_node.rawsource = toctree['rawcaption'] - if hasattr(toctree, 'uid'): - # move uid to caption_node to translate it - caption_node.uid = toctree.uid - del toctree.uid - newnode += caption_node - newnode.extend(tocentries) - newnode['toctree'] = True - - # prune the tree to maxdepth, also set toc depth and current classes - _toctree_add_classes(newnode, 1) - self._toctree_prune(newnode, 1, prune and maxdepth or 0, collapse) - - if len(newnode[-1]) == 0: # No titles found - return None - - # set the target paths in the toctrees (they are not known at TOC - # generation time) - for refnode in newnode.traverse(nodes.reference): - if not url_re.match(refnode['refuri']): - refnode['refuri'] = builder.get_relative_uri( - docname, refnode['refuri']) + refnode['anchorname'] - return newnode - - def get_toctree_ancestors(self, docname): - # type: (unicode) -> List[unicode] - parent = {} - for p, children in iteritems(self.toctree_includes): - for child in children: - parent[child] = p - ancestors = [] # type: List[unicode] - d = docname - while d in parent and d not in ancestors: - ancestors.append(d) - d = parent[d] - return ancestors - - def _toctree_prune(self, node, depth, maxdepth, collapse=False): - # type: (nodes.Node, int, int, bool) -> None - """Utility: Cut a TOC at a specified depth.""" - for subnode in node.children[:]: - if isinstance(subnode, (addnodes.compact_paragraph, - nodes.list_item)): - # for <p> and <li>, just recurse - self._toctree_prune(subnode, depth, maxdepth, collapse) - elif isinstance(subnode, nodes.bullet_list): - # for <ul>, determine if the depth is too large or if the - # entry is to be collapsed - if maxdepth > 0 and depth > maxdepth: - subnode.parent.replace(subnode, []) - else: - # cull sub-entries whose parents aren't 'current' - if (collapse and depth > 1 and - 'iscurrent' not in subnode.parent): - subnode.parent.remove(subnode) - else: - # recurse on visible children - self._toctree_prune(subnode, depth + 1, maxdepth, collapse) + return TocTreeAdapter(self.env).resolve(docname, builder, toctree, prune, maxdepth, + titles_only, collapse, includehidden) def 
assign_section_numbers(self): # type: () -> List[unicode] diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index 4995c0261..8d521a74d 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -69,6 +69,7 @@ from docutils import nodes import sphinx from sphinx import addnodes +from sphinx.environment.adapters.toctree import TocTree from sphinx.util import import_object, rst, logging from sphinx.pycode import ModuleAnalyzer, PycodeError from sphinx.ext.autodoc import Options @@ -104,7 +105,7 @@ def process_autosummary_toc(app, doctree): try: if (isinstance(subnode, autosummary_toc) and isinstance(subnode[0], addnodes.toctree)): - env.note_toctree(env.docname, subnode[0]) + TocTree(env).note(env.docname, subnode[0]) continue except IndexError: continue From 105951cdb27d100ef3b505e546be13e0dc05be4a Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 10 Jan 2017 13:52:51 +0900 Subject: [PATCH 137/190] Add EnvironmentManager.get_updated_docs() to weaken the coupling env modules --- sphinx/environment/__init__.py | 5 +++-- sphinx/environment/managers/__init__.py | 4 ++++ sphinx/environment/managers/indexentries.py | 4 ++++ sphinx/environment/managers/toctree.py | 4 ++++ 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 92a722aae..ea650bae0 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -647,8 +647,9 @@ class BuildEnvironment(object): def check_dependents(self, already): # type: (Set[unicode]) -> Iterator[unicode] - to_rewrite = (self.toctree.assign_section_numbers() + # type: ignore - self.toctree.assign_figure_numbers()) # type: ignore + to_rewrite = [] + for manager in itervalues(self.managers): + to_rewrite.extend(manager.get_updated_docs()) for docname in set(to_rewrite): if docname not in already: yield docname diff --git a/sphinx/environment/managers/__init__.py b/sphinx/environment/managers/__init__.py index 0822f1091..b2489a9b9 100644 --- a/sphinx/environment/managers/__init__.py +++ b/sphinx/environment/managers/__init__.py @@ -48,3 +48,7 @@ class EnvironmentManager(object): def process_doc(self, docname, doctree): # type: (unicode, nodes.Node) -> None raise NotImplementedError + + def get_updated_docs(self): + # type: () -> List[unicode] + raise NotImplementedError diff --git a/sphinx/environment/managers/indexentries.py b/sphinx/environment/managers/indexentries.py index ef9c84d02..cb972b54c 100644 --- a/sphinx/environment/managers/indexentries.py +++ b/sphinx/environment/managers/indexentries.py @@ -65,6 +65,10 @@ class IndexEntries(EnvironmentManager): else: entries.append(entry + (None,)) + def get_updated_docs(self): + # type: () -> List[unicode] + return [] + def create_index(self, builder, group_entries=True, _fixre=re.compile(r'(.*) ([(][^()]*[)])')): # type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, List[unicode]]]]] # NOQA diff --git a/sphinx/environment/managers/toctree.py b/sphinx/environment/managers/toctree.py index bc0d9f999..92712b4d9 100644 --- a/sphinx/environment/managers/toctree.py +++ b/sphinx/environment/managers/toctree.py @@ -144,6 +144,10 @@ class Toctree(EnvironmentManager): self.tocs[docname] = nodes.bullet_list('') self.toc_num_entries[docname] = numentries[0] + def get_updated_docs(self): + # type: () -> List[unicode] + return self.assign_section_numbers() + self.assign_figure_numbers() + def note_toctree(self, docname, 
toctreenode): # type: (unicode, addnodes.toctree) -> None """Note a TOC tree directive in a document and gather information about From 0b0637deb2f093de80de102348de6c89fe432e11 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 10 Jan 2017 21:43:31 +0900 Subject: [PATCH 138/190] Reimplement ToctreeManager as a collector --- sphinx/application.py | 2 + sphinx/builders/__init__.py | 2 +- sphinx/environment/__init__.py | 9 +- sphinx/environment/collectors/__init__.py | 19 ++- .../{managers => collectors}/toctree.py | 159 +++++++----------- 5 files changed, 87 insertions(+), 104 deletions(-) rename sphinx/environment/{managers => collectors}/toctree.py (65%) diff --git a/sphinx/application.py b/sphinx/application.py index a8049b933..f957d6fbf 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -61,6 +61,7 @@ if False: events = { 'builder-inited': '', 'env-get-outdated': 'env, added, changed, removed', + 'env-get-updated': 'env', 'env-purge-doc': 'env, docname', 'env-before-read-docs': 'env, docnames', 'source-read': 'docname, source text', @@ -107,6 +108,7 @@ builtin_extensions = ( 'sphinx.environment.collectors.asset', 'sphinx.environment.collectors.metadata', 'sphinx.environment.collectors.title', + 'sphinx.environment.collectors.toctree', ) # type: Tuple[unicode, ...] CONFIG_FILENAME = 'conf.py' diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 1461c3e68..dbeca1a34 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -292,7 +292,7 @@ class Builder(object): doccount = len(updated_docnames) logger.info(bold('looking for now-outdated files... '), nonl=1) - for docname in self.env.check_dependents(updated_docnames): + for docname in self.env.check_dependents(self.app, updated_docnames): updated_docnames.add(docname) outdated = len(updated_docnames) - doccount if outdated: diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index ea650bae0..46cdc21ef 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -49,7 +49,6 @@ from sphinx.versioning import add_uids, merge_doctrees from sphinx.deprecation import RemovedInSphinx20Warning from sphinx.environment.adapters.toctree import TocTree from sphinx.environment.managers.indexentries import IndexEntries -from sphinx.environment.managers.toctree import Toctree if False: # For type annotation @@ -248,7 +247,7 @@ class BuildEnvironment(object): # type: () -> None managers = {} manager_class = None # type: Type[EnvironmentManager] - for manager_class in [IndexEntries, Toctree]: # type: ignore + for manager_class in [IndexEntries]: # type: ignore managers[manager_class.name] = manager_class(self) self.attach_managers(managers) @@ -645,11 +644,13 @@ class BuildEnvironment(object): logger.info(bold('waiting for workers...')) tasks.join() - def check_dependents(self, already): - # type: (Set[unicode]) -> Iterator[unicode] + def check_dependents(self, app, already): + # type: (Sphinx, Set[unicode]) -> Iterator[unicode] to_rewrite = [] for manager in itervalues(self.managers): to_rewrite.extend(manager.get_updated_docs()) + for docnames in app.emit('env-get-updated', self): + to_rewrite.extend(docnames) for docname in set(to_rewrite): if docname not in already: yield docname diff --git a/sphinx/environment/collectors/__init__.py b/sphinx/environment/collectors/__init__.py index 444ba7665..3a78d21ba 100644 --- a/sphinx/environment/collectors/__init__.py +++ b/sphinx/environment/collectors/__init__.py @@ -26,10 +26,13 
@@ class EnvironmentCollector(object): def enable(self, app): # type: (Sphinx) -> None assert self.listener_ids is None - self.listener_ids = {} - self.listener_ids['doctree-read'] = app.connect('doctree-read', self.process_doc) - self.listener_ids['env-merge-info'] = app.connect('env-merge-info', self.merge_other) - self.listener_ids['env-purge-doc'] = app.connect('env-purge-doc', self.clear_doc) + self.listener_ids = { + 'doctree-read': app.connect('doctree-read', self.process_doc), + 'env-merge-info': app.connect('env-merge-info', self.merge_other), + 'env-purge-doc': app.connect('env-purge-doc', self.clear_doc), + 'env-get-updated': app.connect('env-get-updated', self.get_updated_docs), + 'env-get-outdated': app.connect('env-get-outdated', self.get_outdated_docs), + } def disable(self, app): # type: (Sphinx) -> None @@ -49,3 +52,11 @@ class EnvironmentCollector(object): def process_doc(self, app, doctree): # type: (Sphinx, nodes.Node) -> None raise NotImplementedError + + def get_updated_docs(self, app, env): + # type: (Sphinx, BuildEnvironment) -> List[unicode] + return [] + + def get_outdated_docs(self, app, env, added, changed, removed): + # type: (Sphinx, BuildEnvironment, unicode, Set[unicode], Set[unicode], Set[unicode]) -> List[unicode] # NOQA + return [] diff --git a/sphinx/environment/managers/toctree.py b/sphinx/environment/collectors/toctree.py similarity index 65% rename from sphinx/environment/managers/toctree.py rename to sphinx/environment/collectors/toctree.py index 92712b4d9..5c9ed5472 100644 --- a/sphinx/environment/managers/toctree.py +++ b/sphinx/environment/collectors/toctree.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- """ - sphinx.environment.managers.toctree - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + sphinx.environment.collectors.toctree + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Toctree manager for sphinx.environment. + Toctree collector for sphinx.environment. :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. 
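A minimal sketch of how a third-party extension could build on the EnvironmentCollector base class wired up above. Everything below is illustrative rather than part of the patch; in particular the storage attribute ``my_first_titles`` and the collector name are hypothetical:

    from docutils import nodes

    from sphinx.environment.collectors import EnvironmentCollector


    class FirstTitleCollector(EnvironmentCollector):
        """Collect the first section title of each document (illustrative)."""

        def _storage(self, env):
            # hypothetical per-environment storage; a real extension would
            # pick its own attribute name
            if not hasattr(env, 'my_first_titles'):
                env.my_first_titles = {}
            return env.my_first_titles

        def clear_doc(self, app, env, docname):
            # connected to 'env-purge-doc' by EnvironmentCollector.enable()
            self._storage(env).pop(docname, None)

        def merge_other(self, app, env, docnames, other):
            # connected to 'env-merge-info'; merges data read by parallel workers
            titles = self._storage(env)
            for docname in docnames:
                if docname in self._storage(other):
                    titles[docname] = self._storage(other)[docname]

        def process_doc(self, app, doctree):
            # connected to 'doctree-read'; runs once per parsed document
            for section in doctree.traverse(nodes.section):
                self._storage(app.env)[app.env.docname] = section[0].astext()
                break


    def setup(app):
        app.add_env_collector(FirstTitleCollector)

The two optional hooks, get_updated_docs() and get_outdated_docs(), default to returning an empty list and only need overriding when the collected data should trigger re-reads.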
@@ -16,67 +16,54 @@ from docutils import nodes from sphinx import addnodes from sphinx.util import url_re, logging from sphinx.transforms import SphinxContentsFilter -from sphinx.environment.adapters.toctree import TocTree as TocTreeAdapter -from sphinx.environment.managers import EnvironmentManager +from sphinx.environment.adapters.toctree import TocTree +from sphinx.environment.collectors import EnvironmentCollector if False: # For type annotation from typing import Any, Tuple # NOQA + from sphinx.application import Sphinx # NOQA from sphinx.builders import Builder # NOQA from sphinx.environment import BuildEnvironment # NOQA logger = logging.getLogger(__name__) -class Toctree(EnvironmentManager): - name = 'toctree' +class TocTreeCollector(EnvironmentCollector): + def clear_doc(self, app, env, docname): + # type: (Sphinx, BuildEnvironment, unicode) -> None + env.tocs.pop(docname, None) + env.toc_secnumbers.pop(docname, None) + env.toc_fignumbers.pop(docname, None) + env.toc_num_entries.pop(docname, None) + env.toctree_includes.pop(docname, None) + env.glob_toctrees.discard(docname) + env.numbered_toctrees.discard(docname) - def __init__(self, env): - # type: (BuildEnvironment) -> None - super(Toctree, self).__init__(env) - - self.tocs = env.tocs - self.toc_num_entries = env.toc_num_entries - self.toc_secnumbers = env.toc_secnumbers - self.toc_fignumbers = env.toc_fignumbers - self.toctree_includes = env.toctree_includes - self.files_to_rebuild = env.files_to_rebuild - self.glob_toctrees = env.glob_toctrees - self.numbered_toctrees = env.numbered_toctrees - - def clear_doc(self, docname): - # type: (unicode) -> None - self.tocs.pop(docname, None) - self.toc_secnumbers.pop(docname, None) - self.toc_fignumbers.pop(docname, None) - self.toc_num_entries.pop(docname, None) - self.toctree_includes.pop(docname, None) - self.glob_toctrees.discard(docname) - self.numbered_toctrees.discard(docname) - - for subfn, fnset in list(self.files_to_rebuild.items()): + for subfn, fnset in list(env.files_to_rebuild.items()): fnset.discard(docname) if not fnset: - del self.files_to_rebuild[subfn] + del env.files_to_rebuild[subfn] - def merge_other(self, docnames, other): - # type: (List[unicode], BuildEnvironment) -> None + def merge_other(self, app, env, docnames, other): + # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None for docname in docnames: - self.tocs[docname] = other.tocs[docname] - self.toc_num_entries[docname] = other.toc_num_entries[docname] + env.tocs[docname] = other.tocs[docname] + env.toc_num_entries[docname] = other.toc_num_entries[docname] if docname in other.toctree_includes: - self.toctree_includes[docname] = other.toctree_includes[docname] + env.toctree_includes[docname] = other.toctree_includes[docname] if docname in other.glob_toctrees: - self.glob_toctrees.add(docname) + env.glob_toctrees.add(docname) if docname in other.numbered_toctrees: - self.numbered_toctrees.add(docname) + env.numbered_toctrees.add(docname) for subfn, fnset in other.files_to_rebuild.items(): - self.files_to_rebuild.setdefault(subfn, set()).update(fnset & set(docnames)) + env.files_to_rebuild.setdefault(subfn, set()).update(fnset & set(docnames)) - def process_doc(self, docname, doctree): - # type: (unicode, nodes.Node) -> None + def process_doc(self, app, doctree): + # type: (Sphinx, nodes.Node) -> None """Build a TOC from the doctree and store it in the inventory.""" + docname = app.env.docname numentries = [0] # nonlocal again... 
def traverse_in_section(node, cls): @@ -109,7 +96,7 @@ class Toctree(EnvironmentManager): item = toctreenode.copy() entries.append(item) # important: do the inventory stuff - self.note_toctree(docname, toctreenode) + TocTree(app.env).note(docname, toctreenode) continue title = sectionnode[0] # copy the contents of the section title, but without references @@ -139,47 +126,24 @@ class Toctree(EnvironmentManager): return [] toc = build_toc(doctree) if toc: - self.tocs[docname] = toc + app.env.tocs[docname] = toc else: - self.tocs[docname] = nodes.bullet_list('') - self.toc_num_entries[docname] = numentries[0] + app.env.tocs[docname] = nodes.bullet_list('') + app.env.toc_num_entries[docname] = numentries[0] - def get_updated_docs(self): - # type: () -> List[unicode] - return self.assign_section_numbers() + self.assign_figure_numbers() + def get_updated_docs(self, app, env): + # type: (Sphinx, BuildEnvironment) -> List[unicode] + return self.assign_section_numbers(env) + self.assign_figure_numbers(env) - def note_toctree(self, docname, toctreenode): - # type: (unicode, addnodes.toctree) -> None - """Note a TOC tree directive in a document and gather information about - file relations from it. - """ - TocTreeAdapter(self.env).note(docname, toctreenode) - - def get_toc_for(self, docname, builder): - # type: (unicode, Builder) -> Dict[unicode, nodes.Node] - """Return a TOC nodetree -- for use on the same page only!""" - return TocTreeAdapter(self.env).get_toc_for(docname, builder) - - def get_toctree_for(self, docname, builder, collapse, **kwds): - # type: (unicode, Builder, bool, Any) -> nodes.Node - """Return the global TOC nodetree.""" - return TocTreeAdapter(self.env).get_toctree_for(docname, builder, collapse, **kwds) - - def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0, - titles_only=False, collapse=False, includehidden=False): - # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node - return TocTreeAdapter(self.env).resolve(docname, builder, toctree, prune, maxdepth, - titles_only, collapse, includehidden) - - def assign_section_numbers(self): - # type: () -> List[unicode] + def assign_section_numbers(self, env): + # type: (BuildEnvironment) -> List[unicode] """Assign a section number to each heading under a numbered toctree.""" # a list of all docnames whose section numbers changed rewrite_needed = [] assigned = set() # type: Set[unicode] - old_secnumbers = self.toc_secnumbers - self.toc_secnumbers = self.env.toc_secnumbers = {} + old_secnumbers = env.toc_secnumbers + env.toc_secnumbers = {} def _walk_toc(node, secnums, depth, titlenode=None): # titlenode is the title of the document, it will get assigned a @@ -224,17 +188,17 @@ class Toctree(EnvironmentManager): logger.warning('%s is already assigned section numbers ' '(nested numbered toctree?)', ref, location=toctreenode, type='toc', subtype='secnum') - elif ref in self.tocs: - secnums = self.toc_secnumbers[ref] = {} + elif ref in env.tocs: + secnums = env.toc_secnumbers[ref] = {} assigned.add(ref) - _walk_toc(self.tocs[ref], secnums, depth, - self.env.titles.get(ref)) + _walk_toc(env.tocs[ref], secnums, depth, + env.titles.get(ref)) if secnums != old_secnumbers.get(ref): rewrite_needed.append(ref) - for docname in self.numbered_toctrees: + for docname in env.numbered_toctrees: assigned.add(docname) - doctree = self.env.get_doctree(docname) + doctree = env.get_doctree(docname) for toctreenode in doctree.traverse(addnodes.toctree): depth = toctreenode.get('numbered', 0) if 
depth: @@ -244,20 +208,20 @@ class Toctree(EnvironmentManager): return rewrite_needed - def assign_figure_numbers(self): - # type: () -> List[unicode] + def assign_figure_numbers(self, env): + # type: (BuildEnvironment) -> List[unicode] """Assign a figure number to each figure under a numbered toctree.""" rewrite_needed = [] assigned = set() # type: Set[unicode] - old_fignumbers = self.toc_fignumbers - self.toc_fignumbers = self.env.toc_fignumbers = {} + old_fignumbers = env.toc_fignumbers + env.toc_fignumbers = {} fignum_counter = {} # type: Dict[unicode, Dict[Tuple[int], int]] def get_section_number(docname, section): anchorname = '#' + section['ids'][0] - secnumbers = self.toc_secnumbers.get(docname, {}) + secnumbers = env.toc_secnumbers.get(docname, {}) if anchorname in secnumbers: secnum = secnumbers.get(anchorname) else: @@ -268,13 +232,13 @@ class Toctree(EnvironmentManager): def get_next_fignumber(figtype, secnum): counter = fignum_counter.setdefault(figtype, {}) - secnum = secnum[:self.env.config.numfig_secnum_depth] + secnum = secnum[:env.config.numfig_secnum_depth] counter[secnum] = counter.get(secnum, 0) + 1 return secnum + (counter[secnum],) def register_fignumber(docname, secnum, figtype, fignode): - self.toc_fignumbers.setdefault(docname, {}) - fignumbers = self.toc_fignumbers[docname].setdefault(figtype, {}) + env.toc_fignumbers.setdefault(docname, {}) + fignumbers = env.toc_fignumbers[docname].setdefault(figtype, {}) figure_id = fignode['ids'][0] fignumbers[figure_id] = get_next_fignumber(figtype, secnum) @@ -298,7 +262,7 @@ class Toctree(EnvironmentManager): continue - figtype = self.env.get_domain('std').get_figtype(subnode) # type: ignore + figtype = env.get_domain('std').get_figtype(subnode) # type: ignore if figtype and subnode['ids']: register_fignumber(docname, secnum, figtype, subnode) @@ -307,13 +271,18 @@ class Toctree(EnvironmentManager): def _walk_doc(docname, secnum): if docname not in assigned: assigned.add(docname) - doctree = self.env.get_doctree(docname) + doctree = env.get_doctree(docname) _walk_doctree(docname, doctree, secnum) - if self.env.config.numfig: - _walk_doc(self.env.config.master_doc, tuple()) - for docname, fignums in iteritems(self.toc_fignumbers): + if env.config.numfig: + _walk_doc(env.config.master_doc, tuple()) + for docname, fignums in iteritems(env.toc_fignumbers): if fignums != old_fignumbers.get(docname): rewrite_needed.append(docname) return rewrite_needed + + +def setup(app): + # type: (Sphinx) -> None + app.add_env_collector(TocTreeCollector) From e2c7b1db4206542c72e0dfbfc78bae9a8d153dfd Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 11 Jan 2017 00:15:34 +0900 Subject: [PATCH 139/190] Add IndexEntries adapter --- sphinx/builders/devhelp.py | 3 +- sphinx/builders/html.py | 3 +- sphinx/builders/htmlhelp.py | 3 +- sphinx/builders/qthelp.py | 3 +- sphinx/environment/__init__.py | 14 +- sphinx/environment/adapters/indexentries.py | 157 ++++++++++++++++++++ sphinx/environment/managers/indexentries.py | 134 +---------------- tests/test_environment_indexentries.py | 2 +- 8 files changed, 177 insertions(+), 142 deletions(-) create mode 100644 sphinx/environment/adapters/indexentries.py diff --git a/sphinx/builders/devhelp.py b/sphinx/builders/devhelp.py index af8bcfeed..031c55184 100644 --- a/sphinx/builders/devhelp.py +++ b/sphinx/builders/devhelp.py @@ -22,6 +22,7 @@ from sphinx import addnodes from sphinx.util import logging from sphinx.util.osutil import make_filename from sphinx.builders.html import 
StandaloneHTMLBuilder +from sphinx.environment.adapters.indexentries import IndexEntries try: import xml.etree.ElementTree as etree @@ -104,7 +105,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder): # Index functions = etree.SubElement(root, 'functions') - index = self.env.create_index(self) + index = IndexEntries(self.env).create_index(self) def write_index(title, refs, subitems): # type: (unicode, List[Any], Any) -> None diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 5c3d9601e..062c56669 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -46,6 +46,7 @@ from sphinx.util.console import bold, darkgreen # type: ignore from sphinx.writers.html import HTMLWriter, HTMLTranslator, \ SmartyPantsHTMLTranslator from sphinx.environment.adapters.toctree import TocTree +from sphinx.environment.adapters.indexentries import IndexEntries if False: # For type annotation @@ -542,7 +543,7 @@ class StandaloneHTMLBuilder(Builder): # type: () -> None # the total count of lines for each index letter, used to distribute # the entries into two columns - genindex = self.env.create_index(self) + genindex = IndexEntries(self.env).create_index(self) indexcounts = [] for _k, entries in genindex: indexcounts.append(sum(1 + len(subitems) diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py index 08e6f9df4..68fd3b1db 100644 --- a/sphinx/builders/htmlhelp.py +++ b/sphinx/builders/htmlhelp.py @@ -19,6 +19,7 @@ from docutils import nodes from sphinx import addnodes from sphinx.builders.html import StandaloneHTMLBuilder +from sphinx.environment.adapters.indexentries import IndexEntries from sphinx.util import logging from sphinx.util.osutil import make_filename from sphinx.util.pycompat import htmlescape @@ -281,7 +282,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): f.write(contents_footer) logger.info('writing index file...') - index = self.env.create_index(self) + index = IndexEntries(self.env).create_index(self) with self.open_file(outdir, outname + '.hhk') as f: f.write('<UL>\n') diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py index 27178676f..25dec7586 100644 --- a/sphinx/builders/qthelp.py +++ b/sphinx/builders/qthelp.py @@ -21,6 +21,7 @@ from docutils import nodes from sphinx import addnodes from sphinx.builders.html import StandaloneHTMLBuilder +from sphinx.environment.adapters.indexentries import IndexEntries from sphinx.util import force_decode, logging from sphinx.util.osutil import make_filename from sphinx.util.pycompat import htmlescape @@ -170,7 +171,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): # keywords keywords = [] - index = self.env.create_index(self, group_entries=False) + index = IndexEntries(self.env).create_index(self, group_entries=False) for (key, group) in index: for title, (refs, subitems, key_) in group: keywords.extend(self.build_keywords(title, refs, subitems)) diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 46cdc21ef..d41b90871 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -47,8 +47,9 @@ from sphinx.util.websupport import is_commentable from sphinx.errors import SphinxError, ExtensionError from sphinx.versioning import add_uids, merge_doctrees from sphinx.deprecation import RemovedInSphinx20Warning +from sphinx.environment.adapters.indexentries import IndexEntries from sphinx.environment.adapters.toctree import TocTree -from sphinx.environment.managers.indexentries import IndexEntries +from sphinx.environment.managers.indexentries import 
IndexEntries as IndexEntriesManager if False: # For type annotation @@ -247,7 +248,7 @@ class BuildEnvironment(object): # type: () -> None managers = {} manager_class = None # type: Type[EnvironmentManager] - for manager_class in [IndexEntries]: # type: ignore + for manager_class in [IndexEntriesManager]: # type: ignore managers[manager_class.name] = manager_class(self) self.attach_managers(managers) @@ -1070,8 +1071,13 @@ class BuildEnvironment(object): def create_index(self, builder, group_entries=True, _fixre=re.compile(r'(.*) ([(][^()]*[)])')): - # type: (Builder, bool, Pattern) -> Any - return self.indices.create_index(builder, group_entries=group_entries, _fixre=_fixre) # type: ignore # NOQA + # type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, List[unicode]]]]] # NOQA + warnings.warn('env.create_index() is deprecated. ' + 'Use sphinx.environment.adapters.indexentreis.IndexEntries instead.', + RemovedInSphinx20Warning) + return IndexEntries(self).create_index(builder, + group_entries=group_entries, + _fixre=_fixre) def collect_relations(self): # type: () -> Dict[unicode, List[unicode]] diff --git a/sphinx/environment/adapters/indexentries.py b/sphinx/environment/adapters/indexentries.py new file mode 100644 index 000000000..9eeb50833 --- /dev/null +++ b/sphinx/environment/adapters/indexentries.py @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- +""" + sphinx.environment.adapters.indexentries + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Index entries adapters for sphinx.environment. + + :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" +import re +import bisect +import unicodedata +import string +from itertools import groupby + +from six import text_type + +from sphinx.locale import _ +from sphinx.util import iteritems, split_into, logging + +if False: + # For type annotation + from typing import Any, Pattern, Tuple # NOQA + from sphinx.builders import Builder # NOQA + from sphinx.environment import BuildEnvironment # NOQA + +logger = logging.getLogger(__name__) + + +class IndexEntries(object): + def __init__(self, env): + # type: (BuildEnvironment) -> None + self.env = env + + def create_index(self, builder, group_entries=True, + _fixre=re.compile(r'(.*) ([(][^()]*[)])')): + # type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, Any]]]] # NOQA + """Create the real index from the collected index entries.""" + from sphinx.environment import NoUri + + new = {} # type: Dict[unicode, List] + + def add_entry(word, subword, main, link=True, dic=new, key=None): + # Force the word to be unicode if it's a ASCII bytestring. + # This will solve problems with unicode normalization later. + # For instance the RFC role will add bytestrings at the moment + word = text_type(word) + entry = dic.get(word) + if not entry: + dic[word] = entry = [[], {}, key] + if subword: + add_entry(subword, '', main, link=link, dic=entry[1], key=key) + elif link: + try: + uri = builder.get_relative_uri('genindex', fn) + '#' + tid + except NoUri: + pass + else: + # maintain links in sorted/deterministic order + bisect.insort(entry[0], (main, uri)) + + for fn, entries in iteritems(self.env.indexentries): + # new entry types must be listed in directives/other.py! 
+ for type, value, tid, main, index_key in entries: + try: + if type == 'single': + try: + entry, subentry = split_into(2, 'single', value) + except ValueError: + entry, = split_into(1, 'single', value) + subentry = '' + add_entry(entry, subentry, main, key=index_key) + elif type == 'pair': + first, second = split_into(2, 'pair', value) + add_entry(first, second, main, key=index_key) + add_entry(second, first, main, key=index_key) + elif type == 'triple': + first, second, third = split_into(3, 'triple', value) + add_entry(first, second + ' ' + third, main, key=index_key) + add_entry(second, third + ', ' + first, main, key=index_key) + add_entry(third, first + ' ' + second, main, key=index_key) + elif type == 'see': + first, second = split_into(2, 'see', value) + add_entry(first, _('see %s') % second, None, + link=False, key=index_key) + elif type == 'seealso': + first, second = split_into(2, 'see', value) + add_entry(first, _('see also %s') % second, None, + link=False, key=index_key) + else: + logger.warning('unknown index entry type %r', type, location=fn) + except ValueError as err: + logger.warning(str(err), location=fn) + + # sort the index entries; put all symbols at the front, even those + # following the letters in ASCII, this is where the chr(127) comes from + def keyfunc(entry, lcletters=string.ascii_lowercase + '_'): + key, (void, void, category_key) = entry + if category_key: + # using specified category key to sort + key = category_key + lckey = unicodedata.normalize('NFD', key.lower()) + if lckey[0:1] in lcletters: + lckey = chr(127) + lckey + # ensure a determinstic order *within* letters by also sorting on + # the entry itself + return (lckey, entry[0]) + newlist = sorted(new.items(), key=keyfunc) + + if group_entries: + # fixup entries: transform + # func() (in module foo) + # func() (in module bar) + # into + # func() + # (in module foo) + # (in module bar) + oldkey = '' # type: unicode + oldsubitems = None # type: Dict[unicode, List] + i = 0 + while i < len(newlist): + key, (targets, subitems, _key) = newlist[i] + # cannot move if it has subitems; structure gets too complex + if not subitems: + m = _fixre.match(key) + if m: + if oldkey == m.group(1): + # prefixes match: add entry as subitem of the + # previous entry + oldsubitems.setdefault(m.group(2), [[], {}, _key])[0].\ + extend(targets) + del newlist[i] + continue + oldkey = m.group(1) + else: + oldkey = key + oldsubitems = subitems + i += 1 + + # group the entries by letter + def keyfunc2(item, letters=string.ascii_uppercase + '_'): + # hack: mutating the subitems dicts to a list in the keyfunc + k, v = item + v[1] = sorted((si, se) for (si, (se, void, void)) in iteritems(v[1])) + if v[2] is None: + # now calculate the key + letter = unicodedata.normalize('NFD', k[0])[0].upper() + if letter in letters: + return letter + else: + # get all other symbols under one heading + return _('Symbols') + else: + return v[2] + return [(key_, list(group)) + for (key_, group) in groupby(newlist, keyfunc2)] diff --git a/sphinx/environment/managers/indexentries.py b/sphinx/environment/managers/indexentries.py index cb972b54c..680ef374d 100644 --- a/sphinx/environment/managers/indexentries.py +++ b/sphinx/environment/managers/indexentries.py @@ -8,23 +8,14 @@ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" -import re -import bisect -import unicodedata -import string -from itertools import groupby -from six import text_type from sphinx import addnodes -from sphinx.util import iteritems, split_index_msg, split_into, logging -from sphinx.locale import _ +from sphinx.util import split_index_msg, logging from sphinx.environment.managers import EnvironmentManager if False: # For type annotation - from typing import Pattern, Tuple # NOQA from docutils import nodes # NOQA - from sphinx.builders import Builder # NOQA from sphinx.environment import BuildEnvironment # NOQA logger = logging.getLogger(__name__) @@ -68,126 +59,3 @@ class IndexEntries(EnvironmentManager): def get_updated_docs(self): # type: () -> List[unicode] return [] - - def create_index(self, builder, group_entries=True, - _fixre=re.compile(r'(.*) ([(][^()]*[)])')): - # type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, List[unicode]]]]] # NOQA - """Create the real index from the collected index entries.""" - from sphinx.environment import NoUri - - new = {} # type: Dict[unicode, List] - - def add_entry(word, subword, main, link=True, dic=new, key=None): - # Force the word to be unicode if it's a ASCII bytestring. - # This will solve problems with unicode normalization later. - # For instance the RFC role will add bytestrings at the moment - word = text_type(word) - entry = dic.get(word) - if not entry: - dic[word] = entry = [[], {}, key] - if subword: - add_entry(subword, '', main, link=link, dic=entry[1], key=key) - elif link: - try: - uri = builder.get_relative_uri('genindex', fn) + '#' + tid - except NoUri: - pass - else: - # maintain links in sorted/deterministic order - bisect.insort(entry[0], (main, uri)) - - for fn, entries in iteritems(self.data): - # new entry types must be listed in directives/other.py! 
- for type, value, tid, main, index_key in entries: - try: - if type == 'single': - try: - entry, subentry = split_into(2, 'single', value) - except ValueError: - entry, = split_into(1, 'single', value) - subentry = '' - add_entry(entry, subentry, main, key=index_key) - elif type == 'pair': - first, second = split_into(2, 'pair', value) - add_entry(first, second, main, key=index_key) - add_entry(second, first, main, key=index_key) - elif type == 'triple': - first, second, third = split_into(3, 'triple', value) - add_entry(first, second + ' ' + third, main, key=index_key) - add_entry(second, third + ', ' + first, main, key=index_key) - add_entry(third, first + ' ' + second, main, key=index_key) - elif type == 'see': - first, second = split_into(2, 'see', value) - add_entry(first, _('see %s') % second, None, - link=False, key=index_key) - elif type == 'seealso': - first, second = split_into(2, 'see', value) - add_entry(first, _('see also %s') % second, None, - link=False, key=index_key) - else: - logger.warning('unknown index entry type %r', type, location=fn) - except ValueError as err: - logger.warning(str(err), location=fn) - - # sort the index entries; put all symbols at the front, even those - # following the letters in ASCII, this is where the chr(127) comes from - def keyfunc(entry, lcletters=string.ascii_lowercase + '_'): - key, (void, void, category_key) = entry - if category_key: - # using specified category key to sort - key = category_key - lckey = unicodedata.normalize('NFD', key.lower()) - if lckey[0:1] in lcletters: - lckey = chr(127) + lckey - # ensure a determinstic order *within* letters by also sorting on - # the entry itself - return (lckey, entry[0]) - newlist = sorted(new.items(), key=keyfunc) - - if group_entries: - # fixup entries: transform - # func() (in module foo) - # func() (in module bar) - # into - # func() - # (in module foo) - # (in module bar) - oldkey = '' # type: unicode - oldsubitems = None # type: Dict[unicode, List] - i = 0 - while i < len(newlist): - key, (targets, subitems, _key) = newlist[i] - # cannot move if it has subitems; structure gets too complex - if not subitems: - m = _fixre.match(key) - if m: - if oldkey == m.group(1): - # prefixes match: add entry as subitem of the - # previous entry - oldsubitems.setdefault(m.group(2), [[], {}, _key])[0].\ - extend(targets) - del newlist[i] - continue - oldkey = m.group(1) - else: - oldkey = key - oldsubitems = subitems - i += 1 - - # group the entries by letter - def keyfunc2(item, letters=string.ascii_uppercase + '_'): - # hack: mutating the subitems dicts to a list in the keyfunc - k, v = item - v[1] = sorted((si, se) for (si, (se, void, void)) in iteritems(v[1])) - if v[2] is None: - # now calculate the key - letter = unicodedata.normalize('NFD', k[0])[0].upper() - if letter in letters: - return letter - else: - # get all other symbols under one heading - return _('Symbols') - else: - return v[2] - return [(key_, list(group)) - for (key_, group) in groupby(newlist, keyfunc2)] diff --git a/tests/test_environment_indexentries.py b/tests/test_environment_indexentries.py index 57a3cf52f..53e0ad65d 100644 --- a/tests/test_environment_indexentries.py +++ b/tests/test_environment_indexentries.py @@ -11,7 +11,7 @@ from collections import namedtuple from sphinx import locale -from sphinx.environment.managers.indexentries import IndexEntries +from sphinx.environment.adapters.indexentries import IndexEntries import mock From 851172b13b05fd814314f60924f38fc37a649468 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA 
<i.tkomiya@gmail.com> Date: Wed, 11 Jan 2017 00:55:57 +0900 Subject: [PATCH 140/190] Reimplement IndexEntriesManager as a collector --- sphinx/application.py | 2 +- sphinx/environment/__init__.py | 42 +-------------- .../{managers => collectors}/indexentries.py | 42 +++++++-------- sphinx/environment/managers/__init__.py | 54 ------------------- tests/test_build_html.py | 2 +- 5 files changed, 24 insertions(+), 118 deletions(-) rename sphinx/environment/{managers => collectors}/indexentries.py (53%) delete mode 100644 sphinx/environment/managers/__init__.py diff --git a/sphinx/application.py b/sphinx/application.py index f957d6fbf..a0d07c89c 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -109,6 +109,7 @@ builtin_extensions = ( 'sphinx.environment.collectors.metadata', 'sphinx.environment.collectors.title', 'sphinx.environment.collectors.toctree', + 'sphinx.environment.collectors.indexentries', ) # type: Tuple[unicode, ...] CONFIG_FILENAME = 'conf.py' @@ -308,7 +309,6 @@ class Sphinx(object): logger.info(bold('loading pickled environment... '), nonl=True) self.env = BuildEnvironment.frompickle( self.srcdir, self.config, path.join(self.doctreedir, ENV_PICKLE_FILENAME)) - self.env.init_managers() self.env.domains = {} for domain in self.domains.keys(): # this can raise if the data version doesn't fit diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index d41b90871..17fafe8d2 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -20,7 +20,7 @@ import warnings from os import path from collections import defaultdict -from six import iteritems, itervalues, class_types, next +from six import itervalues, class_types, next from six.moves import cPickle as pickle from docutils import nodes @@ -49,7 +49,6 @@ from sphinx.versioning import add_uids, merge_doctrees from sphinx.deprecation import RemovedInSphinx20Warning from sphinx.environment.adapters.indexentries import IndexEntries from sphinx.environment.adapters.toctree import TocTree -from sphinx.environment.managers.indexentries import IndexEntries as IndexEntriesManager if False: # For type annotation @@ -58,7 +57,6 @@ if False: from sphinx.builders import Builder # NOQA from sphinx.config import Config # NOQA from sphinx.domains import Domain # NOQA - from sphinx.environment.managers import EnvironmentManager # NOQA logger = logging.getLogger(__name__) @@ -125,7 +123,6 @@ class BuildEnvironment(object): del self.config.values domains = self.domains del self.domains - managers = self.detach_managers() # remove potentially pickling-problematic values from config for key, val in list(vars(self.config).items()): if key.startswith('_') or \ @@ -136,7 +133,6 @@ class BuildEnvironment(object): with open(filename, 'wb') as picklefile: pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL) # reset attributes - self.attach_managers(managers) self.domains = domains self.config.values = values @@ -241,31 +237,6 @@ class BuildEnvironment(object): # attributes of "any" cross references self.ref_context = {} # type: Dict[unicode, Any] - self.managers = {} # type: Dict[unicode, EnvironmentManager] - self.init_managers() - - def init_managers(self): - # type: () -> None - managers = {} - manager_class = None # type: Type[EnvironmentManager] - for manager_class in [IndexEntriesManager]: # type: ignore - managers[manager_class.name] = manager_class(self) - self.attach_managers(managers) - - def attach_managers(self, managers): - # type: (Dict[unicode, EnvironmentManager]) -> None - for 
name, manager in iteritems(managers): - self.managers[name] = manager - manager.attach(self) - - def detach_managers(self): - # type: () -> Dict[unicode, EnvironmentManager] - managers = self.managers - self.managers = {} - for _, manager in iteritems(managers): - manager.detach(self) - return managers - def set_warnfunc(self, func): # type: (Callable) -> None warnings.warn('env.set_warnfunc() is now deprecated. Use sphinx.util.logging instead.', @@ -317,9 +288,6 @@ class BuildEnvironment(object): new = [change for change in changes if change[1] != docname] changes[:] = new - for manager in itervalues(self.managers): - manager.clear_doc(docname) - for domain in self.domains.values(): domain.clear_doc(docname) @@ -340,8 +308,6 @@ class BuildEnvironment(object): self.versionchanges.setdefault(version, []).extend( change for change in changes if change[1] in docnames) - for manager in itervalues(self.managers): - manager.merge_other(docnames, other) for domainname, domain in self.domains.items(): domain.merge_domaindata(docnames, other.domaindata[domainname]) app.emit('env-merge-info', self, docnames, other) @@ -647,9 +613,7 @@ class BuildEnvironment(object): def check_dependents(self, app, already): # type: (Sphinx, Set[unicode]) -> Iterator[unicode] - to_rewrite = [] - for manager in itervalues(self.managers): - to_rewrite.extend(manager.get_updated_docs()) + to_rewrite = [] # type: List[unicode] for docnames in app.emit('env-get-updated', self): to_rewrite.extend(docnames) for docname in set(to_rewrite): @@ -722,8 +686,6 @@ class BuildEnvironment(object): doctree = pub.document # post-processing - for manager in itervalues(self.managers): - manager.process_doc(docname, doctree) for domain in itervalues(self.domains): domain.process_doc(self, docname, doctree) diff --git a/sphinx/environment/managers/indexentries.py b/sphinx/environment/collectors/indexentries.py similarity index 53% rename from sphinx/environment/managers/indexentries.py rename to sphinx/environment/collectors/indexentries.py index 680ef374d..c9aeda7e1 100644 --- a/sphinx/environment/managers/indexentries.py +++ b/sphinx/environment/collectors/indexentries.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- """ - sphinx.environment.managers.indexentries - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + sphinx.environment.collectors.indexentries + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Index entries manager for sphinx.environment. + Index entries collector for sphinx.environment. :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. 
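Besides full collector classes, the new ``env-get-updated`` event that ``check_dependents()`` now fires via ``app.emit()`` can be hooked directly from an extension; every docname returned by a handler is treated as outdated and re-read after the read phase. A minimal sketch, assuming a hypothetical ``my_always_update`` config value:

    def mark_always_updated(app, env):
        # handlers for 'env-get-updated' receive (app, env) and return a list
        # of docnames to re-read, just like
        # EnvironmentCollector.get_updated_docs()
        return [docname for docname in env.found_docs
                if docname in app.config.my_always_update]


    def setup(app):
        # 'my_always_update' is only for this sketch, not part of the change set
        app.add_config_value('my_always_update', [], 'env')
        app.connect('env-get-updated', mark_always_updated)

A collector subclass remains the better fit whenever the same data also needs per-document cleanup ('env-purge-doc') or merging for parallel builds ('env-merge-info').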
@@ -11,36 +11,33 @@ from sphinx import addnodes from sphinx.util import split_index_msg, logging -from sphinx.environment.managers import EnvironmentManager +from sphinx.environment.collectors import EnvironmentCollector if False: # For type annotation from docutils import nodes # NOQA + from sphinx.applicatin import Sphinx # NOQA from sphinx.environment import BuildEnvironment # NOQA logger = logging.getLogger(__name__) -class IndexEntries(EnvironmentManager): +class IndexEntriesCollector(EnvironmentCollector): name = 'indices' - def __init__(self, env): - # type: (BuildEnvironment) -> None - super(IndexEntries, self).__init__(env) - self.data = env.indexentries + def clear_doc(self, app, env, docname): + # type: (Sphinx, BuildEnvironment, unicode) -> None + env.indexentries.pop(docname, None) - def clear_doc(self, docname): - # type: (unicode) -> None - self.data.pop(docname, None) - - def merge_other(self, docnames, other): - # type: (List[unicode], BuildEnvironment) -> None + def merge_other(self, app, env, docnames, other): + # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None for docname in docnames: - self.data[docname] = other.indexentries[docname] + env.indexentries[docname] = other.indexentries[docname] - def process_doc(self, docname, doctree): - # type: (unicode, nodes.Node) -> None - entries = self.data[docname] = [] + def process_doc(self, app, doctree): + # type: (Sphinx, nodes.Node) -> None + docname = app.env.docname + entries = app.env.indexentries[docname] = [] for node in doctree.traverse(addnodes.index): try: for entry in node['entries']: @@ -56,6 +53,7 @@ class IndexEntries(EnvironmentManager): else: entries.append(entry + (None,)) - def get_updated_docs(self): - # type: () -> List[unicode] - return [] + +def setup(app): + # type: (Sphinx) -> None + app.add_env_collector(IndexEntriesCollector) diff --git a/sphinx/environment/managers/__init__.py b/sphinx/environment/managers/__init__.py deleted file mode 100644 index b2489a9b9..000000000 --- a/sphinx/environment/managers/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.environment.managers - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Manager components for sphinx.environment. - - :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -if False: - # For type annotation - from typing import Any # NOQA - from docutils import nodes # NOQA - from sphinx.environment import BuildEnvironment # NOQA - - -class EnvironmentManager(object): - """Base class for sphinx.environment managers.""" - name = None # type: unicode - env = None # type: BuildEnvironment - - def __init__(self, env): - # type: (BuildEnvironment) -> None - self.env = env - - def attach(self, env): - # type: (BuildEnvironment) -> None - self.env = env - if self.name: - setattr(env, self.name, self) - - def detach(self, env): - # type: (BuildEnvironment) -> None - self.env = None - if self.name: - delattr(env, self.name) - - def clear_doc(self, docname): - # type: (unicode) -> None - raise NotImplementedError - - def merge_other(self, docnames, other): - # type: (List[unicode], Any) -> None - raise NotImplementedError - - def process_doc(self, docname, doctree): - # type: (unicode, nodes.Node) -> None - raise NotImplementedError - - def get_updated_docs(self): - # type: () -> List[unicode] - raise NotImplementedError diff --git a/tests/test_build_html.py b/tests/test_build_html.py index 351c2f846..98a11d1fc 100644 --- a/tests/test_build_html.py +++ b/tests/test_build_html.py @@ -30,10 +30,10 @@ ENV_WARNINGS = """\ WARNING: Explicit markup ends without a blank line; unexpected unindent. %(root)s/index.rst:\\d+: WARNING: Encoding 'utf-8-sig' used for reading included \ file u'%(root)s/wrongenc.inc' seems to be wrong, try giving an :encoding: option -%(root)s/index.rst:\\d+: WARNING: invalid single index entry u'' %(root)s/index.rst:\\d+: WARNING: image file not readable: foo.png %(root)s/index.rst:\\d+: WARNING: nonlocal image URI found: http://www.python.org/logo.png %(root)s/index.rst:\\d+: WARNING: download file not readable: %(root)s/nonexisting.png +%(root)s/index.rst:\\d+: WARNING: invalid single index entry u'' %(root)s/undecodable.rst:\\d+: WARNING: undecodable source characters, replacing \ with "\\?": b?'here: >>>(\\\\|/)xbb<<<((\\\\|/)r)?' """ From da15090c83a4b5a2189726bb00314bd1685ca360 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 22 Jan 2017 00:04:11 +0900 Subject: [PATCH 141/190] Update docs for collectors API --- doc/extdev/appapi.rst | 6 ++++++ doc/extdev/collectorapi.rst | 9 +++++++++ doc/extdev/index.rst | 1 + sphinx/environment/collectors/__init__.py | 24 ++++++++++++++++++++++- 4 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 doc/extdev/collectorapi.rst diff --git a/doc/extdev/appapi.rst b/doc/extdev/appapi.rst index f6d21c057..c02e85933 100644 --- a/doc/extdev/appapi.rst +++ b/doc/extdev/appapi.rst @@ -359,6 +359,12 @@ package. .. versionadded:: 1.4 +.. method:: Sphinx.add_env_collector(collector) + + Register a environment collector class (refs: :ref:`collector-api`) + + .. versionadded:: 1.6 + .. method:: Sphinx.require_sphinx(version) Compare *version* (which must be a ``major.minor`` version string, diff --git a/doc/extdev/collectorapi.rst b/doc/extdev/collectorapi.rst new file mode 100644 index 000000000..cb4c30bf3 --- /dev/null +++ b/doc/extdev/collectorapi.rst @@ -0,0 +1,9 @@ +.. _collector-api: + +Environment Collector API +------------------------- + +.. module:: sphinx.environment.collectors + +.. 
autoclass:: EnvironmentCollector + :members: diff --git a/doc/extdev/index.rst b/doc/extdev/index.rst index 1f3871c21..85172abb6 100644 --- a/doc/extdev/index.rst +++ b/doc/extdev/index.rst @@ -50,6 +50,7 @@ APIs used for writing extensions appapi envapi builderapi + collectorapi markupapi domainapi parserapi diff --git a/sphinx/environment/collectors/__init__.py b/sphinx/environment/collectors/__init__.py index 3a78d21ba..b8d73ad1f 100644 --- a/sphinx/environment/collectors/__init__.py +++ b/sphinx/environment/collectors/__init__.py @@ -19,7 +19,13 @@ if False: class EnvironmentCollector(object): - """Base class of data collector for sphinx.environment.""" + """An EnvironmentCollector is a specific data collector from each document. + + It gathers data and stores :py:class:`BuildEnvironment + <sphinx.environment.BuildEnvironment>` as a database. Examples of specific + data would be images, download files, section titles, metadatas, index + entries and toctrees, etc. + """ listener_ids = None # type: Dict[unicode, int] @@ -43,20 +49,36 @@ class EnvironmentCollector(object): def clear_doc(self, app, env, docname): # type: (Sphinx, BuildEnvironment, unicode) -> None + """Remove specified data of a document. + + This method is called on the removal of the document.""" raise NotImplementedError def merge_other(self, app, env, docnames, other): # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None + """Merge in specified data regarding docnames from a different `BuildEnvironment` + object which coming from a subprocess in parallel builds.""" raise NotImplementedError def process_doc(self, app, doctree): # type: (Sphinx, nodes.Node) -> None + """Process a document and gather specific data from it. + + This method is called after the document is read.""" raise NotImplementedError def get_updated_docs(self, app, env): # type: (Sphinx, BuildEnvironment) -> List[unicode] + """Return a list of docnames to re-read. + + This methods is called after reading the whole of documents (experimental). + """ return [] def get_outdated_docs(self, app, env, added, changed, removed): # type: (Sphinx, BuildEnvironment, unicode, Set[unicode], Set[unicode], Set[unicode]) -> List[unicode] # NOQA + """Return a list of docnames to re-read. + + This methods is called before reading the documents. 
+ """ return [] From 21ef6b7e0fdf4b63f3182cbc15625f842f77c8cd Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 28 Jan 2017 20:51:10 +0900 Subject: [PATCH 142/190] Fix mypy violation --- sphinx/application.py | 2 +- sphinx/ext/doctest.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 415a385d2..839d0101e 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -793,7 +793,7 @@ class Sphinx(object): # type: (unicode, unicode) -> None logger.debug('[app] adding latex package: %r', packagename) if hasattr(self.builder, 'usepackages'): # only for LaTeX builder - self.builder.usepackages.append((packagename, options)) + self.builder.usepackages.append((packagename, options)) # type: ignore def add_lexer(self, alias, lexer): # type: (unicode, Any) -> None diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index cd6397fb1..984642312 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -147,7 +147,7 @@ class TestDirective(Directive): operand, option_version = [item.strip() for item in option.split()] running_version = platform.python_version() if not compare_version(running_version, option_version, operand): - flag = doctest.OPTIONFLAGS_BY_NAME['SKIP'] + flag = doctest.OPTIONFLAGS_BY_NAME['SKIP'] # type: ignore node['options'][flag] = True # Skip the test except ValueError: self.state.document.reporter.warning( From e6693b5cae5083fdc7bc68304f6d0a01303e653e Mon Sep 17 00:00:00 2001 From: Timotheus Kampik <timotheus.kampik@signavio.com> Date: Sun, 29 Jan 2017 10:58:22 +0100 Subject: [PATCH 143/190] document markdown support #2303 #825 --- doc/config.rst | 7 +++++-- doc/contents.rst | 1 + doc/markdown.rst | 44 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 2 deletions(-) create mode 100644 doc/markdown.rst diff --git a/doc/config.rst b/doc/config.rst index 5613d0bd6..c082dba2f 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -101,10 +101,13 @@ General configuration suffix that is not in the dictionary will be parsed with the default reStructuredText parser. - For example:: - source_parsers = {'.md': 'some.markdown.module.Parser'} + source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'} + + .. note:: + + Read more about how to use Markdown with Sphinx at :ref:`markdown`. .. versionadded:: 1.3 diff --git a/doc/contents.rst b/doc/contents.rst index 0f5527bae..92fb13fcb 100644 --- a/doc/contents.rst +++ b/doc/contents.rst @@ -19,6 +19,7 @@ Sphinx documentation contents theming templating latex + markdown extensions extdev/index websupport diff --git a/doc/markdown.rst b/doc/markdown.rst new file mode 100644 index 000000000..04ae29466 --- /dev/null +++ b/doc/markdown.rst @@ -0,0 +1,44 @@ +.. highlightlang:: python + +.. _markdown: + +Markdown support +================ + +`Markdown <https://daringfireball.net/projects/markdown/>`__ is a lightweight markup language with a simplistic plain +text formatting syntax. +It exists in many syntactically different *flavors*. +To support Markdown-based documentation, Sphinx can use `CommonMark-py <https://github.com/rtfd/CommonMark-py>`__, a +Python package for parsing the `CommonMark <http://commonmark.org/>`__ flavor. In addition, Sphinx uses +`recommonmark <http://recommonmark.readthedocs.io/en/latest/index.html>`__, a Docutils bridge to CommonMark. + + +Configuration +------------- + +To configure your Sphinx project for markdown support, proceed as follows: + +#. 
Install CommonMark version **0.5.4** and recommonmark: + + :: + + pip install commonmark==0.5.4 recommonmark + +#. Add the Markdown parser to the ``source_parsers`` configuration variable in your Sphinx configuration file: + + :: + + source_parsers = { + '.md': 'recommonmark.parser.CommonMarkParser', + } + + You can replace `.md` with a filename extension of your choice. + +#. Add the Markdown filename extension to the ``source_suffix`` configuration variable: + + :: + + source_suffix = ['.rst', '.md'] + +#. You can further configure recommonmark to allow custom syntax that standard CommonMark doesn't support. Read more in + the `recommonmark documentation <http://recommonmark.readthedocs.io/en/latest/auto_structify.html>`__. From 12d639873953847de31ec99742b42e50e89ed58c Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 29 Jan 2017 18:10:49 +0900 Subject: [PATCH 144/190] Move doc reference to under standard domain --- sphinx/domains/std.py | 24 +++++++++++++++++++++++- sphinx/environment/__init__.py | 30 ++++-------------------------- sphinx/ext/intersphinx.py | 3 --- sphinx/roles.py | 2 -- sphinx/util/nodes.py | 9 ++++++--- tests/test_ext_intersphinx.py | 5 +++++ 6 files changed, 38 insertions(+), 35 deletions(-) diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py index eeca858ca..300fc9c19 100644 --- a/sphinx/domains/std.py +++ b/sphinx/domains/std.py @@ -23,7 +23,7 @@ from sphinx.roles import XRefRole from sphinx.locale import l_, _ from sphinx.domains import Domain, ObjType from sphinx.directives import ObjectDescription -from sphinx.util import ws_re, logging +from sphinx.util import ws_re, logging, docname_join from sphinx.util.nodes import clean_astext, make_refnode if False: @@ -465,6 +465,7 @@ class StandardDomain(Domain): searchprio=-1), 'envvar': ObjType(l_('environment variable'), 'envvar'), 'cmdoption': ObjType(l_('program option'), 'option'), + 'doc': ObjType(l_('document'), 'doc', searchprio=-1) } # type: Dict[unicode, ObjType] directives = { @@ -491,6 +492,8 @@ class StandardDomain(Domain): warn_dangling=True), # links to labels, without a different title 'keyword': XRefRole(warn_dangling=True), + # links to documents + 'doc': XRefRole(warn_dangling=True, innernodeclass=nodes.inline), } # type: Dict[unicode, Union[RoleFunction, XRefRole]] initial_data = { @@ -515,6 +518,7 @@ class StandardDomain(Domain): 'the label must precede a section header)', 'numref': 'undefined label: %(target)s', 'keyword': 'unknown keyword: %(target)s', + 'doc': 'unknown document: %(target)s', 'option': 'unknown option: %(target)s', 'citation': 'citation not found: %(target)s', } @@ -650,6 +654,8 @@ class StandardDomain(Domain): resolver = self._resolve_numref_xref elif typ == 'keyword': resolver = self._resolve_keyword_xref + elif typ == 'doc': + resolver = self._resolve_doc_xref elif typ == 'option': resolver = self._resolve_option_xref elif typ == 'citation': @@ -747,6 +753,22 @@ class StandardDomain(Domain): return make_refnode(builder, fromdocname, docname, labelid, contnode) + def _resolve_doc_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA + # directly reference to document by source name; can be absolute or relative + refdoc = node.get('refdoc', fromdocname) + docname = docname_join(refdoc, node['reftarget']) + if docname not in env.all_docs: + return None + else: + if node['refexplicit']: + # reference with explicit title + 
caption = node.astext() + else: + caption = clean_astext(env.titles[docname]) + innernode = nodes.inline(caption, caption, classes=['doc']) + return make_refnode(builder, fromdocname, docname, None, innernode) + def _resolve_option_xref(self, env, fromdocname, builder, typ, target, node, contnode): # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA progname = node.get('std:program') diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index c8a05a22b..51ede85f0 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -35,9 +35,8 @@ from docutils.frontend import OptionParser from sphinx import addnodes from sphinx.io import SphinxStandaloneReader, SphinxDummyWriter, SphinxFileInput from sphinx.util import logging -from sphinx.util import get_matching_docs, docname_join, FilenameUniqDict, status_iterator -from sphinx.util.nodes import clean_astext, WarningStream, is_translatable, \ - process_only_nodes +from sphinx.util import get_matching_docs, FilenameUniqDict, status_iterator +from sphinx.util.nodes import WarningStream, is_translatable, process_only_nodes from sphinx.util.osutil import SEP, getcwd, fs_encoding, ensuredir from sphinx.util.images import guess_mimetype from sphinx.util.i18n import find_catalog_files, get_image_filename_for_language, \ @@ -1129,8 +1128,6 @@ class BuildEnvironment(object): # really hardwired reference types elif typ == 'any': newnode = self._resolve_any_reference(builder, refdoc, node, contnode) - elif typ == 'doc': - newnode = self._resolve_doc_reference(builder, refdoc, node, contnode) # no new node found? try the missing-reference event if newnode is None: newnode = builder.app.emit_firstresult( @@ -1166,8 +1163,6 @@ class BuildEnvironment(object): return if domain and typ in domain.dangling_warnings: msg = domain.dangling_warnings[typ] - elif typ == 'doc': - msg = 'unknown document: %(target)s' elif node.get('refdomain', 'std') not in ('', 'std'): msg = '%s:%s reference target not found: %%(target)s' % \ (node['refdomain'], typ) @@ -1176,31 +1171,14 @@ class BuildEnvironment(object): logger.warning(msg % {'target': target}, location=node, type='ref', subtype=typ) - def _resolve_doc_reference(self, builder, refdoc, node, contnode): - # type: (Builder, unicode, nodes.Node, nodes.Node) -> nodes.Node - # directly reference to document by source name; - # can be absolute or relative - docname = docname_join(refdoc, node['reftarget']) - if docname in self.all_docs: - if node['refexplicit']: - # reference with explicit title - caption = node.astext() - else: - caption = clean_astext(self.titles[docname]) - innernode = nodes.inline(caption, caption) - innernode['classes'].append('doc') - newnode = nodes.reference('', '', internal=True) - newnode['refuri'] = builder.get_relative_uri(refdoc, docname) - newnode.append(innernode) - return newnode - def _resolve_any_reference(self, builder, refdoc, node, contnode): # type: (Builder, unicode, nodes.Node, nodes.Node) -> nodes.Node """Resolve reference generated by the "any" role.""" target = node['reftarget'] results = [] # type: List[Tuple[unicode, nodes.Node]] # first, try resolving as :doc: - doc_ref = self._resolve_doc_reference(builder, refdoc, node, contnode) + doc_ref = self.domains['std'].resolve_xref(self, refdoc, builder, 'doc', + target, node, contnode) if doc_ref: results.append(('doc', doc_ref)) # next, do the standard domain (makes this a priority) diff --git a/sphinx/ext/intersphinx.py 
b/sphinx/ext/intersphinx.py index 884778cbc..d1be888ba 100644 --- a/sphinx/ext/intersphinx.py +++ b/sphinx/ext/intersphinx.py @@ -338,9 +338,6 @@ def missing_reference(app, env, node, contnode): for domain in env.domains.values() for objtype in domain.object_types] domain = None - elif node['reftype'] == 'doc': - domain = 'std' # special case - objtypes = ['std:doc'] else: domain = node.get('refdomain') if not domain: diff --git a/sphinx/roles.py b/sphinx/roles.py index 6723b0122..32894a8e7 100644 --- a/sphinx/roles.py +++ b/sphinx/roles.py @@ -323,8 +323,6 @@ def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): specific_docroles = { # links to download references 'download': XRefRole(nodeclass=addnodes.download_reference), - # links to documents - 'doc': XRefRole(warn_dangling=True, innernodeclass=nodes.inline), # links to anything 'any': AnyXRefRole(warn_dangling=True), diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py index f5091df99..17584e7c9 100644 --- a/sphinx/util/nodes.py +++ b/sphinx/util/nodes.py @@ -325,11 +325,14 @@ def make_refnode(builder, fromdocname, todocname, targetid, child, title=None): # type: (Builder, unicode, unicode, unicode, nodes.Node, unicode) -> nodes.reference """Shortcut to create a reference node.""" node = nodes.reference('', '', internal=True) - if fromdocname == todocname: + if fromdocname == todocname and targetid: node['refid'] = targetid else: - node['refuri'] = (builder.get_relative_uri(fromdocname, todocname) + - '#' + targetid) + if targetid: + node['refuri'] = (builder.get_relative_uri(fromdocname, todocname) + + '#' + targetid) + else: + node['refuri'] = builder.get_relative_uri(fromdocname, todocname) if title: node['reftitle'] = title node.append(child) diff --git a/tests/test_ext_intersphinx.py b/tests/test_ext_intersphinx.py index 934b8a2bd..4d028c151 100644 --- a/tests/test_ext_intersphinx.py +++ b/tests/test_ext_intersphinx.py @@ -43,6 +43,7 @@ module2 py:module 0 foo.html#module-$ - module1.func py:function 1 sub/foo.html#$ - CFunc c:function 2 cfunc.html#CFunc - a term std:term -1 glossary.html#term-a-term - +docname std:doc -1 docname.html - a term including:colon std:term -1 glossary.html#term-a-term-including-colon - '''.encode('utf-8')) @@ -212,6 +213,10 @@ def test_missing_reference(tempdir, app, status, warning): rn = reference_check('py', 'mod', 'py3krelparent:module1', 'foo', refdoc='sub/dir/test') assert rn['refuri'] == '../../../../py3k/foo.html#module-module1' + # check refs of standard domain + rn = reference_check('std', 'doc', 'docname', 'docname') + assert rn['refuri'] == 'https://docs.python.org/docname.html' + def test_load_mappings_warnings(tempdir, app, status, warning): """ From c933184b28a4db22eaf3f8204f18b21ba2bff1dc Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 28 Jan 2017 20:47:44 +0900 Subject: [PATCH 145/190] Update CHANGES --- CHANGES | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGES b/CHANGES index 2eeb0fb33..8483518aa 100644 --- a/CHANGES +++ b/CHANGES @@ -62,6 +62,14 @@ Deprecated * ``Sphinx.status_iterator()` and ``Sphinx.old_status_iterator()`` is now deprecated. Please use ``sphinx.util:status_iterator()`` intead. * ``BuildEnvironment.set_warnfunc()`` is now deprecated +* Following methods of ``BuildEnvironment`` is now deprecated. 
+ + - ``BuildEnvironment.note_toctree()`` + - ``BuildEnvironment.get_toc_for()`` + - ``BuildEnvironment.get_toctree_for()`` + - ``BuildEnvironment.create_index()`` + + Please use ``sphinx.environment.adapters`` modules instead. Release 1.5.3 (in development) ============================== From 7bf7b8175105232481e126d65165e3fcf69dabd1 Mon Sep 17 00:00:00 2001 From: adrian5 <adrian5@users.noreply.github.com> Date: Tue, 31 Jan 2017 01:29:18 +0100 Subject: [PATCH 146/190] Update toctree.rst --- doc/markup/toctree.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/markup/toctree.rst b/doc/markup/toctree.rst index a0161ee3c..78a72c1b4 100644 --- a/doc/markup/toctree.rst +++ b/doc/markup/toctree.rst @@ -41,7 +41,7 @@ tables of contents. The ``toctree`` directive is the central element. * Tables of contents from all those documents are inserted, with a maximum depth of two, that means one nested heading. ``toctree`` directives in those documents are also taken into account. - * Sphinx knows that the relative order of the documents ``intro``, + * Sphinx knows the relative order of the documents ``intro``, ``strings`` and so forth, and it knows that they are children of the shown document, the library index. From this information it generates "next chapter", "previous chapter" and "parent chapter" links. From 7c10e52850cdbeb05b59bd564ce848b36c55f00d Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 1 Feb 2017 14:25:33 +0900 Subject: [PATCH 147/190] Fix #3378: latex: Support ``:widths:`` option of table directives --- CHANGES | 1 + sphinx/writers/latex.py | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/CHANGES b/CHANGES index e53aabdb2..060b46ee7 100644 --- a/CHANGES +++ b/CHANGES @@ -44,6 +44,7 @@ Features added When specified, each template parameter will be rendered on a separate line. * #3359: Allow sphinx.js in a user locale dir to override sphinx.js from Sphinx * #3303: Add ``:pyversion:`` option to the doctest directive. 
+* #3378: latex: Support ``:widths:`` option of table directives Bugs fixed ---------- diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 41ca74eaa..519e655ce 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -321,6 +321,7 @@ class Table(object): self.col = 0 self.colcount = 0 self.colspec = None # type: unicode + self.colwidths = [] # type: List[int] self.rowcount = 0 self.had_head = False self.has_problematic = False @@ -1203,6 +1204,12 @@ class LaTeXTranslator(nodes.NodeVisitor): endmacro = '\\end{tabulary}\n\n' if self.table.colspec: self.body.append(self.table.colspec) + elif self.table.colwidths: + total = sum(self.table.colwidths) + colspec = ['p{\\dimexpr(\\linewidth-\\arrayrulewidth)*%d/%d' + '-2\\tabcolsep-\\arrayrulewidth\\relax}' % (width, total) + for width in self.table.colwidths] + self.body.append('{|%s|}\n' % '|'.join(colspec)) else: if self.table.has_problematic: colspec = ('*{%d}{p{\\dimexpr(\\linewidth-\\arrayrulewidth)/%d' @@ -1252,6 +1259,8 @@ class LaTeXTranslator(nodes.NodeVisitor): def visit_colspec(self, node): # type: (nodes.Node) -> None self.table.colcount += 1 + if 'colwidth' in node: + self.table.colwidths.append(node['colwidth']) def depart_colspec(self, node): # type: (nodes.Node) -> None From 98e0f6b56f3fb90710fa4091b222d779b68ac00c Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 2 Feb 2017 01:42:08 +0900 Subject: [PATCH 148/190] Use Ubuntu/trusty for testing --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 33c93038f..c787aa8d9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,6 @@ language: python sudo: false +dist: trusty cache: directories: - $HOME/.cache/pip From 98a39f8a51d3b544d6d9270e48d3deaa50178cda Mon Sep 17 00:00:00 2001 From: Timotheus Kampik <timotheus.kampik@signavio.com> Date: Wed, 1 Feb 2017 19:16:05 +0100 Subject: [PATCH 149/190] document markdown support: work in review comments #2303 #825 --- doc/markdown.rst | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/doc/markdown.rst b/doc/markdown.rst index 04ae29466..75a35b9d0 100644 --- a/doc/markdown.rst +++ b/doc/markdown.rst @@ -8,21 +8,22 @@ Markdown support `Markdown <https://daringfireball.net/projects/markdown/>`__ is a lightweight markup language with a simplistic plain text formatting syntax. It exists in many syntactically different *flavors*. -To support Markdown-based documentation, Sphinx can use `CommonMark-py <https://github.com/rtfd/CommonMark-py>`__, a -Python package for parsing the `CommonMark <http://commonmark.org/>`__ flavor. In addition, Sphinx uses -`recommonmark <http://recommonmark.readthedocs.io/en/latest/index.html>`__, a Docutils bridge to CommonMark. +To support Markdown-based documentation, Sphinx can use +`recommonmark <http://recommonmark.readthedocs.io/en/latest/index.html>`__. +recommonmark is a Docutils bridge to `CommonMark-py <https://github.com/rtfd/CommonMark-py>`__, a +Python package for parsing the `CommonMark <http://commonmark.org/>`__ Markdown flavor. Configuration ------------- -To configure your Sphinx project for markdown support, proceed as follows: +To configure your Sphinx project for Markdown support, proceed as follows: -#. Install CommonMark version **0.5.4** and recommonmark: +#. Install recommonmark: :: - pip install commonmark==0.5.4 recommonmark + pip install recommonmark #. 
Add the Markdown parser to the ``source_parsers`` configuration variable in your Sphinx configuration file: From e97d4e955b78dea0e5df1742dc09a90abf687158 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Wed, 1 Feb 2017 19:50:53 +0100 Subject: [PATCH 150/190] Simplify LaTeX mark-up for table column widths (ref #3379) The ``\X`` token is used as column-specifier: this does not define or redefine ``\X`` as a LaTeX macro. Using a letter could have led to conflict with user defined column types or table packages. This column specifier takes two arguments which must be positive integers, as produced by LaTeX writer for ``:widths:`` option or for equal widths columns. As it always uses ``\linewidth`` the latter was not abstracted into a third argument. --- sphinx/texinputs/sphinx.sty | 5 ++++- sphinx/writers/latex.py | 6 ++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/sphinx/texinputs/sphinx.sty b/sphinx/texinputs/sphinx.sty index 3eea2ffe7..3929f1e9b 100644 --- a/sphinx/texinputs/sphinx.sty +++ b/sphinx/texinputs/sphinx.sty @@ -6,7 +6,7 @@ % \NeedsTeXFormat{LaTeX2e}[1995/12/01] -\ProvidesPackage{sphinx}[2017/01/16 v1.6 LaTeX package (Sphinx markup)] +\ProvidesPackage{sphinx}[2017/02/01 v1.6 LaTeX package (Sphinx markup)] % we delay handling of options to after having loaded packages, because % of the need to use \definecolor. @@ -24,6 +24,9 @@ ******** ERROR !! PLEASE UPDATE titlesec.sty !!********^^J% ******** THIS VERSION SWALLOWS SECTION NUMBERS.********}}}}{} \RequirePackage{tabulary} +% use of \X to minimize possibility of conflict with one-character column types +\newcolumntype{\X}[2]{p{\dimexpr + (\linewidth-\arrayrulewidth)*#1/#2-\tw@\tabcolsep-\arrayrulewidth\relax}} \RequirePackage{makeidx} % For framing code-blocks and warning type notices, and shadowing topics \RequirePackage{framed} diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 519e655ce..e806dad54 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -1206,14 +1206,12 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append(self.table.colspec) elif self.table.colwidths: total = sum(self.table.colwidths) - colspec = ['p{\\dimexpr(\\linewidth-\\arrayrulewidth)*%d/%d' - '-2\\tabcolsep-\\arrayrulewidth\\relax}' % (width, total) + colspec = ['\\X{%d}{%d}' % (width, total) for width in self.table.colwidths] self.body.append('{|%s|}\n' % '|'.join(colspec)) else: if self.table.has_problematic: - colspec = ('*{%d}{p{\\dimexpr(\\linewidth-\\arrayrulewidth)/%d' - '-2\\tabcolsep-\\arrayrulewidth\\relax}|}' % + colspec = ('*{%d}{\\X{1}{%d}|}' % (self.table.colcount, self.table.colcount)) self.body.append('{|' + colspec + '}\n') elif self.table.longtable: From 1b6bb1caceccddfe7c63f02b34a42c1aa1a0ae21 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Thu, 2 Feb 2017 10:12:13 +0100 Subject: [PATCH 151/190] Use tabular (not tabulary) if ``:widths:`` table option (and no colspec) Also, organizes code for more clarity in conditionals. 
--- sphinx/writers/latex.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index e806dad54..3093ca274 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -1194,9 +1194,10 @@ class LaTeXTranslator(nodes.NodeVisitor): elif self.table.has_verbatim: self.body.append('\n\\noindent\\begin{tabular}') endmacro = '\\end{tabular}\n\n' - elif self.table.has_problematic and not self.table.colspec: - # if the user has given us tabularcolumns, accept them and use - # tabulary nevertheless + elif self.table.colspec: + self.body.append('\n\\noindent\\begin{tabulary}{\\linewidth}') + endmacro = '\\end{tabulary}\n\n' + elif self.table.has_problematic or self.table.colwidths: self.body.append('\n\\noindent\\begin{tabular}') endmacro = '\\end{tabular}\n\n' else: @@ -1209,15 +1210,14 @@ class LaTeXTranslator(nodes.NodeVisitor): colspec = ['\\X{%d}{%d}' % (width, total) for width in self.table.colwidths] self.body.append('{|%s|}\n' % '|'.join(colspec)) + elif self.table.has_problematic: + colspec = ('*{%d}{\\X{1}{%d}|}' % + (self.table.colcount, self.table.colcount)) + self.body.append('{|' + colspec + '}\n') + elif self.table.longtable: + self.body.append('{|' + ('l|' * self.table.colcount) + '}\n') else: - if self.table.has_problematic: - colspec = ('*{%d}{\\X{1}{%d}|}' % - (self.table.colcount, self.table.colcount)) - self.body.append('{|' + colspec + '}\n') - elif self.table.longtable: - self.body.append('{|' + ('l|' * self.table.colcount) + '}\n') - else: - self.body.append('{|' + ('L|' * self.table.colcount) + '}\n') + self.body.append('{|' + ('L|' * self.table.colcount) + '}\n') if self.table.longtable and self.table.caption is not None: self.body.append(u'\\caption{') for caption in self.table.caption: From 680030b5687474340883f7cd4212b970d77a8e26 Mon Sep 17 00:00:00 2001 From: adrian5 <adrian5@users.noreply.github.com> Date: Fri, 3 Feb 2017 16:30:26 +0100 Subject: [PATCH 152/190] Update conf.py.txt I'm not entirely sure what this line is trying to say, but is needs rewriting. Modify my commit, if necessary. --- doc/_static/conf.py.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/_static/conf.py.txt b/doc/_static/conf.py.txt index f70ae3568..be0c846db 100644 --- a/doc/_static/conf.py.txt +++ b/doc/_static/conf.py.txt @@ -79,7 +79,7 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path +# These patterns also affect html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all From 87ab6b01ec075d942aaa3e47edf099eeb32223c3 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Sat, 4 Feb 2017 09:07:35 +0100 Subject: [PATCH 153/190] Fix a typo in CHANGES --- CHANGES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 060b46ee7..d007a5240 100644 --- a/CHANGES +++ b/CHANGES @@ -9,7 +9,7 @@ Incompatible changes * LaTeX ``\includegraphics`` command isn't overloaded: only ``\sphinxincludegraphics`` has the custom code to fit image to available width if oversized. * The subclasses of ``sphinx.domains.Index`` should override ``generate()`` - method. The default implementation raises NotImplmentedError + method. 
The default implementation raises NotImplementedError Features removed ---------------- From a542ca2e78f9ad336bac202e8f52f6dcd3f0a484 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Sat, 4 Feb 2017 09:07:57 +0100 Subject: [PATCH 154/190] Update CHANGES for PR#3381 --- CHANGES | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index d007a5240..c3f6e2852 100644 --- a/CHANGES +++ b/CHANGES @@ -44,7 +44,8 @@ Features added When specified, each template parameter will be rendered on a separate line. * #3359: Allow sphinx.js in a user locale dir to override sphinx.js from Sphinx * #3303: Add ``:pyversion:`` option to the doctest directive. -* #3378: latex: Support ``:widths:`` option of table directives +* #3378: (latex) support for ``:widths:`` option of table directives + (refs: #3379, #3381) Bugs fixed ---------- From 10ae47d7b9e4b523f3dc94f7726f0355e1974896 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 4 Feb 2017 13:06:12 +0900 Subject: [PATCH 155/190] latex: Fix colwidths without :widths: option --- sphinx/writers/latex.py | 4 ++- tests/roots/test-latex-table/conf.py | 7 +++++ tests/roots/test-latex-table/index.rst | 27 ++++++++++++++++++ tests/test_build_latex.py | 38 ++++++++++++++++++++++++++ 4 files changed, 75 insertions(+), 1 deletion(-) create mode 100644 tests/roots/test-latex-table/conf.py create mode 100644 tests/roots/test-latex-table/index.rst diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 3093ca274..0799e9237 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -318,6 +318,7 @@ class ShowUrlsTransform(object): class Table(object): def __init__(self): # type: () -> None + self.classes = [] self.col = 0 self.colcount = 0 self.colspec = None # type: unicode @@ -1166,6 +1167,7 @@ class LaTeXTranslator(nodes.NodeVisitor): '%s:%s: nested tables are not yet implemented.' % (self.curfilestack[-1], node.line or '')) self.table = Table() + self.table.classes = node['classes'] self.table.longtable = 'longtable' in node['classes'] self.tablebody = [] # type: List[unicode] self.tableheaders = [] # type: List[unicode] @@ -1205,7 +1207,7 @@ class LaTeXTranslator(nodes.NodeVisitor): endmacro = '\\end{tabulary}\n\n' if self.table.colspec: self.body.append(self.table.colspec) - elif self.table.colwidths: + elif self.table.colwidths and 'colwidths-given' in self.table.classes: total = sum(self.table.colwidths) colspec = ['\\X{%d}{%d}' % (width, total) for width in self.table.colwidths] diff --git a/tests/roots/test-latex-table/conf.py b/tests/roots/test-latex-table/conf.py new file mode 100644 index 000000000..31e7a6ed4 --- /dev/null +++ b/tests/roots/test-latex-table/conf.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- + +master_doc = 'index' + +latex_documents = [ + (master_doc, 'test.tex', 'The basic Sphinx documentation for testing', 'Sphinx', 'report') +] diff --git a/tests/roots/test-latex-table/index.rst b/tests/roots/test-latex-table/index.rst new file mode 100644 index 000000000..c7159398a --- /dev/null +++ b/tests/roots/test-latex-table/index.rst @@ -0,0 +1,27 @@ +test-latex-table +================ + +simple table +------------ + +======= ======= +header1 header2 +======= ======= +cell1-1 cell1-2 +cell2-1 cell2-2 +cell3-1 cell3-2 +======= ======= + +table having :widths: option +---------------------------- + +.. 
table:: + :widths: 30,70 + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 6c8861f1b..758c84f73 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -20,6 +20,7 @@ import pytest from sphinx.errors import SphinxError from sphinx.util.osutil import cd, ensuredir +from sphinx.util import docutils from sphinx.writers.latex import LaTeXTranslator from util import SkipTest, remove_unicode_literals, strip_escseq, skip_if @@ -814,3 +815,40 @@ def test_maxlistdepth_at_ten(app, status, warning): print(status.getvalue()) print(warning.getvalue()) compile_latex_document(app) + + +@pytest.mark.skipif(docutils.__version_info__ < (0, 13), + reason='docutils-0.13 or above is required') +@pytest.mark.sphinx('latex', testroot='latex-table') +def test_latex_table(app, status, warning): + app.builder.build_all() + result = (app.outdir / 'test.tex').text(encoding='utf8') + tables = {} + for chap in re.split(r'\\chapter(?={.*})', result)[1:]: + sectname, content = chap.split('}', 1) + tables[sectname[1:]] = content.strip() + + # simple_table + simple_table = tables['simple table'] + assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|}' in simple_table) + assert ('\\hline\n' + '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' + '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in simple_table) + assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in simple_table) + assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in simple_table) + assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in simple_table) + assert ('\\hline\\end{tabulary}' in simple_table) + + # table having :widths: option + widths_table = tables['table having :widths: option'] + assert ('\\noindent\\begin{tabulary}{\\linewidth}{' + '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*30/100-2\\tabcolsep-\\arrayrulewidth\\relax}' + '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*70/100-2\\tabcolsep-\\arrayrulewidth\\relax}|}' + in widths_table) + assert ('\\hline\n' + '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' + '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in widths_table) + assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in widths_table) + assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in widths_table) + assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in widths_table) + assert ('\\hline\\end{tabulary}' in widths_table) From 4f8b3f94d4ff69b68a7f0e65ff621433062c047e Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 4 Feb 2017 13:37:34 +0900 Subject: [PATCH 156/190] Add testcase for captioned table and longtable --- tests/roots/test-latex-table/index.rst | 29 ++++++++++++ tests/test_build_latex.py | 62 ++++++++++++++++++++------ 2 files changed, 77 insertions(+), 14 deletions(-) diff --git a/tests/roots/test-latex-table/index.rst b/tests/roots/test-latex-table/index.rst index c7159398a..d55edf9e4 100644 --- a/tests/roots/test-latex-table/index.rst +++ b/tests/roots/test-latex-table/index.rst @@ -25,3 +25,32 @@ table having :widths: option cell2-1 cell2-2 cell3-1 cell3-2 ======= ======= + +table having caption +-------------------- + +.. list-table:: caption for table + :header-rows: 1 + + * - header1 + - header2 + * - cell1-1 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +longtable +--------- + +.. 
table:: + :class: longtable + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 758c84f73..2016c16fd 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -829,26 +829,60 @@ def test_latex_table(app, status, warning): tables[sectname[1:]] = content.strip() # simple_table - simple_table = tables['simple table'] - assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|}' in simple_table) + table = tables['simple table'] + assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|}' in table) assert ('\\hline\n' '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' - '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in simple_table) - assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in simple_table) - assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in simple_table) - assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in simple_table) - assert ('\\hline\\end{tabulary}' in simple_table) + '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in table) + assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table) + assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) + assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) + assert ('\\hline\\end{tabulary}' in table) # table having :widths: option - widths_table = tables['table having :widths: option'] + table = tables['table having :widths: option'] assert ('\\noindent\\begin{tabulary}{\\linewidth}{' '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*30/100-2\\tabcolsep-\\arrayrulewidth\\relax}' '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*70/100-2\\tabcolsep-\\arrayrulewidth\\relax}|}' - in widths_table) + in table) assert ('\\hline\n' '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' - '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in widths_table) - assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in widths_table) - assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in widths_table) - assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in widths_table) - assert ('\\hline\\end{tabulary}' in widths_table) + '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in table) + assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table) + assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) + assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) + assert ('\\hline\\end{tabulary}' in table) + + # table having caption + table = tables['table having caption'] + assert ('\\begin{threeparttable}\n\\capstart\\caption{caption for table}' + '\\label{\\detokenize{index:id1}}' in table) + assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|}' in table) + assert ('\\hline\n' + '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' + '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in table) + assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table) + assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) + assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) + assert ('\\hline\\end{tabulary}' in table) + + # longtable + table = tables['longtable'] + assert ('\\begin{longtable}{|l|l|}\n\\hline' in table) + assert ('\\hline\n' + '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' + '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax \\\\\n' + '\\hline\\endfirsthead' in table) + assert ('\\multicolumn{2}{c}%\n' + '{{\\tablecontinued{\\tablename\\ \\thetable{} -- ' + 'continued from previous page}}} \\\\\n\\hline\n' + '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' + 
'\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax \\\\\n' + '\\hline\\endhead' in table) + assert ('\\hline \\multicolumn{2}{|r|}' + '{{\\tablecontinued{Continued on next page}}} \\\\ \\hline\n' + '\\endfoot\n\n\\endlastfoot' in table) + assert ('\ncell1-1\n&\ncell1-2\n\\\\' in table) + assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) + assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) + assert ('\\hline\\end{longtable}' in table) From 40a957009dd4ef6c3f631324b133c788b3f76359 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 4 Feb 2017 14:40:40 +0900 Subject: [PATCH 157/190] Add testcases for latex tables --- tests/roots/test-latex-table/index.rst | 136 +++++++++++++++++++++++++ tests/test_build_latex.py | 51 ++++++++++ 2 files changed, 187 insertions(+) diff --git a/tests/roots/test-latex-table/index.rst b/tests/roots/test-latex-table/index.rst index d55edf9e4..c559ccff8 100644 --- a/tests/roots/test-latex-table/index.rst +++ b/tests/roots/test-latex-table/index.rst @@ -41,6 +41,57 @@ table having caption * - cell3-1 - cell3-2 +table having verbatim +--------------------- + +.. list-table:: + :header-rows: 1 + + * - header1 + - header2 + * - :: + + hello world + + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +table having both :widths: and problematic cell +----------------------------------------------- + +.. list-table:: + :header-rows: 1 + :widths: 30,70 + + * - header1 + - header2 + * - + item1 + + item2 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +table having problematic cell +----------------------------- + +.. list-table:: + :header-rows: 1 + + * - header1 + - header2 + * - + item1 + + item2 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + longtable --------- @@ -54,3 +105,88 @@ longtable cell2-1 cell2-2 cell3-1 cell3-2 ======= ======= + +longtable having :widths: option +-------------------------------- + +.. table:: + :class: longtable + :widths: 30,70 + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= + +longtable having caption +------------------------ + +.. list-table:: caption for longtable + :class: longtable + :header-rows: 1 + + * - header1 + - header2 + * - cell1-1 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +longtable having verbatim +------------------------- + +.. list-table:: + :class: longtable + :header-rows: 1 + + * - header1 + - header2 + * - :: + + hello world + + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +longtable having both :widths: and problematic cell +--------------------------------------------------- + +.. list-table:: + :class: longtable + :header-rows: 1 + :widths: 30,70 + + * - header1 + - header2 + * - + item1 + + item2 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +longtable having problematic cell +--------------------------------- + +.. 
list-table:: + :class: longtable + :header-rows: 1 + + * - header1 + - header2 + * - + item1 + + item2 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 2016c16fd..a7fa95168 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -865,6 +865,26 @@ def test_latex_table(app, status, warning): assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) assert ('\\hline\\end{tabulary}' in table) + assert ('\\end{threeparttable}' in table) + + # table having verbatim + table = tables['table having verbatim'] + assert ('\\noindent\\begin{tabular}{|*{2}{' + 'p{\\dimexpr(\\linewidth-\\arrayrulewidth)/2-2\\tabcolsep-\\arrayrulewidth\\relax}|}}\n' + '\\hline' in table) + + # table having problematic cell + table = tables['table having problematic cell'] + assert ('\\noindent\\begin{tabular}{|*{2}{' + 'p{\\dimexpr(\\linewidth-\\arrayrulewidth)/2-2\\tabcolsep-\\arrayrulewidth\\relax}|}}\n' + '\\hline' in table) + + # table having both :widths: and problematic cell + table = tables['table having both :widths: and problematic cell'] + assert ('\\noindent\\begin{tabular}{' + '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*30/100-2\\tabcolsep-\\arrayrulewidth\\relax}' + '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*70/100-2\\tabcolsep-\\arrayrulewidth\\relax}|}' + in table) # longtable table = tables['longtable'] @@ -886,3 +906,34 @@ def test_latex_table(app, status, warning): assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) assert ('\\hline\\end{longtable}' in table) + + # longtable having :widths: option + table = tables['longtable having :widths: option'] + assert ('\\begin{longtable}{' + '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*30/100-2\\tabcolsep-\\arrayrulewidth\\relax}' + '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*70/100-2\\tabcolsep-\\arrayrulewidth\\relax}|}' + in table) + + # longtable having caption + table = tables['longtable having caption'] + assert ('\\begin{longtable}{|l|l|}\n\\caption{caption for longtable}' + '\\label{\\detokenize{index:id2}}\\\\\n\\hline' in table) + + # longtable having verbatim + table = tables['longtable having verbatim'] + assert ('\\begin{longtable}{|*{2}{' + 'p{\\dimexpr(\\linewidth-\\arrayrulewidth)/2-2\\tabcolsep-\\arrayrulewidth\\relax}|}}\n' + '\\hline' in table) + + # longtable having problematic cell + table = tables['longtable having problematic cell'] + assert ('\\begin{longtable}{|*{2}{' + 'p{\\dimexpr(\\linewidth-\\arrayrulewidth)/2-2\\tabcolsep-\\arrayrulewidth\\relax}|}}\n' + '\\hline' in table) + + # longtable having both :widths: and problematic cell + table = tables['longtable having both :widths: and problematic cell'] + assert ('\\begin{longtable}{' + '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*30/100-2\\tabcolsep-\\arrayrulewidth\\relax}' + '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*70/100-2\\tabcolsep-\\arrayrulewidth\\relax}|}' + in table) From 7edcec66c9f79c9ba47e54a3e1a381b4e2599327 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 4 Feb 2017 18:35:44 +0900 Subject: [PATCH 158/190] Support gettext translation in templates --- sphinx/locale/__init__.py | 10 ++++++++++ sphinx/util/template.py | 4 +++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py index 44ad64304..135b76b9d 100644 --- a/sphinx/locale/__init__.py +++ 
b/sphinx/locale/__init__.py @@ -239,3 +239,13 @@ def init(locale_dirs, language, catalog='sphinx'): if hasattr(translator, 'ugettext'): translator.gettext = translator.ugettext return translator, has_translation + + +def get_translator(catalog='sphinx'): + global translators + translator = translators.get(catalog) + if translator is None: + translator = gettext.NullTranslations() + if hasattr(translator, 'ugettext'): + translator.gettext = translator.ugettext + return translator diff --git a/sphinx/util/template.py b/sphinx/util/template.py index 7cb897e7d..01d365994 100644 --- a/sphinx/util/template.py +++ b/sphinx/util/template.py @@ -14,12 +14,14 @@ from jinja2.sandbox import SandboxedEnvironment from sphinx import package_dir from sphinx.jinja2glue import SphinxFileSystemLoader +from sphinx.locale import get_translator class BaseRenderer(object): def __init__(self, loader=None): - self.env = SandboxedEnvironment(loader=loader) + self.env = SandboxedEnvironment(loader=loader, extensions=['jinja2.ext.i18n']) self.env.filters['repr'] = repr + self.env.install_gettext_translations(get_translator()) def render(self, template_name, context): return self.env.get_template(template_name).render(context) From 976fc326b9f91e9d16bb29a76b9daf07c6afc92e Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 4 Feb 2017 17:06:53 +0900 Subject: [PATCH 159/190] latex: Use templates to render tables --- sphinx/templates/latex/longtable.tex_t | 21 ++++ sphinx/templates/latex/tabular.tex_t | 12 ++ sphinx/templates/latex/tabulary.tex_t | 12 ++ sphinx/util/template.py | 9 +- sphinx/writers/latex.py | 163 +++++++++---------------- tests/test_build_latex.py | 37 ++---- 6 files changed, 120 insertions(+), 134 deletions(-) create mode 100644 sphinx/templates/latex/longtable.tex_t create mode 100644 sphinx/templates/latex/tabular.tex_t create mode 100644 sphinx/templates/latex/tabulary.tex_t diff --git a/sphinx/templates/latex/longtable.tex_t b/sphinx/templates/latex/longtable.tex_t new file mode 100644 index 000000000..694483ce2 --- /dev/null +++ b/sphinx/templates/latex/longtable.tex_t @@ -0,0 +1,21 @@ +\begin{longtable}<%= table.get_colspec() %> +<%- if table.caption -%> +\caption{<%= ''.join(table.caption) %>}<%= labels %>\\ +<% endif -%> +\hline +<%= ''.join(table.header) -%> +\endfirsthead + +\multicolumn{<%= table.colcount %>}{c}% +{{\tablecontinued{\tablename\ \thetable{} -- <%= _('continued from previous page') %>}}} \\ +\hline +<%= ''.join(table.header) -%> +\endhead + +\hline \multicolumn{<%= table.colcount %>}{|r|}{{\tablecontinued{<%= _('Continued on next page') %>}}} \\ \hline +\endfoot + +\endlastfoot + +<%= ''.join(table.body) -%> +\end{longtable} diff --git a/sphinx/templates/latex/tabular.tex_t b/sphinx/templates/latex/tabular.tex_t new file mode 100644 index 000000000..7448d6c1f --- /dev/null +++ b/sphinx/templates/latex/tabular.tex_t @@ -0,0 +1,12 @@ +<%- if table.caption -%> +\begin{threeparttable} +\capstart\caption{<%= ''.join(table.caption) %>}<%= labels %> +<%- endif %> +\noindent\begin{tabular}<%= table.get_colspec() -%> +\hline +<%= ''.join(table.header) -%> +<%= ''.join(table.body) -%> +\end{tabular} +<%- if table.caption -%> +\end{threeparttable} +<%- endif -%> diff --git a/sphinx/templates/latex/tabulary.tex_t b/sphinx/templates/latex/tabulary.tex_t new file mode 100644 index 000000000..959eadcbd --- /dev/null +++ b/sphinx/templates/latex/tabulary.tex_t @@ -0,0 +1,12 @@ +<%- if table.caption -%> +\begin{threeparttable} +\capstart\caption{<%= 
''.join(table.caption) %>}<%= labels %> +<%- endif %> +\noindent\begin{tabulary}{\linewidth}<%= table.get_colspec() -%> +\hline +<%= ''.join(table.header) -%> +<%= ''.join(table.body) -%> +\end{tabulary} +<%- if table.caption -%> +\end{threeparttable} +<%- endif -%> diff --git a/sphinx/util/template.py b/sphinx/util/template.py index 01d365994..f6db8034b 100644 --- a/sphinx/util/template.py +++ b/sphinx/util/template.py @@ -43,8 +43,10 @@ class FileRenderer(BaseRenderer): class SphinxRenderer(FileRenderer): - def __init__(self): - super(SphinxRenderer, self).__init__(os.path.join(package_dir, 'templates')) + def __init__(self, template_path=None): + if template_path is None: + template_path = os.path.join(package_dir, 'templates') + super(SphinxRenderer, self).__init__(template_path) @classmethod def render_from_file(cls, filename, context): @@ -53,7 +55,8 @@ class SphinxRenderer(FileRenderer): class LaTeXRenderer(SphinxRenderer): def __init__(self): - super(LaTeXRenderer, self).__init__() + template_path = os.path.join(package_dir, 'templates', 'latex') + super(LaTeXRenderer, self).__init__(template_path) # use JSP/eRuby like tagging instead because curly bracket; the default # tagging of jinja2 is not good for LaTeX sources. diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 0799e9237..ab716e86c 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -46,7 +46,6 @@ BEGIN_DOC = r''' ''' -DEFAULT_TEMPLATE = 'latex/content.tex_t' URI_SCHEMES = ('mailto:', 'http:', 'https:', 'ftp:') SECNUMDEPTH = 3 @@ -316,19 +315,49 @@ class ShowUrlsTransform(object): class Table(object): - def __init__(self): - # type: () -> None - self.classes = [] + def __init__(self, node): + # type: (nodes.table) -> None + self.header = [] # type: List[unicode] + self.body = [] # type: List[unicode] + self.classes = node.get('classes', []) # type: List[unicode] self.col = 0 self.colcount = 0 - self.colspec = None # type: unicode - self.colwidths = [] # type: List[int] + self.colspec = None # type: unicode + self.colwidths = [] # type: List[int] self.rowcount = 0 - self.had_head = False self.has_problematic = False self.has_verbatim = False - self.caption = None # type: List[unicode] - self.longtable = False + self.caption = None # type: List[unicode] + + def is_longtable(self): + # type: () -> bool + return self.rowcount > 30 or 'longtable' in self.classes + + def get_table_type(self): + # type: () -> unicode + if self.is_longtable(): + return 'longtable' + elif self.has_verbatim: + return 'tabular' + elif self.has_problematic and not self.colspec: + return 'tabular' + else: + return 'tabulary' + + def get_colspec(self): + # type: () -> unicode + if self.colspec: + return self.colspec + elif self.colwidths and 'colwidths-given' in self.classes: + total = sum(self.colwidths) + colspecs = ['\\X{%d}{%d}' % (width, total) for width in self.colwidths] + return '{|%s|}\n' % '|'.join(colspecs) + elif self.has_problematic: + return '{|*{%d}{\\X{1}{%d}|}}\n' % (self.colcount, self.colcount) + elif self.is_longtable(): + return '{|' + ('l|' * self.colcount) + '}\n' + else: + return '{|' + ('L|' * self.colcount) + '}\n' def escape_abbr(text): @@ -606,7 +635,7 @@ class LaTeXTranslator(nodes.NodeVisitor): if path.exists(template_path): return LaTeXRenderer().render(template_path, self.elements) else: - return LaTeXRenderer().render(DEFAULT_TEMPLATE, self.elements) + return LaTeXRenderer().render('content.tex_t', self.elements) def hypertarget(self, id, withdoc=True, anchor=True): # type: (unicode, 
bool, bool) -> unicode @@ -1166,95 +1195,29 @@ class LaTeXTranslator(nodes.NodeVisitor): raise UnsupportedError( '%s:%s: nested tables are not yet implemented.' % (self.curfilestack[-1], node.line or '')) - self.table = Table() - self.table.classes = node['classes'] - self.table.longtable = 'longtable' in node['classes'] - self.tablebody = [] # type: List[unicode] - self.tableheaders = [] # type: List[unicode] - # Redirect body output until table is finished. - self.pushbody(self.tablebody) + self.table = Table(node) + if self.next_table_colspec: + self.table.colspec = '{%s}\n' % self.next_table_colspec + self.next_table_colspec = None self.restrict_footnote(node) def depart_table(self, node): # type: (nodes.Node) -> None - if self.table.rowcount > 30: - self.table.longtable = True - self.popbody() - if not self.table.longtable and self.table.caption is not None: - self.body.append('\n\n\\begin{threeparttable}\n' - '\\capstart\\caption{') - for caption in self.table.caption: - self.body.append(caption) - self.body.append('}') - for id in self.pop_hyperlink_ids('table'): - self.body.append(self.hypertarget(id, anchor=False)) - if node['ids']: - self.body.append(self.hypertarget(node['ids'][0], anchor=False)) - if self.table.longtable: - self.body.append('\n\\begin{longtable}') - endmacro = '\\end{longtable}\n\n' - elif self.table.has_verbatim: - self.body.append('\n\\noindent\\begin{tabular}') - endmacro = '\\end{tabular}\n\n' - elif self.table.colspec: - self.body.append('\n\\noindent\\begin{tabulary}{\\linewidth}') - endmacro = '\\end{tabulary}\n\n' - elif self.table.has_problematic or self.table.colwidths: - self.body.append('\n\\noindent\\begin{tabular}') - endmacro = '\\end{tabular}\n\n' - else: - self.body.append('\n\\noindent\\begin{tabulary}{\\linewidth}') - endmacro = '\\end{tabulary}\n\n' - if self.table.colspec: - self.body.append(self.table.colspec) - elif self.table.colwidths and 'colwidths-given' in self.table.classes: - total = sum(self.table.colwidths) - colspec = ['\\X{%d}{%d}' % (width, total) - for width in self.table.colwidths] - self.body.append('{|%s|}\n' % '|'.join(colspec)) - elif self.table.has_problematic: - colspec = ('*{%d}{\\X{1}{%d}|}' % - (self.table.colcount, self.table.colcount)) - self.body.append('{|' + colspec + '}\n') - elif self.table.longtable: - self.body.append('{|' + ('l|' * self.table.colcount) + '}\n') - else: - self.body.append('{|' + ('L|' * self.table.colcount) + '}\n') - if self.table.longtable and self.table.caption is not None: - self.body.append(u'\\caption{') - for caption in self.table.caption: - self.body.append(caption) - self.body.append('}') - for id in self.pop_hyperlink_ids('table'): - self.body.append(self.hypertarget(id, anchor=False)) - if node['ids']: - self.body.append(self.hypertarget(node['ids'][0], anchor=False)) - self.body.append(u'\\\\\n') - if self.table.longtable: - self.body.append('\\hline\n') - self.body.extend(self.tableheaders) - self.body.append('\\endfirsthead\n\n') - self.body.append('\\multicolumn{%s}{c}%%\n' % self.table.colcount) - self.body.append(r'{{\tablecontinued{\tablename\ \thetable{} -- %s}}} \\' - % _('continued from previous page')) - self.body.append('\n\\hline\n') - self.body.extend(self.tableheaders) - self.body.append('\\endhead\n\n') - self.body.append(r'\hline \multicolumn{%s}{|r|}{{\tablecontinued{%s}}} \\ \hline' - % (self.table.colcount, - _('Continued on next page'))) - self.body.append('\n\\endfoot\n\n') - self.body.append('\\endlastfoot\n\n') - else: - self.body.append('\\hline\n') - 
self.body.extend(self.tableheaders) - self.body.extend(self.tablebody) - self.body.append(endmacro) - if not self.table.longtable and self.table.caption is not None: - self.body.append('\\end{threeparttable}\n\n') + labels = '' # type: unicode + for labelid in self.pop_hyperlink_ids('table'): + labels += self.hypertarget(labelid, anchor=False) + if node['ids']: + labels += self.hypertarget(node['ids'][0], anchor=False) + + table_type = self.table.get_table_type() + table = LaTeXRenderer().render(table_type + '.tex_t', + dict(table=self.table, labels=labels)) + self.body.append("\n\n") + self.body.append(table) + self.body.append("\n\n") + self.unrestrict_footnote(node) self.table = None - self.tablebody = None def visit_colspec(self, node): # type: (nodes.Node) -> None @@ -1276,25 +1239,19 @@ class LaTeXTranslator(nodes.NodeVisitor): def visit_thead(self, node): # type: (nodes.Node) -> None - self.table.had_head = True - if self.next_table_colspec: - self.table.colspec = '{%s}\n' % self.next_table_colspec - self.next_table_colspec = None - # Redirect head output until header is finished. see visit_tbody. - self.body = self.tableheaders + self.pushbody(self.table.header) # Redirect head output until header is finished. def depart_thead(self, node): # type: (nodes.Node) -> None - pass + self.popbody() def visit_tbody(self, node): # type: (nodes.Node) -> None - if not self.table.had_head: - self.visit_thead(node) - self.body = self.tablebody + self.pushbody(self.table.body) # Redirect body output until table is finished. def depart_tbody(self, node): # type: (nodes.Node) -> None + self.popbody() self.remember_multirow = {} self.remember_multirowcol = {} diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index a7fa95168..534270e45 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -841,10 +841,8 @@ def test_latex_table(app, status, warning): # table having :widths: option table = tables['table having :widths: option'] - assert ('\\noindent\\begin{tabulary}{\\linewidth}{' - '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*30/100-2\\tabcolsep-\\arrayrulewidth\\relax}' - '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*70/100-2\\tabcolsep-\\arrayrulewidth\\relax}|}' - in table) + assert ('\\noindent\\begin{tabulary}{\\linewidth}' + '{|\\X{30}{100}|\\X{70}{100}|}' in table) assert ('\\hline\n' '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in table) @@ -869,22 +867,15 @@ def test_latex_table(app, status, warning): # table having verbatim table = tables['table having verbatim'] - assert ('\\noindent\\begin{tabular}{|*{2}{' - 'p{\\dimexpr(\\linewidth-\\arrayrulewidth)/2-2\\tabcolsep-\\arrayrulewidth\\relax}|}}\n' - '\\hline' in table) + assert ('\\noindent\\begin{tabular}{|*{2}{\\X{1}{2}|}}\n\\hline' in table) # table having problematic cell table = tables['table having problematic cell'] - assert ('\\noindent\\begin{tabular}{|*{2}{' - 'p{\\dimexpr(\\linewidth-\\arrayrulewidth)/2-2\\tabcolsep-\\arrayrulewidth\\relax}|}}\n' - '\\hline' in table) + assert ('\\noindent\\begin{tabular}{|*{2}{\\X{1}{2}|}}\n\\hline' in table) # table having both :widths: and problematic cell table = tables['table having both :widths: and problematic cell'] - assert ('\\noindent\\begin{tabular}{' - '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*30/100-2\\tabcolsep-\\arrayrulewidth\\relax}' - '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*70/100-2\\tabcolsep-\\arrayrulewidth\\relax}|}' - in table) + assert 
('\\noindent\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) # longtable table = tables['longtable'] @@ -909,10 +900,7 @@ def test_latex_table(app, status, warning): # longtable having :widths: option table = tables['longtable having :widths: option'] - assert ('\\begin{longtable}{' - '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*30/100-2\\tabcolsep-\\arrayrulewidth\\relax}' - '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*70/100-2\\tabcolsep-\\arrayrulewidth\\relax}|}' - in table) + assert ('\\begin{longtable}{|\\X{30}{100}|\\X{70}{100}|}' in table) # longtable having caption table = tables['longtable having caption'] @@ -921,19 +909,12 @@ def test_latex_table(app, status, warning): # longtable having verbatim table = tables['longtable having verbatim'] - assert ('\\begin{longtable}{|*{2}{' - 'p{\\dimexpr(\\linewidth-\\arrayrulewidth)/2-2\\tabcolsep-\\arrayrulewidth\\relax}|}}\n' - '\\hline' in table) + assert ('\\begin{longtable}{|*{2}{\\X{1}{2}|}}\n\\hline' in table) # longtable having problematic cell table = tables['longtable having problematic cell'] - assert ('\\begin{longtable}{|*{2}{' - 'p{\\dimexpr(\\linewidth-\\arrayrulewidth)/2-2\\tabcolsep-\\arrayrulewidth\\relax}|}}\n' - '\\hline' in table) + assert ('\\begin{longtable}{|*{2}{\\X{1}{2}|}}\n\\hline' in table) # longtable having both :widths: and problematic cell table = tables['longtable having both :widths: and problematic cell'] - assert ('\\begin{longtable}{' - '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*30/100-2\\tabcolsep-\\arrayrulewidth\\relax}' - '|p{\\dimexpr(\\linewidth-\\arrayrulewidth)*70/100-2\\tabcolsep-\\arrayrulewidth\\relax}|}' - in table) + assert ('\\begin{longtable}{|\\X{30}{100}|\\X{70}{100}|}' in table) From 35ce2ea432a5e1477f5cce6328d1ac1e8980b8ca Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 4 Feb 2017 21:32:36 +0900 Subject: [PATCH 160/190] latex: modify the conditions for colspecs --- sphinx/writers/latex.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index ab716e86c..5228938ef 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -354,10 +354,10 @@ class Table(object): return '{|%s|}\n' % '|'.join(colspecs) elif self.has_problematic: return '{|*{%d}{\\X{1}{%d}|}}\n' % (self.colcount, self.colcount) - elif self.is_longtable(): - return '{|' + ('l|' * self.colcount) + '}\n' - else: + elif self.get_table_type() == 'tabulary': return '{|' + ('L|' * self.colcount) + '}\n' + else: + return '{|' + ('l|' * self.colcount) + '}\n' def escape_abbr(text): From fdd6f9079247c87cb948501ba73df7cfb92d27c9 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 4 Feb 2017 22:29:40 +0900 Subject: [PATCH 161/190] Fix changes in #3381 is lost on merging --- sphinx/writers/latex.py | 4 +++- tests/roots/test-latex-table/index.rst | 29 ++++++++++++++++++++++++++ tests/test_build_latex.py | 13 +++++++++--- 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 5228938ef..7cdec13c9 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -339,7 +339,9 @@ class Table(object): return 'longtable' elif self.has_verbatim: return 'tabular' - elif self.has_problematic and not self.colspec: + elif self.colspec: + return 'tabulary' + elif self.has_problematic or (self.colwidths and 'colwidths-given' in self.classes): return 'tabular' else: return 'tabulary' diff --git a/tests/roots/test-latex-table/index.rst 
b/tests/roots/test-latex-table/index.rst index c559ccff8..ae461df2d 100644 --- a/tests/roots/test-latex-table/index.rst +++ b/tests/roots/test-latex-table/index.rst @@ -26,6 +26,19 @@ table having :widths: option cell3-1 cell3-2 ======= ======= +table with tabularcolumn +------------------------ + +.. tabularcolumns:: |c|c| + +======= ======= +header1 header2 +======= ======= +cell1-1 cell1-2 +cell2-1 cell2-2 +cell3-1 cell3-2 +======= ======= + table having caption -------------------- @@ -121,6 +134,22 @@ longtable having :widths: option cell3-1 cell3-2 ======= ======= +longtable with tabularcolumn +---------------------------- + +.. tabularcolumns:: |c|c| + +.. table:: + :class: longtable + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= + longtable having caption ------------------------ diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 534270e45..4813fbbbc 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -841,15 +841,18 @@ def test_latex_table(app, status, warning): # table having :widths: option table = tables['table having :widths: option'] - assert ('\\noindent\\begin{tabulary}{\\linewidth}' - '{|\\X{30}{100}|\\X{70}{100}|}' in table) + assert ('\\noindent\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) assert ('\\hline\n' '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in table) assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table) assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) - assert ('\\hline\\end{tabulary}' in table) + assert ('\\hline\\end{tabular}' in table) + + # table with tabularcolumn + table = tables['table with tabularcolumn'] + assert ('\\noindent\\begin{tabulary}{\\linewidth}{|c|c|}' in table) # table having caption table = tables['table having caption'] @@ -902,6 +905,10 @@ def test_latex_table(app, status, warning): table = tables['longtable having :widths: option'] assert ('\\begin{longtable}{|\\X{30}{100}|\\X{70}{100}|}' in table) + # longtable with tabularcolumn + table = tables['longtable with tabularcolumn'] + assert ('\\begin{longtable}{|c|c|}' in table) + # longtable having caption table = tables['longtable having caption'] assert ('\\begin{longtable}{|l|l|}\n\\caption{caption for longtable}' From f88e2f23f562f416d04e0131616023fea25e0764 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Sat, 4 Feb 2017 16:43:33 +0100 Subject: [PATCH 162/190] Streamlines newlines in latex output for tables --- sphinx/templates/latex/longtable.tex_t | 10 ++++++---- sphinx/templates/latex/tabular.tex_t | 10 +++++----- sphinx/templates/latex/tabulary.tex_t | 10 +++++----- sphinx/writers/latex.py | 2 +- tests/test_build_latex.py | 22 +++++++++++----------- 5 files changed, 28 insertions(+), 26 deletions(-) diff --git a/sphinx/templates/latex/longtable.tex_t b/sphinx/templates/latex/longtable.tex_t index 694483ce2..516f6d0fa 100644 --- a/sphinx/templates/latex/longtable.tex_t +++ b/sphinx/templates/latex/longtable.tex_t @@ -3,19 +3,21 @@ \caption{<%= ''.join(table.caption) %>}<%= labels %>\\ <% endif -%> \hline -<%= ''.join(table.header) -%> +<%= ''.join(table.header) %> \endfirsthead \multicolumn{<%= table.colcount %>}{c}% {{\tablecontinued{\tablename\ \thetable{} -- <%= _('continued from previous page') %>}}} \\ \hline -<%= ''.join(table.header) -%> +<%= ''.join(table.header) %> \endhead -\hline 
\multicolumn{<%= table.colcount %>}{|r|}{{\tablecontinued{<%= _('Continued on next page') %>}}} \\ \hline +\hline +\multicolumn{<%= table.colcount %>}{|r|}{{\tablecontinued{<%= _('Continued on next page') %>}}} \\ +\hline \endfoot \endlastfoot -<%= ''.join(table.body) -%> +<%= ''.join(table.body) %> \end{longtable} diff --git a/sphinx/templates/latex/tabular.tex_t b/sphinx/templates/latex/tabular.tex_t index 7448d6c1f..27e5c30c0 100644 --- a/sphinx/templates/latex/tabular.tex_t +++ b/sphinx/templates/latex/tabular.tex_t @@ -1,12 +1,12 @@ <%- if table.caption -%> \begin{threeparttable} \capstart\caption{<%= ''.join(table.caption) %>}<%= labels %> -<%- endif %> +<% endif -%> \noindent\begin{tabular}<%= table.get_colspec() -%> \hline -<%= ''.join(table.header) -%> -<%= ''.join(table.body) -%> +<%= ''.join(table.header) %> +<%=- ''.join(table.body) %> \end{tabular} -<%- if table.caption -%> +<%- if table.caption %> \end{threeparttable} -<%- endif -%> +<%- endif %> diff --git a/sphinx/templates/latex/tabulary.tex_t b/sphinx/templates/latex/tabulary.tex_t index 959eadcbd..11ec79b33 100644 --- a/sphinx/templates/latex/tabulary.tex_t +++ b/sphinx/templates/latex/tabulary.tex_t @@ -1,12 +1,12 @@ <%- if table.caption -%> \begin{threeparttable} \capstart\caption{<%= ''.join(table.caption) %>}<%= labels %> -<%- endif %> +<% endif -%> \noindent\begin{tabulary}{\linewidth}<%= table.get_colspec() -%> \hline -<%= ''.join(table.header) -%> -<%= ''.join(table.body) -%> +<%= ''.join(table.header) %> +<%=- ''.join(table.body) %> \end{tabulary} -<%- if table.caption -%> +<%- if table.caption %> \end{threeparttable} -<%- endif -%> +<%- endif %> diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 7cdec13c9..5858ad85e 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -1216,7 +1216,7 @@ class LaTeXTranslator(nodes.NodeVisitor): dict(table=self.table, labels=labels)) self.body.append("\n\n") self.body.append(table) - self.body.append("\n\n") + self.body.append("\n") self.unrestrict_footnote(node) self.table = None diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 4813fbbbc..1c1cd3928 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -481,7 +481,7 @@ def test_footnote(app, status, warning): '\ncite\n}') in result assert '\\caption{Table caption \\sphinxfootnotemark[4]' in result assert 'name \\sphinxfootnotemark[5]' in result - assert ('\\end{threeparttable}\n\n%\n' + assert ('\\end{threeparttable}\n%\n' '\\begin{footnotetext}[4]\sphinxAtStartFootnote\n' 'footnotes in table caption\n%\n\\end{footnotetext}%\n' '\\begin{footnotetext}[5]\sphinxAtStartFootnote\n' @@ -514,7 +514,7 @@ def test_reference_in_caption_and_codeblock_in_footnote(app, status, warning): 'in caption of normal table}\\label{\\detokenize{index:id28}}') in result assert ('\\caption{footnote \\sphinxfootnotemark[8] ' 'in caption \sphinxfootnotemark[9] of longtable}') in result - assert ('\end{longtable}\n\n%\n\\begin{footnotetext}[8]' + assert ('\end{longtable}\n%\n\\begin{footnotetext}[8]' '\sphinxAtStartFootnote\n' 'Foot note in longtable\n%\n\\end{footnotetext}' in result) assert ('This is a reference to the code-block in the footnote:\n' @@ -837,7 +837,7 @@ def test_latex_table(app, status, warning): assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table) assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) - assert ('\\hline\\end{tabulary}' in table) + assert ('\\hline\n\\end{tabulary}' in table) # table 
having :widths: option table = tables['table having :widths: option'] @@ -848,7 +848,7 @@ def test_latex_table(app, status, warning): assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table) assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) - assert ('\\hline\\end{tabular}' in table) + assert ('\\hline\n\\end{tabular}' in table) # table with tabularcolumn table = tables['table with tabularcolumn'] @@ -865,7 +865,7 @@ def test_latex_table(app, status, warning): assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table) assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) - assert ('\\hline\\end{tabulary}' in table) + assert ('\\hline\n\\end{tabulary}' in table) assert ('\\end{threeparttable}' in table) # table having verbatim @@ -886,20 +886,20 @@ def test_latex_table(app, status, warning): assert ('\\hline\n' '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax \\\\\n' - '\\hline\\endfirsthead' in table) + '\\hline\n\\endfirsthead' in table) assert ('\\multicolumn{2}{c}%\n' '{{\\tablecontinued{\\tablename\\ \\thetable{} -- ' 'continued from previous page}}} \\\\\n\\hline\n' '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax \\\\\n' - '\\hline\\endhead' in table) - assert ('\\hline \\multicolumn{2}{|r|}' - '{{\\tablecontinued{Continued on next page}}} \\\\ \\hline\n' - '\\endfoot\n\n\\endlastfoot' in table) + '\\hline\n\\endhead' in table) + assert ('\\hline\n\\multicolumn{2}{|r|}' + '{{\\tablecontinued{Continued on next page}}} \\\\\n' + '\\hline\n\\endfoot\n\n\\endlastfoot' in table) assert ('\ncell1-1\n&\ncell1-2\n\\\\' in table) assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) - assert ('\\hline\\end{longtable}' in table) + assert ('\\hline\n\\end{longtable}' in table) # longtable having :widths: option table = tables['longtable having :widths: option'] From aefa1e0ab7f47a51b3dc537717ea57ab856a31ba Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Mon, 30 Jan 2017 01:55:39 +0900 Subject: [PATCH 163/190] refactor: Add InventoryAdapter class --- sphinx/ext/intersphinx.py | 62 +++++++++++++++++++++++++++------------ 1 file changed, 44 insertions(+), 18 deletions(-) diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py index d1be888ba..818fdf68a 100644 --- a/sphinx/ext/intersphinx.py +++ b/sphinx/ext/intersphinx.py @@ -62,6 +62,37 @@ logger = logging.getLogger(__name__) UTF8StreamReader = codecs.lookup('utf-8')[2] +class InventoryAdapter(object): + """Inventory adapter for environment""" + + def __init__(self, env): + self.env = env + + if not hasattr(env, 'intersphinx_cache'): + self.env.intersphinx_cache = {} # type: ignore + self.env.intersphinx_inventory = {} # type: ignore + self.env.intersphinx_named_inventory = {} # type: ignore + + @property + def cache(self): + # type: () -> Dict[unicode, Tuple[unicode, int, Inventory]] + return self.env.intersphinx_cache # type: ignore + + @property + def main_inventory(self): + # type: () -> Inventory + return self.env.intersphinx_inventory # type: ignore + + @property + def named_inventory(self): + # type: () -> Dict[unicode, Inventory] + return self.env.intersphinx_named_inventory + + def clear(self): + self.env.intersphinx_inventory.clear() + self.env.intersphinx_named_inventory.clear() + + def read_inventory_v1(f, 
uri, join): # type: (IO, unicode, Callable) -> Inventory f = UTF8StreamReader(f) @@ -262,12 +293,7 @@ def load_mappings(app): """Load all intersphinx mappings into the environment.""" now = int(time.time()) cache_time = now - app.config.intersphinx_cache_limit * 86400 - env = app.builder.env - if not hasattr(env, 'intersphinx_cache'): - env.intersphinx_cache = {} # type: ignore - env.intersphinx_inventory = {} # type: ignore - env.intersphinx_named_inventory = {} # type: ignore - cache = env.intersphinx_cache # type: ignore + inventories = InventoryAdapter(app.builder.env) update = False for key, value in iteritems(app.config.intersphinx_mapping): name = None # type: unicode @@ -296,19 +322,19 @@ def load_mappings(app): inv = posixpath.join(uri, INVENTORY_FILENAME) # decide whether the inventory must be read: always read local # files; remote ones only if the cache time is expired - if '://' not in inv or uri not in cache \ - or cache[uri][1] < cache_time: + if '://' not in inv or uri not in inventories.cache \ + or inventories.cache[uri][1] < cache_time: safe_inv_url = _get_safe_url(inv) # type: ignore logger.info('loading intersphinx inventory from %s...', safe_inv_url) invdata = fetch_inventory(app, uri, inv) if invdata: - cache[uri] = (name, now, invdata) + inventories.cache[uri] = (name, now, invdata) update = True break if update: - env.intersphinx_inventory = {} # type: ignore - env.intersphinx_named_inventory = {} # type: ignore + inventories.clear() + # Duplicate values in different inventories will shadow each # other; which one will override which can vary between builds # since they are specified using an unordered dict. To make @@ -316,21 +342,21 @@ def load_mappings(app): # add the unnamed inventories last. This means that the # unnamed inventories will shadow the named ones but the named # ones can still be accessed when the name is specified. - cached_vals = list(cache.values()) + cached_vals = list(inventories.cache.values()) named_vals = sorted(v for v in cached_vals if v[0]) unnamed_vals = [v for v in cached_vals if not v[0]] for name, _x, invdata in named_vals + unnamed_vals: if name: - env.intersphinx_named_inventory[name] = invdata # type: ignore + inventories.named_inventory[name] = invdata for type, objects in iteritems(invdata): - env.intersphinx_inventory.setdefault( # type: ignore - type, {}).update(objects) + inventories.main_inventory.setdefault(type, {}).update(objects) def missing_reference(app, env, node, contnode): # type: (Sphinx, BuildEnvironment, nodes.Node, nodes.Node) -> None """Attempt to resolve a missing reference via intersphinx references.""" target = node['reftarget'] + inventories = InventoryAdapter(env) objtypes = None # type: List[unicode] if node['reftype'] == 'any': # we search anything! 
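The refactoring above wraps the ``intersphinx_cache``, ``intersphinx_inventory`` and ``intersphinx_named_inventory`` attributes of the build environment behind the new ``InventoryAdapter``. As a rough, illustrative sketch of how the adapter is meant to be used (the ``summarize_inventories`` helper below is not part of the patch; it only assumes a ``BuildEnvironment`` from a running build)::

    from sphinx.ext.intersphinx import InventoryAdapter

    if False:
        # For type annotation
        from sphinx.environment import BuildEnvironment  # NOQA

    def summarize_inventories(env):
        # type: (BuildEnvironment) -> None
        """Print what intersphinx data is currently stored on *env*."""
        inventories = InventoryAdapter(env)  # creates the env attributes on first use
        for uri, (name, fetched_at, invdata) in inventories.cache.items():
            print('cached %s (name=%r, fetched at %s)' % (uri, name, fetched_at))
        for setname in inventories.named_inventory:
            print('named inventory available: %s' % setname)
        # clear() drops the merged inventories but keeps the download cache
        inventories.clear()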
@@ -347,14 +373,14 @@ def missing_reference(app, env, node, contnode): if not objtypes: return objtypes = ['%s:%s' % (domain, objtype) for objtype in objtypes] - to_try = [(env.intersphinx_inventory, target)] # type: ignore + to_try = [(inventories.main_inventory, target)] in_set = None if ':' in target: # first part may be the foreign doc set name setname, newtarget = target.split(':', 1) - if setname in env.intersphinx_named_inventory: # type: ignore + if setname in inventories.named_inventory: in_set = setname - to_try.append((env.intersphinx_named_inventory[setname], newtarget)) # type: ignore # NOQA + to_try.append((inventories.named_inventory[setname], newtarget)) for inventory, target in to_try: for objtype in objtypes: if objtype not in inventory or target not in inventory[objtype]: From 029e9908c0114fe74df5c1cbf3342dd5205bcac5 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Mon, 6 Feb 2017 12:03:32 +0900 Subject: [PATCH 164/190] Remove unused "type: ignore" --- mypy.ini | 1 + sphinx/builders/__init__.py | 2 +- sphinx/builders/devhelp.py | 2 +- sphinx/builders/html.py | 2 +- sphinx/config.py | 7 +++---- sphinx/domains/cpp.py | 2 +- sphinx/domains/python.py | 2 +- sphinx/domains/std.py | 4 ++-- sphinx/ext/autodoc.py | 4 ++-- sphinx/ext/doctest.py | 2 +- sphinx/ext/ifconfig.py | 2 +- sphinx/ext/imgmath.py | 2 +- sphinx/ext/intersphinx.py | 10 +++++----- sphinx/ext/napoleon/__init__.py | 5 ++--- sphinx/ext/napoleon/docstring.py | 2 +- sphinx/ext/pngmath.py | 2 +- sphinx/jinja2glue.py | 2 +- sphinx/search/__init__.py | 2 +- sphinx/util/__init__.py | 2 +- sphinx/util/i18n.py | 2 +- sphinx/writers/texinfo.py | 2 +- 21 files changed, 30 insertions(+), 31 deletions(-) diff --git a/mypy.ini b/mypy.ini index 17ded7ab8..f7b58a613 100644 --- a/mypy.ini +++ b/mypy.ini @@ -4,3 +4,4 @@ silent_imports = True fast_parser = True incremental = True check_untyped_defs = True +warn_unused_ignores = True diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index dbeca1a34..be86b7cb1 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -398,7 +398,7 @@ class Builder(object): self.write_doc(docname, doctree) # warm up caches/compile templates using the first document - firstname, docnames = docnames[0], docnames[1:] # type: ignore + firstname, docnames = docnames[0], docnames[1:] doctree = self.env.get_and_resolve_doctree(firstname, self) self.write_doc_serialized(firstname, doctree) self.write_doc(firstname, doctree) diff --git a/sphinx/builders/devhelp.py b/sphinx/builders/devhelp.py index 031c55184..36a604188 100644 --- a/sphinx/builders/devhelp.py +++ b/sphinx/builders/devhelp.py @@ -121,7 +121,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder): link=ref[1]) if subitems: - parent_title = re.sub(r'\s*\(.*\)\s*$', '', title) # type: ignore + parent_title = re.sub(r'\s*\(.*\)\s*$', '', title) for subitem in subitems: write_index("%s %s" % (parent_title, subitem[0]), subitem[1], []) diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 062c56669..46daef97c 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -191,7 +191,7 @@ class StandaloneHTMLBuilder(Builder): else: self.translator_class = HTMLTranslator - def get_outdated_docs(self): # type: ignore + def get_outdated_docs(self): # type: () -> Iterator[unicode] cfgdict = dict((confval.name, confval.value) for confval in self.config.filter('html')) self.config_hash = get_stable_hash(cfgdict) diff --git a/sphinx/config.py b/sphinx/config.py index 
f5867f40f..12f206f96 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -171,8 +171,7 @@ class Config(object): if getenv('SOURCE_DATE_EPOCH') is not None: for k in ('copyright', 'epub_copyright'): if k in config: - config[k] = copyright_year_re.sub('\g<1>%s' % format_date('%Y'), # type: ignore # NOQA - config[k]) + config[k] = copyright_year_re.sub('\g<1>%s' % format_date('%Y'), config[k]) def check_types(self): # type: () -> None @@ -221,7 +220,7 @@ class Config(object): # check all string values for non-ASCII characters in bytestrings, # since that can result in UnicodeErrors all over the place for name, value in iteritems(self._raw_config): - if isinstance(value, binary_type) and nonascii_re.search(value): # type: ignore + if isinstance(value, binary_type) and nonascii_re.search(value): logger.warning('the config value %r is set to a string with non-ASCII ' 'characters; this can lead to Unicode errors occurring. ' 'Please use Unicode strings, e.g. %r.', name, u'Content') @@ -272,7 +271,7 @@ class Config(object): try: if '.' in valname: realvalname, key = valname.split('.', 1) - config.setdefault(realvalname, {})[key] = value # type: ignore + config.setdefault(realvalname, {})[key] = value continue elif valname not in self.values: logger.warning('unknown config value %r in override, ignoring', valname) diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py index aa26df405..dd6b23b3e 100644 --- a/sphinx/domains/cpp.py +++ b/sphinx/domains/cpp.py @@ -4544,7 +4544,7 @@ class CPPObject(ObjectDescription): # type: (Any) -> Any raise NotImplementedError() - def describe_signature(self, signode, ast, options): # type: ignore + def describe_signature(self, signode, ast, options): # type: (addnodes.desc_signature, Any, Dict) -> None ast.describe_signature(signode, 'lastIsName', self.env, options) diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py index 688062be6..2467e85ef 100644 --- a/sphinx/domains/python.py +++ b/sphinx/domains/python.py @@ -191,7 +191,7 @@ class PyObject(ObjectDescription): """ return False - def handle_signature(self, sig, signode): # type: ignore + def handle_signature(self, sig, signode): # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode] """Transform a Python signature into RST nodes. 
diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py index 300fc9c19..1c064ee65 100644 --- a/sphinx/domains/std.py +++ b/sphinx/domains/std.py @@ -776,8 +776,8 @@ class StandardDomain(Domain): docname, labelid = self.data['progoptions'].get((progname, target), ('', '')) if not docname: commands = [] - while ws_re.search(target): # type: ignore - subcommand, target = ws_re.split(target, 1) # type: ignore + while ws_re.search(target): + subcommand, target = ws_re.split(target, 1) commands.append(subcommand) progname = "-".join(commands) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 01ced26de..cab900421 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -1538,11 +1538,11 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type: # to distinguish classmethod/staticmethod obj = self.parent.__dict__.get(self.object_name) - if isinstance(obj, classmethod): # type: ignore + if isinstance(obj, classmethod): self.directivetype = 'classmethod' # document class and static members before ordinary ones self.member_order = self.member_order - 1 - elif isinstance(obj, staticmethod): # type: ignore + elif isinstance(obj, staticmethod): self.directivetype = 'staticmethod' # document class and static members before ordinary ones self.member_order = self.member_order - 1 diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index 984642312..a918a925c 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -128,7 +128,7 @@ class TestDirective(Directive): option_strings = self.options['options'].replace(',', ' ').split() for option in option_strings: prefix, option_name = option[0], option[1:] - if prefix not in '+-': # type: ignore + if prefix not in '+-': self.state.document.reporter.warning( _("missing '+' or '-' in '%s' option.") % option, line=self.lineno) diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py index 297e476f5..430cc9325 100644 --- a/sphinx/ext/ifconfig.py +++ b/sphinx/ext/ifconfig.py @@ -62,7 +62,7 @@ def process_ifconfig_nodes(app, doctree, docname): ns['builder'] = app.builder.name for node in doctree.traverse(ifconfig): try: - res = eval(node['expr'], ns) # type: ignore + res = eval(node['expr'], ns) except Exception as err: # handle exceptions in a clean fashion from traceback import format_exception_only diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py index 168862d28..7b817d615 100644 --- a/sphinx/ext/imgmath.py +++ b/sphinx/ext/imgmath.py @@ -128,7 +128,7 @@ def render_math(self, math): else: tempdir = self.builder._imgmath_tempdir - with codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8') as tf: # type: ignore + with codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8') as tf: tf.write(latex) # build latex command; old versions of latex don't have the diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py index 818fdf68a..5493e6782 100644 --- a/sphinx/ext/intersphinx.py +++ b/sphinx/ext/intersphinx.py @@ -69,19 +69,19 @@ class InventoryAdapter(object): self.env = env if not hasattr(env, 'intersphinx_cache'): - self.env.intersphinx_cache = {} # type: ignore - self.env.intersphinx_inventory = {} # type: ignore - self.env.intersphinx_named_inventory = {} # type: ignore + self.env.intersphinx_cache = {} + self.env.intersphinx_inventory = {} + self.env.intersphinx_named_inventory = {} @property def cache(self): # type: () -> Dict[unicode, Tuple[unicode, int, Inventory]] - return self.env.intersphinx_cache # type: ignore + return self.env.intersphinx_cache @property def 
main_inventory(self): # type: () -> Inventory - return self.env.intersphinx_inventory # type: ignore + return self.env.intersphinx_inventory @property def named_inventory(self): diff --git a/sphinx/ext/napoleon/__init__.py b/sphinx/ext/napoleon/__init__.py index f6fccac7d..118316f07 100644 --- a/sphinx/ext/napoleon/__init__.py +++ b/sphinx/ext/napoleon/__init__.py @@ -290,8 +290,7 @@ def setup(app): """ if not isinstance(app, Sphinx): - return # type: ignore - # probably called by tests + return # probably called by tests _patch_python_domain() @@ -311,7 +310,7 @@ def _patch_python_domain(): pass else: import sphinx.domains.python - import sphinx.locale # type: ignore + import sphinx.locale l_ = sphinx.locale.lazy_gettext for doc_field in sphinx.domains.python.PyObject.doc_field_types: if doc_field.name == 'parameter': diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py index 6fee87b34..c5736b8ec 100644 --- a/sphinx/ext/napoleon/docstring.py +++ b/sphinx/ext/napoleon/docstring.py @@ -128,7 +128,7 @@ class GoogleDocstring(UnicodeMixin): self._obj = obj self._opt = options if isinstance(docstring, string_types): - docstring = docstring.splitlines() # type: ignore + docstring = docstring.splitlines() self._lines = docstring self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip()) self._parsed_lines = [] # type: List[unicode] diff --git a/sphinx/ext/pngmath.py b/sphinx/ext/pngmath.py index 0bc941ee2..98a4d2513 100644 --- a/sphinx/ext/pngmath.py +++ b/sphinx/ext/pngmath.py @@ -119,7 +119,7 @@ def render_math(self, math): else: tempdir = self.builder._mathpng_tempdir - with codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8') as tf: # type: ignore + with codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8') as tf: tf.write(latex) # build latex command; old versions of latex don't have the diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py index c1bd04765..6ebb1353f 100644 --- a/sphinx/jinja2glue.py +++ b/sphinx/jinja2glue.py @@ -33,7 +33,7 @@ if False: def _tobool(val): # type: (unicode) -> bool if isinstance(val, string_types): - return val.lower() in ('true', '1', 'yes', 'on') # type: ignore + return val.lower() in ('true', '1', 'yes', 'on') return bool(val) diff --git a/sphinx/search/__init__.py b/sphinx/search/__init__.py index aff6aec34..658cf2f70 100644 --- a/sphinx/search/__init__.py +++ b/sphinx/search/__init__.py @@ -315,7 +315,7 @@ class IndexBuilder(object): """Dump the frozen index to a stream.""" if isinstance(format, string_types): format = self.formats[format] # type: ignore - format.dump(self.freeze(), stream) # type: ignore + format.dump(self.freeze(), stream) def get_objects(self, fn2index): # type: (Dict[unicode, int]) -> Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]] # NOQA diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index 5fb42b9d5..3b1f076a9 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -207,7 +207,7 @@ def save_traceback(app): import platform exc = sys.exc_info()[1] if isinstance(exc, SphinxParallelError): - exc_format = '(Error in parallel process)\n' + exc.traceback # type: ignore + exc_format = '(Error in parallel process)\n' + exc.traceback else: exc_format = traceback.format_exc() fd, path = tempfile.mkstemp('.log', 'sphinx-err-') diff --git a/sphinx/util/i18n.py b/sphinx/util/i18n.py index e4f1d95f5..bb6896080 100644 --- a/sphinx/util/i18n.py +++ b/sphinx/util/i18n.py @@ -95,7 +95,7 @@ def find_catalog_files(docname, srcdir, locale_dirs, lang, 
compaction): domain = find_catalog(docname, compaction) files = [gettext.find(domain, path.join(srcdir, dir_), [lang]) # type: ignore - for dir_ in locale_dirs] # type: ignore + for dir_ in locale_dirs] files = [path.relpath(f, srcdir) for f in files if f] # type: ignore return files # type: ignore diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py index 07a0e1e83..741009995 100644 --- a/sphinx/writers/texinfo.py +++ b/sphinx/writers/texinfo.py @@ -248,7 +248,7 @@ class TexinfoTranslator(nodes.NodeVisitor): title = None # type: unicode title = elements['title'] # type: ignore if not title: - title = self.document.next_node(nodes.title) # type: ignore + title = self.document.next_node(nodes.title) title = (title and title.astext()) or '<untitled>' # type: ignore elements['title'] = self.escape_id(title) or '<untitled>' # filename From 1be5282c776ab6f286dafe803b8b50babc88849d Mon Sep 17 00:00:00 2001 From: Stefan Scherfke <stefan.scherfke@energymeteo.de> Date: Mon, 6 Feb 2017 10:03:22 +0100 Subject: [PATCH 165/190] Add "--color" option to sphinx-build (fixes issue #3248). --- sphinx/cmdline.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/sphinx/cmdline.py b/sphinx/cmdline.py index 7a97e10e2..0662da14c 100644 --- a/sphinx/cmdline.py +++ b/sphinx/cmdline.py @@ -116,9 +116,6 @@ def handle_exception(app, opts, exception, stderr=sys.stderr): def main(argv): # type: (List[unicode]) -> int - if not color_terminal(): - nocolor() - parser = optparse.OptionParser(USAGE, epilog=EPILOG, formatter=MyFormatter()) parser.add_option('--version', action='store_true', dest='version', help='show version information and exit') @@ -167,8 +164,12 @@ def main(argv): help='no output on stdout, just warnings on stderr') group.add_option('-Q', action='store_true', dest='really_quiet', help='no output at all, not even warnings') + group.add_option('--color', type='choice', action='store', default='auto', + choices=['yes', 'no', 'auto'], + help='color terminal output (yes/no/auto)') group.add_option('-N', action='store_true', dest='nocolor', - help='do not emit colored output') + help='do not emit colored output (deprecated, ' + 'please use "--color=no")') group.add_option('-w', metavar='FILE', dest='warnfile', help='write warnings (and errors) to given file') group.add_option('-W', action='store_true', dest='warningiserror', @@ -239,6 +240,8 @@ def main(argv): return 1 if opts.nocolor: + opts.color = 'no' + if opts.color == 'no' or (opts.color == 'auto' and not color_terminal()): nocolor() doctreedir = abspath(opts.doctreedir or path.join(outdir, '.doctrees')) From 577e842532022c1d8bf5f18ed08759f85d3601db Mon Sep 17 00:00:00 2001 From: Stefan Scherfke <stefan.scherfke@energymeteo.de> Date: Tue, 7 Feb 2017 14:12:18 +0100 Subject: [PATCH 166/190] Use "--color" and "--no-color" options --- sphinx/cmdline.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/sphinx/cmdline.py b/sphinx/cmdline.py index 0662da14c..70828c635 100644 --- a/sphinx/cmdline.py +++ b/sphinx/cmdline.py @@ -164,12 +164,12 @@ def main(argv): help='no output on stdout, just warnings on stderr') group.add_option('-Q', action='store_true', dest='really_quiet', help='no output at all, not even warnings') - group.add_option('--color', type='choice', action='store', default='auto', - choices=['yes', 'no', 'auto'], - help='color terminal output (yes/no/auto)') - group.add_option('-N', action='store_true', dest='nocolor', - help='do not emit colored output (deprecated, ' - 'please 
use "--color=no")') + group.add_option('--color', dest='color', + action='store_const', const='yes', default='auto', + help='Do emit colored output (default: auto-detect)') + group.add_option('-N', '--no-color', dest='color', + action='store_const', const='no', + help='Do not emit colored output (default: auot-detect)') group.add_option('-w', metavar='FILE', dest='warnfile', help='write warnings (and errors) to given file') group.add_option('-W', action='store_true', dest='warningiserror', @@ -239,8 +239,6 @@ def main(argv): print('Error: Cannot combine -a option and filenames.', file=sys.stderr) return 1 - if opts.nocolor: - opts.color = 'no' if opts.color == 'no' or (opts.color == 'auto' and not color_terminal()): nocolor() From 8b2c92d54f0132cd5f3f87fb1a0a734820ecc70d Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sun, 5 Feb 2017 00:23:35 +0900 Subject: [PATCH 167/190] latex: Refactor spanning cells --- sphinx/writers/latex.py | 155 +++++++++++++++---------- tests/roots/test-latex-table/index.rst | 17 +++ tests/test_build_latex.py | 17 +++ 3 files changed, 126 insertions(+), 63 deletions(-) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 5858ad85e..706d00e01 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -15,6 +15,7 @@ import re import sys from os import path +from collections import defaultdict from six import itervalues, text_type from docutils import nodes, writers @@ -321,17 +322,19 @@ class Table(object): self.body = [] # type: List[unicode] self.classes = node.get('classes', []) # type: List[unicode] self.col = 0 + self.row = 0 self.colcount = 0 self.colspec = None # type: unicode self.colwidths = [] # type: List[int] - self.rowcount = 0 self.has_problematic = False self.has_verbatim = False self.caption = None # type: List[unicode] + self.cells = defaultdict(int) # type: Dict[Tuple[int, int], int] + self.cell_id = 0 def is_longtable(self): # type: () -> bool - return self.rowcount > 30 or 'longtable' in self.classes + return self.row > 30 or 'longtable' in self.classes def get_table_type(self): # type: () -> unicode @@ -361,6 +364,52 @@ class Table(object): else: return '{|' + ('l|' * self.colcount) + '}\n' + def add_cell(self, height, width): + self.cell_id += 1 + for col in range(width): + for row in range(height): + assert self.cells[(self.row + row, self.col + col)] == 0 + self.cells[(self.row + row, self.col + col)] = self.cell_id + + def cell(self, row=None, col=None): + try: + if row is None: + row = self.row + if col is None: + col = self.col + return TableCell(self, row, col) + except IndexError: + return None + + +class TableCell(object): + def __init__(self, table, row, col): + if table.cells[(row, col)] == 0: + raise IndexError + + self.table = table + self.cell_id = table.cells[(row, col)] + for n in range(row + 1): + if table.cells[(row - n, col)] == self.cell_id: + self.row = row - n + for n in range(col + 1): + if table.cells[(row, col - n)] == self.cell_id: + self.col = col - n + + @property + def width(self): + width = 0 + while self.table.cells[(self.row, self.col + width)] == self.cell_id: + width += 1 + return width + + @property + def height(self): + height = 0 + while self.table.cells[(self.row + height, self.col)] == self.cell_id: + height += 1 + return height + def escape_abbr(text): # type: (unicode) -> unicode @@ -417,8 +466,6 @@ class LaTeXTranslator(nodes.NodeVisitor): self.in_parsed_literal = 0 self.compact_list = 0 self.first_param = 0 - self.remember_multirow = {} # type: 
Dict[int, int] - self.remember_multirowcol = {} # type: Dict[int, int] # determine top section level if builder.config.latex_toplevel_sectioning: @@ -1254,72 +1301,53 @@ class LaTeXTranslator(nodes.NodeVisitor): def depart_tbody(self, node): # type: (nodes.Node) -> None self.popbody() - self.remember_multirow = {} - self.remember_multirowcol = {} def visit_row(self, node): # type: (nodes.Node) -> None self.table.col = 0 - for key, value in self.remember_multirow.items(): - if not value and key in self.remember_multirowcol: - del self.remember_multirowcol[key] + + # fill column if first one is a wide-multirow + cell = self.table.cell(self.table.row, 0) + if cell and cell.row != self.table.row: # bottom part of multirow cell + self.table.col += cell.width + if cell.width > 1: # use \multicolumn for wide multirow cell + self.body.append('\\multicolumn{%d}{|l|}{}\\relax ' % cell.width) def depart_row(self, node): # type: (nodes.Node) -> None self.body.append('\\\\\n') - if any(self.remember_multirow.values()): - linestart = 1 - col = self.table.colcount - for col in range(1, self.table.col + 1): - if self.remember_multirow.get(col): - if linestart != col: - linerange = str(linestart) + '-' + str(col - 1) - self.body.append('\\cline{' + linerange + '}') - linestart = col + 1 - if self.remember_multirowcol.get(col, 0): - linestart += self.remember_multirowcol[col] - if linestart <= col: - linerange = str(linestart) + '-' + str(col) - self.body.append('\\cline{' + linerange + '}') - else: + cells = [self.table.cell(self.table.row, i) for i in range(self.table.colcount)] + underlined = [cell.row + cell.height == self.table.row + 1 for cell in cells] + if all(underlined): self.body.append('\\hline') - self.table.rowcount += 1 + else: + i = 0 + underlined.extend([False]) # sentinel + while i < len(underlined): + if underlined[i] is True: + j = underlined[i:].index(False) + self.body.append('\\cline{%d-%d}' % (i + 1, i + j)) + i += j + i += 1 + self.table.row += 1 def visit_entry(self, node): # type: (nodes.Node) -> None - if self.table.col == 0: - while self.remember_multirow.get(self.table.col + 1, 0): - self.table.col += 1 - self.remember_multirow[self.table.col] -= 1 - if self.remember_multirowcol.get(self.table.col, 0): - extracols = self.remember_multirowcol[self.table.col] - self.body.append('\\multicolumn{') - self.body.append(str(extracols + 1)) - self.body.append('}{|l|}{}\\relax ') - self.table.col += extracols - self.body.append('&') - else: + if self.table.col > 0: self.body.append('&') - self.table.col += 1 + self.table.add_cell(node.get('morerows', 0) + 1, node.get('morecols', 0) + 1) + cell = self.table.cell() context = '' - if 'morecols' in node: - self.body.append('\\multicolumn{') - self.body.append(str(node.get('morecols') + 1)) - if self.table.col == 1: - self.body.append('}{|l|}{\\relax ') + if cell.width > 1: + self.body.append('\\multicolumn{%d}' % cell.width) + if self.table.col == 0: + self.body.append('{|l|}{\\relax ') else: - self.body.append('}{l|}{\\relax ') + self.body.append('{l|}{\\relax ') context += '\\unskip}\\relax ' - if 'morerows' in node: - self.body.append('\\multirow{') - self.body.append(str(node.get('morerows') + 1)) - self.body.append('}{*}{\\relax ') + if cell.height > 1: + self.body.append('\\multirow{%d}{*}{\\relax ' % cell.height) context += '\\unskip}\\relax ' - self.remember_multirow[self.table.col] = node.get('morerows') - if 'morecols' in node: - if 'morerows' in node: - self.remember_multirowcol[self.table.col] = node.get('morecols') - 
self.table.col += node.get('morecols') if (('morecols' in node or 'morerows' in node) and (len(node) > 2 or len(node.astext().split('\n')) > 2)): self.in_merged_cell = 1 @@ -1333,16 +1361,6 @@ class LaTeXTranslator(nodes.NodeVisitor): else: self.body.append('\\sphinxstylethead{\\relax ') context += '\\unskip}\\relax ' - while self.remember_multirow.get(self.table.col + 1, 0): - self.table.col += 1 - self.remember_multirow[self.table.col] -= 1 - context += '&' - if self.remember_multirowcol.get(self.table.col, 0): - extracols = self.remember_multirowcol[self.table.col] - context += '\\multicolumn{' - context += str(extracols + 1) - context += '}{l|}{}\\relax ' - self.table.col += extracols if len(node.traverse(nodes.paragraph)) >= 2: self.table.has_problematic = True self.context.append(context) @@ -1361,6 +1379,17 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append(line) self.body.append(self.context.pop()) # header + cell = self.table.cell() + self.table.col += cell.width + + # fill column if next one is a wide-multirow + nextcell = self.table.cell() + if nextcell and nextcell.row != self.table.row: # bottom part of multirow cell + self.table.col += nextcell.width + self.body.append('&') + if nextcell.width > 1: # use \multicolumn for wide multirow cell + self.body.append('\\multicolumn{%d}{l|}{}\\relax ' % nextcell.width) + def visit_acks(self, node): # type: (nodes.Node) -> None # this is a list in the source, but should be rendered as a diff --git a/tests/roots/test-latex-table/index.rst b/tests/roots/test-latex-table/index.rst index ae461df2d..129d024a0 100644 --- a/tests/roots/test-latex-table/index.rst +++ b/tests/roots/test-latex-table/index.rst @@ -12,6 +12,23 @@ cell2-1 cell2-2 cell3-1 cell3-2 ======= ======= +grid table +---------- + ++---------+---------+---------+ +| header1 | header2 | header3 | ++=========+=========+=========+ +| cell1-1 | cell1-2 | cell1-3 | ++---------+ +---------+ +| cell2-1 | | cell2-2 | ++ +---------+---------+ +| | cell3-2 | ++---------+ | +| cell4-1 | | ++---------+---------+---------+ +| cell5-1 | ++---------+---------+---------+ + table having :widths: option ---------------------------- diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 1c1cd3928..17fa61957 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -839,6 +839,23 @@ def test_latex_table(app, status, warning): assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) assert ('\\hline\n\\end{tabulary}' in table) + # grid table + table = tables['grid table'] + assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|L|}' in table) + assert ('\\hline\n' + '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' + '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax &' + '\\sphinxstylethead{\\relax \nheader3\n\\unskip}\\relax \\\\' in table) + assert ('\\hline\ncell1-1\n&\\multirow{2}{*}{\\relax \ncell1-2\n\\unskip}\\relax &\n' + 'cell1-3\n\\\\' in table) + assert ('\\cline{1-1}\\cline{3-3}\\multirow{2}{*}{\\relax \ncell2-1\n\\unskip}\\relax &&\n' + 'cell2-2\n\\\\' in table) + assert ('\\cline{2-3}&\\multicolumn{2}{l|}{\\relax \\multirow{2}{*}{\\relax \n' + 'cell3-2\n\\unskip}\\relax \\unskip}\\relax \\\\' in table) + assert ('\\cline{1-1}\ncell4-1\n&\\multicolumn{2}{l|}{}\\relax \\\\' in table) + assert ('\\hline\\multicolumn{3}{|l|}{\\relax \ncell5-1\n\\unskip}\\relax \\\\\n' + '\\hline\n\\end{tabulary}' in table) + # table having :widths: option table = tables['table having :widths: option'] assert 
('\\noindent\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) From ee409e9b7fb6e407fc9d9cd2a682223058dab2d7 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 7 Feb 2017 22:11:55 +0900 Subject: [PATCH 168/190] Refactor: make TableCell.__init__() simple --- sphinx/writers/latex.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 706d00e01..d03e610e3 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -389,12 +389,14 @@ class TableCell(object): self.table = table self.cell_id = table.cells[(row, col)] - for n in range(row + 1): - if table.cells[(row - n, col)] == self.cell_id: - self.row = row - n - for n in range(col + 1): - if table.cells[(row, col - n)] == self.cell_id: - self.col = col - n + self.row = row + self.col = col + + # adjust position for multirow/multicol cell + while table.cells[(self.row - 1, self.col)] == self.cell_id: + self.row -= 1 + while table.cells[(self.row, self.col - 1)] == self.cell_id: + self.col -= 1 @property def width(self): From 44e845de230dd9ed253e2ce7b9da4eba6ae1a24f Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 7 Feb 2017 22:51:03 +0900 Subject: [PATCH 169/190] Add comments --- sphinx/writers/latex.py | 44 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index d03e610e3..c5961d4b3 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -316,27 +316,44 @@ class ShowUrlsTransform(object): class Table(object): + """A table data""" + def __init__(self, node): # type: (nodes.table) -> None self.header = [] # type: List[unicode] self.body = [] # type: List[unicode] - self.classes = node.get('classes', []) # type: List[unicode] - self.col = 0 - self.row = 0 self.colcount = 0 self.colspec = None # type: unicode self.colwidths = [] # type: List[int] self.has_problematic = False self.has_verbatim = False self.caption = None # type: List[unicode] + + # current position + self.col = 0 + self.row = 0 + + # for internal use + self.classes = node.get('classes', []) # type: List[unicode] self.cells = defaultdict(int) # type: Dict[Tuple[int, int], int] - self.cell_id = 0 + # it maps table location to cell_id + # (cell = rectangular area) + self.cell_id = 0 # last assigned cell_id def is_longtable(self): + """True if and only if table uses longtable environment.""" # type: () -> bool return self.row > 30 or 'longtable' in self.classes def get_table_type(self): + """Returns the LaTeX environment name for the table. + + The class currently supports: + + * longtable + * tabular + * taburary + """ # type: () -> unicode if self.is_longtable(): return 'longtable' @@ -350,6 +367,12 @@ class Table(object): return 'tabulary' def get_colspec(self): + """Returns a column spec of table. + + This is what LaTeX calls the 'preamble argument' of the used table environment. + + .. note:: the ``\X`` column type specifier is defined in ``sphinx.sty``. + """ # type: () -> unicode if self.colspec: return self.colspec @@ -365,6 +388,10 @@ class Table(object): return '{|' + ('l|' * self.colcount) + '}\n' def add_cell(self, height, width): + """Adds a new cell to a table. + + It will be located at current position: (``self.row``, ``self.col``). 
+ """ self.cell_id += 1 for col in range(width): for row in range(height): @@ -372,6 +399,11 @@ class Table(object): self.cells[(self.row + row, self.col + col)] = self.cell_id def cell(self, row=None, col=None): + """Returns a cell object (i.e. rectangular area) containing given position: (``row``, ``col``) + + If no ``row`` or ``col`` are given, the current position; ``self.row`` and + ``self.col`` are used to get a cell object by default. + """ try: if row is None: row = self.row @@ -383,6 +415,8 @@ class Table(object): class TableCell(object): + """A cell data of tables.""" + def __init__(self, table, row, col): if table.cells[(row, col)] == 0: raise IndexError @@ -400,6 +434,7 @@ class TableCell(object): @property def width(self): + """Returns the cell width.""" width = 0 while self.table.cells[(self.row, self.col + width)] == self.cell_id: width += 1 @@ -407,6 +442,7 @@ class TableCell(object): @property def height(self): + """Returns the cell height.""" height = 0 while self.table.cells[(self.row + height, self.col)] == self.cell_id: height += 1 From 141cb9e9543919d527e5ecc7b51a45e7db3bdfa0 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 8 Feb 2017 00:38:08 +0900 Subject: [PATCH 170/190] Fix flake8 violation --- sphinx/writers/latex.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index c5961d4b3..6c4973f03 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -399,10 +399,10 @@ class Table(object): self.cells[(self.row + row, self.col + col)] = self.cell_id def cell(self, row=None, col=None): - """Returns a cell object (i.e. rectangular area) containing given position: (``row``, ``col``) + """Returns a cell object (i.e. rectangular area) containing given position. - If no ``row`` or ``col`` are given, the current position; ``self.row`` and - ``self.col`` are used to get a cell object by default. + If no option arguments: ``row`` or ``col`` are given, the current position; + ``self.row`` and ``self.col`` are used to get a cell object by default. """ try: if row is None: From 6ae8f6ba182702ce0af475d4baa51f29b237c5de Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Tue, 7 Feb 2017 20:03:07 +0100 Subject: [PATCH 171/190] logging module docs --- doc/extdev/logging.rst | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/doc/extdev/logging.rst b/doc/extdev/logging.rst index 60e11469e..50110c2a4 100644 --- a/doc/extdev/logging.rst +++ b/doc/extdev/logging.rst @@ -5,11 +5,11 @@ Logging API .. function:: sphinx.util.logging.getLogger(name) - Return a logger wrapped by :class:`SphinxLoggerAdapter` with the specified *name*. + Returns a logger wrapped by :class:`SphinxLoggerAdapter` with the specified *name*. Example usage:: - from sphinx.util import logging # Load instead python's logging module + from sphinx.util import logging # Load on top of python's logging module logger = logging.getLogger(__name__) logger.info('Hello, this is an extension!') @@ -20,18 +20,18 @@ Logging API .. method:: SphinxLoggerAdapter.critical(level, msg, *args, **kwargs) .. method:: SphinxLoggerAdapter.warning(level, msg, *args, **kwargs) - Logs a message with specified level on this logger. - Basically, the arguments are same as python's logging module. + Logs a message on this logger with the specified level. + Basically, the arguments are as with python's logging module. 
In addition, Sphinx logger supports following keyword arguments: **type**, ***subtype*** - Indicate categories of warning logs. It is used to suppress + Categories of warning logs. It is used to suppress warnings by :confval:`suppress_warnings` setting. **location** - Indicate where the warning is happened. It is used to show - the path and line number to each log. It allows docname, + Where the warning happened. It is used to include + the path and line number in each log. It allows docname, tuple of docname and line number and nodes:: logger = sphinx.util.logging.getLogger(__name__) @@ -40,7 +40,7 @@ Logging API logger.warning('Warning happened!', location=some_node) **color** - Indicate the color of logs. By default, warning level logs are + The color of logs. By default, warning level logs are colored as ``"darkred"``. The others are not colored. .. method:: SphinxLoggerAdapter.log(level, msg, *args, **kwargs) @@ -48,23 +48,23 @@ Logging API .. method:: SphinxLoggerAdapter.verbose(level, msg, *args, **kwargs) .. method:: SphinxLoggerAdapter.debug(level, msg, *args, **kwargs) - Logs a message with specified level on this logger. - Basically, the arguments are same as python's logging module. + Logs a message to this logger with the specified level. + Basically, the arguments are as with python's logging module. In addition, Sphinx logger supports following keyword arguments: **nonl** - If true, the logger does not fold lines at end of the log message. + If true, the logger does not fold lines at the end of the log message. The default is ``False``. **color** - Indicate the color of logs. By default, debug level logs are - colored as ``"darkgray"``, and debug2 ones are ``"lightgray"``. + The color of logs. By default, debug level logs are + colored as ``"darkgray"``, and debug2 level ones are ``"lightgray"``. The others are not colored. .. function:: pending_logging() - Make all logs as pending while the context:: + Marks all logs as pending:: with pending_logging(): logger.warning('Warning message!') # not flushed yet @@ -74,4 +74,4 @@ Logging API .. function:: pending_warnings() - Make warning logs as pending while the context. Similar to :func:`pending_logging`. + Marks warning logs as pending. Similar to :func:`pending_logging`. From a58ebedffde696e6394e483e510f36be4b34e0d9 Mon Sep 17 00:00:00 2001 From: Jakub Wilk <jwilk@jwilk.net> Date: Tue, 7 Feb 2017 20:14:47 +0100 Subject: [PATCH 172/190] Fix typos --- doc/extdev/appapi.rst | 2 +- doc/intl.rst | 2 +- doc/markup/code.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/extdev/appapi.rst b/doc/extdev/appapi.rst index c02e85933..e19e6f03c 100644 --- a/doc/extdev/appapi.rst +++ b/doc/extdev/appapi.rst @@ -361,7 +361,7 @@ package. .. method:: Sphinx.add_env_collector(collector) - Register a environment collector class (refs: :ref:`collector-api`) + Register an environment collector class (refs: :ref:`collector-api`) .. versionadded:: 1.6 diff --git a/doc/intl.rst b/doc/intl.rst index 97f0e013e..dacced65b 100644 --- a/doc/intl.rst +++ b/doc/intl.rst @@ -74,7 +74,7 @@ Quick guide ^^^^^^^^^^^ `sphinx-intl`_ is a useful tool to work with Sphinx translation flow. -This section describe a easy way to translate with sphinx-intl. +This section describe an easy way to translate with sphinx-intl. #. Install `sphinx-intl`_ by :command:`pip install sphinx-intl` or :command:`easy_install sphinx-intl`. 
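To make the logging API documented in ``doc/extdev/logging.rst`` above a bit more concrete, here is a minimal extension-side sketch (the ``on_doctree_read`` handler, its name, and its wiring to the ``doctree-read`` event are illustrative assumptions, not part of the patch)::

    from docutils import nodes

    from sphinx.util import logging  # load on top of python's logging module

    logger = logging.getLogger(__name__)

    def on_doctree_read(app, doctree):
        # plain progress output; passing nonl=True would keep the line open
        logger.info('my-extension: processing %s', app.env.docname)
        if not doctree.traverse(nodes.title):
            # ``type``/``subtype`` let users silence this via suppress_warnings;
            # ``location`` accepts a docname, a (docname, line) tuple or a node
            logger.warning('document has no title', location=app.env.docname,
                           type='myext', subtype='no-title')

    def delayed_warnings():
        # messages emitted inside the block are flushed when the block exits
        with logging.pending_logging():
            logger.warning('reported only after the with-block ends')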
diff --git a/doc/markup/code.rst b/doc/markup/code.rst index c7cb0f911..80191c977 100644 --- a/doc/markup/code.rst +++ b/doc/markup/code.rst @@ -232,7 +232,7 @@ For example:: :rst:dir:`literalinclude` also supports the ``caption`` and ``name`` option. -``caption`` has a additional feature that if you leave the value empty, the shown +``caption`` has an additional feature that if you leave the value empty, the shown filename will be exactly the one given as an argument. From 81eb101e9f8fcee1c439ee0dd501d135eced01c6 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 8 Feb 2017 14:31:59 +0900 Subject: [PATCH 173/190] Fix mypy violations --- sphinx/writers/latex.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 6c4973f03..7b27eb3ab 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -341,11 +341,12 @@ class Table(object): self.cell_id = 0 # last assigned cell_id def is_longtable(self): - """True if and only if table uses longtable environment.""" # type: () -> bool + """True if and only if table uses longtable environment.""" return self.row > 30 or 'longtable' in self.classes def get_table_type(self): + # type: () -> unicode """Returns the LaTeX environment name for the table. The class currently supports: @@ -354,7 +355,6 @@ class Table(object): * tabular * taburary """ - # type: () -> unicode if self.is_longtable(): return 'longtable' elif self.has_verbatim: @@ -367,13 +367,13 @@ class Table(object): return 'tabulary' def get_colspec(self): + # type: () -> unicode """Returns a column spec of table. This is what LaTeX calls the 'preamble argument' of the used table environment. .. note:: the ``\X`` column type specifier is defined in ``sphinx.sty``. """ - # type: () -> unicode if self.colspec: return self.colspec elif self.colwidths and 'colwidths-given' in self.classes: @@ -388,6 +388,7 @@ class Table(object): return '{|' + ('l|' * self.colcount) + '}\n' def add_cell(self, height, width): + # type: (int, int) -> None """Adds a new cell to a table. It will be located at current position: (``self.row``, ``self.col``). @@ -399,6 +400,7 @@ class Table(object): self.cells[(self.row + row, self.col + col)] = self.cell_id def cell(self, row=None, col=None): + # type: (int, int) -> TableCell """Returns a cell object (i.e. rectangular area) containing given position. 
If no option arguments: ``row`` or ``col`` are given, the current position; @@ -418,6 +420,7 @@ class TableCell(object): """A cell data of tables.""" def __init__(self, table, row, col): + # type: (Table, int, int) -> None if table.cells[(row, col)] == 0: raise IndexError @@ -434,6 +437,7 @@ class TableCell(object): @property def width(self): + # type: () -> int """Returns the cell width.""" width = 0 while self.table.cells[(self.row, self.col + width)] == self.cell_id: @@ -442,6 +446,7 @@ class TableCell(object): @property def height(self): + # type: () -> int """Returns the cell height.""" height = 0 while self.table.cells[(self.row + height, self.col)] == self.cell_id: From 6fa0262802a09050e09445c9fd630c69b5ad1204 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 8 Feb 2017 14:31:59 +0900 Subject: [PATCH 174/190] Fix mypy violations --- sphinx/addnodes.py | 12 ++++++++++++ sphinx/application.py | 2 +- sphinx/builders/__init__.py | 2 ++ sphinx/builders/changes.py | 1 + sphinx/builders/dummy.py | 13 +++++++++++++ sphinx/builders/epub3.py | 26 ++++++++++++++++++++++---- sphinx/builders/html.py | 5 +++++ sphinx/builders/htmlhelp.py | 18 +++++++++++++++++- sphinx/builders/latex.py | 3 +++ sphinx/builders/qthelp.py | 1 + sphinx/builders/text.py | 15 ++++++++++++++- sphinx/builders/websupport.py | 25 ++++++++++++++++++++++--- sphinx/builders/xml.py | 14 +++++++++++++- sphinx/directives/patches.py | 7 +++++++ sphinx/errors.py | 10 ++++++++++ sphinx/ext/autodoc.py | 2 +- sphinx/io.py | 5 ++++- sphinx/parsers.py | 5 +++++ sphinx/pycode/pgen2/pgen.py | 2 +- sphinx/pycode/pgen2/tokenize.py | 4 ++-- sphinx/util/__init__.py | 30 ++++++++++++++++++++++++++---- sphinx/util/compat.py | 6 ++++++ sphinx/util/console.py | 1 + sphinx/util/docfields.py | 7 ++++--- sphinx/util/docstrings.py | 2 ++ sphinx/util/docutils.py | 9 ++++++++- sphinx/util/fileutil.py | 14 +++++++++++--- sphinx/util/images.py | 8 +++++++- sphinx/util/inspect.py | 6 ++++-- sphinx/util/jsdump.py | 5 +++-- sphinx/util/jsonimpl.py | 11 ++++++++++- sphinx/util/logging.py | 12 ++++++++++-- sphinx/util/nodes.py | 1 + sphinx/util/osutil.py | 12 +++++++++--- sphinx/util/png.py | 2 ++ sphinx/util/pycompat.py | 7 +++++-- sphinx/util/requests.py | 12 +++++++++++- sphinx/util/rst.py | 3 ++- sphinx/util/smartypants.py | 14 +++++++++++++- sphinx/util/stemmer/porter.py | 21 +++++++++++++++++++-- sphinx/util/tags.py | 12 ++++++++++++ sphinx/util/template.py | 12 ++++++++++++ sphinx/util/texescape.py | 1 + sphinx/util/websupport.py | 5 +++++ sphinx/writers/latex.py | 2 +- 45 files changed, 341 insertions(+), 46 deletions(-) diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py index c410cb9b7..1cadb27e5 100644 --- a/sphinx/addnodes.py +++ b/sphinx/addnodes.py @@ -11,6 +11,10 @@ from docutils import nodes +if False: + # For type annotation + from typing import Sequence # NOQA + class translatable(object): """Node which supports translation. @@ -27,14 +31,17 @@ class translatable(object): """ def preserve_original_messages(self): + # type: () -> None """Preserve original translatable messages.""" raise NotImplementedError def apply_translated_message(self, original_message, translated_message): + # type: (unicode, unicode) -> None """Apply translated message.""" raise NotImplementedError def extract_original_messages(self): + # type: () -> Sequence[unicode] """Extract translation messages. 
:returns: list of extracted messages or messages generator @@ -46,14 +53,17 @@ class toctree(nodes.General, nodes.Element, translatable): """Node for inserting a "TOC tree".""" def preserve_original_messages(self): + # type: () -> None if self.get('caption'): self['rawcaption'] = self['caption'] def apply_translated_message(self, original_message, translated_message): + # type: (unicode, unicode) -> None if self.get('rawcaption') == original_message: self['caption'] = translated_message def extract_original_messages(self): + # type: () -> List[unicode] if 'rawcaption' in self: return [self['rawcaption']] else: @@ -106,6 +116,7 @@ class desc_type(nodes.Part, nodes.Inline, nodes.TextElement): class desc_returns(desc_type): """Node for a "returns" annotation (a la -> in Python).""" def astext(self): + # type: () -> unicode return ' -> ' + nodes.TextElement.astext(self) @@ -127,6 +138,7 @@ class desc_optional(nodes.Part, nodes.Inline, nodes.TextElement): child_text_separator = ', ' def astext(self): + # type: () -> unicode return '[' + nodes.TextElement.astext(self) + ']' diff --git a/sphinx/application.py b/sphinx/application.py index d10b89b99..782dccb9a 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -128,7 +128,7 @@ class Sphinx(object): confoverrides=None, status=sys.stdout, warning=sys.stderr, freshenv=False, warningiserror=False, tags=None, verbosity=0, parallel=0): - # type: (unicode, unicode, unicode, unicode, unicode, Dict, IO, IO, bool, bool, unicode, int, int) -> None # NOQA + # type: (unicode, unicode, unicode, unicode, unicode, Dict, IO, IO, bool, bool, List[unicode], int, int) -> None # NOQA self.verbosity = verbosity self.next_listener_id = 0 self._extensions = {} # type: Dict[unicode, Any] diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index be86b7cb1..35280f45f 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -180,6 +180,7 @@ class Builder(object): return def cat2relpath(cat): + # type: (CatalogInfo) -> unicode return path.relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP) logger.info(bold('building [mo]: ') + message) @@ -202,6 +203,7 @@ class Builder(object): def compile_specific_catalogs(self, specified_files): # type: (List[unicode]) -> None def to_domain(fpath): + # type: (unicode) -> unicode docname, _ = path.splitext(path_stabilize(fpath)) dom = find_catalog(docname, self.config.gettext_compact) return dom diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py index fc8532794..1dc22f4ae 100644 --- a/sphinx/builders/changes.py +++ b/sphinx/builders/changes.py @@ -122,6 +122,7 @@ class ChangesBuilder(Builder): '.. 
deprecated:: %s' % version] def hl(no, line): + # type: (int, unicode) -> unicode line = '<a name="L%s"> </a>' % no + htmlescape(line) for x in hltext: if x in line: diff --git a/sphinx/builders/dummy.py b/sphinx/builders/dummy.py index 2fb146ecf..2ba6337a6 100644 --- a/sphinx/builders/dummy.py +++ b/sphinx/builders/dummy.py @@ -12,31 +12,44 @@ from sphinx.builders import Builder +if False: + # For type annotation + from typing import Any # NOQA + from docutils import nodes # NOQA + from sphinx.application import Sphinx # NOQA + class DummyBuilder(Builder): name = 'dummy' allow_parallel = True def init(self): + # type: () -> None pass def get_outdated_docs(self): + # type: () -> Set[unicode] return self.env.found_docs def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode return '' def prepare_writing(self, docnames): + # type: (Set[unicode]) -> None pass def write_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None pass def finish(self): + # type: () -> None pass def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(DummyBuilder) return { diff --git a/sphinx/builders/epub3.py b/sphinx/builders/epub3.py index c2022e622..2723bff9c 100644 --- a/sphinx/builders/epub3.py +++ b/sphinx/builders/epub3.py @@ -18,6 +18,11 @@ from sphinx.config import string_classes from sphinx.builders.epub import EpubBuilder from sphinx.util import logging +if False: + # For type annotation + from typing import Any, Iterable # NOQA + from docutils import nodes # NOQA + from sphinx.application import Sphinx # NOQA logger = logging.getLogger(__name__) @@ -117,6 +122,7 @@ class Epub3Builder(EpubBuilder): # Finish by building the epub file def handle_finish(self): + # type: () -> None """Create the metainfo files and finally the epub.""" self.get_toc() self.build_mimetype(self.outdir, 'mimetype') @@ -127,6 +133,7 @@ class Epub3Builder(EpubBuilder): self.build_epub(self.outdir, self.config.epub_basename + '.epub') def content_metadata(self, files, spine, guide): + # type: (List[unicode], List[unicode], List[unicode]) -> Dict """Create a dictionary with all metadata for the content.opf file properly escaped. 
""" @@ -141,6 +148,7 @@ class Epub3Builder(EpubBuilder): return metadata def _page_progression_direction(self): + # type: () -> unicode if self.config.epub_writing_mode == 'horizontal': page_progression_direction = 'ltr' elif self.config.epub_writing_mode == 'vertical': @@ -150,6 +158,7 @@ class Epub3Builder(EpubBuilder): return page_progression_direction def _ibook_scroll_axis(self): + # type: () -> unicode if self.config.epub_writing_mode == 'horizontal': scroll_axis = 'vertical' elif self.config.epub_writing_mode == 'vertical': @@ -159,6 +168,7 @@ class Epub3Builder(EpubBuilder): return scroll_axis def _css_writing_mode(self): + # type: () -> unicode if self.config.epub_writing_mode == 'vertical': editing_mode = 'vertical-rl' else: @@ -166,10 +176,12 @@ class Epub3Builder(EpubBuilder): return editing_mode def prepare_writing(self, docnames): + # type: (Iterable[unicode]) -> None super(Epub3Builder, self).prepare_writing(docnames) self.globalcontext['theme_writing_mode'] = self._css_writing_mode() def new_navlist(self, node, level, has_child): + # type: (nodes.Node, int, bool) -> unicode """Create a new entry in the toc from the node at given level.""" # XXX Modifies the node self.tocid += 1 @@ -180,14 +192,17 @@ class Epub3Builder(EpubBuilder): return self.navlist_template % node def begin_navlist_block(self, level): + # type: (int) -> unicode return self.navlist_template_begin_block % { "indent": self.navlist_indent * level } def end_navlist_block(self, level): + # type: (int) -> unicode return self.navlist_template_end_block % {"indent": self.navlist_indent * level} - def build_navlist(self, nodes): + def build_navlist(self, navnodes): + # type: (List[nodes.Node]) -> unicode """Create the toc navigation structure. This method is almost same as build_navpoints method in epub.py. @@ -200,7 +215,7 @@ class Epub3Builder(EpubBuilder): navlist = [] level = 1 usenodes = [] - for node in nodes: + for node in navnodes: if not node['text']: continue file = node['refuri'].split('#')[0] @@ -228,6 +243,7 @@ class Epub3Builder(EpubBuilder): return '\n'.join(navlist) def navigation_doc_metadata(self, navlist): + # type: (unicode) -> Dict """Create a dictionary with all metadata for the nav.xhtml file properly escaped. 
""" @@ -238,6 +254,7 @@ class Epub3Builder(EpubBuilder): return metadata def build_navigation_doc(self, outdir, outname): + # type: (unicode, unicode) -> None """Write the metainfo file nav.xhtml.""" logger.info('writing %s file...', outname) @@ -251,8 +268,8 @@ class Epub3Builder(EpubBuilder): # 'includehidden' refnodes = self.refnodes navlist = self.build_navlist(refnodes) - with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: - f.write(self.navigation_doc_template % + with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f: # type: ignore + f.write(self.navigation_doc_template % # type: ignore self.navigation_doc_metadata(navlist)) # Add nav.xhtml to epub file @@ -261,6 +278,7 @@ class Epub3Builder(EpubBuilder): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.setup_extension('sphinx.builders.epub') app.add_builder(Epub3Builder) diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 46daef97c..027c06205 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -598,6 +598,7 @@ class StandaloneHTMLBuilder(Builder): def copy_download_files(self): # type: () -> None def to_relpath(f): + # type: (unicode) -> unicode return relative_path(self.srcdir, f) # copy downloadable files if self.env.dlfiles: @@ -775,6 +776,7 @@ class StandaloneHTMLBuilder(Builder): def add_sidebars(self, pagename, ctx): # type: (unicode, Dict) -> None def has_wildcard(pattern): + # type: (unicode) -> bool return any(char in pattern for char in '*?[') sidebars = None matched = None @@ -823,6 +825,7 @@ class StandaloneHTMLBuilder(Builder): default_baseuri = default_baseuri.rsplit('#', 1)[0] def pathto(otheruri, resource=False, baseuri=default_baseuri): + # type: (unicode, bool, unicode) -> unicode if resource and '://' in otheruri: # allow non-local resources given by scheme return otheruri @@ -835,6 +838,7 @@ class StandaloneHTMLBuilder(Builder): ctx['pathto'] = pathto def hasdoc(name): + # type: (unicode) -> bool if name in self.env.all_docs: return True elif name == 'search' and self.search: @@ -879,6 +883,7 @@ class StandaloneHTMLBuilder(Builder): copyfile(self.env.doc2path(pagename), source_name) def update_page_context(self, pagename, templatename, ctx, event_arg): + # type: (unicode, unicode, Dict, Any) -> None pass def handle_finish(self): diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py index 68fd3b1db..338015aca 100644 --- a/sphinx/builders/htmlhelp.py +++ b/sphinx/builders/htmlhelp.py @@ -24,6 +24,11 @@ from sphinx.util import logging from sphinx.util.osutil import make_filename from sphinx.util.pycompat import htmlescape +if False: + # For type annotation + from typing import Any, IO, Tuple # NOQA + from sphinx.application import Sphinx # NOQA + logger = logging.getLogger(__name__) @@ -186,6 +191,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): encoding = 'cp1252' def init(self): + # type: () -> None StandaloneHTMLBuilder.init(self) # the output files for HTML help must be .html only self.out_suffix = '.html' @@ -196,17 +202,21 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): self.lcid, self.encoding = locale def open_file(self, outdir, basename, mode='w'): + # type: (unicode, unicode, unicode) -> IO # open a file with the correct encoding for the selected language - return codecs.open(path.join(outdir, basename), mode, + return codecs.open(path.join(outdir, basename), mode, # type: ignore self.encoding, 'xmlcharrefreplace') def update_page_context(self, pagename, templatename, ctx, event_arg): + # type: (unicode, unicode, 
Dict, unicode) -> None ctx['encoding'] = self.encoding def handle_finish(self): + # type: () -> None self.build_hhx(self.outdir, self.config.htmlhelp_basename) def write_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None for node in doctree.traverse(nodes.reference): # add ``target=_blank`` attributes to external links if node.get('internal') is None and 'refuri' in node: @@ -215,6 +225,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): StandaloneHTMLBuilder.write_doc(self, docname, doctree) def build_hhx(self, outdir, outname): + # type: (unicode, unicode) -> None logger.info('dumping stopword list...') with self.open_file(outdir, outname + '.stp') as f: for word in sorted(stopwords): @@ -255,6 +266,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): self.config.master_doc, self, prune_toctrees=False) def write_toc(node, ullevel=0): + # type: (nodes.Node, int) -> None if isinstance(node, nodes.list_item): f.write('<LI> ') for subnode in node: @@ -275,6 +287,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): write_toc(subnode, ullevel) def istoctree(node): + # type: (nodes.Node) -> bool return isinstance(node, addnodes.compact_paragraph) and \ 'toctree' in node for node in tocdoc.traverse(istoctree): @@ -287,7 +300,9 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): f.write('<UL>\n') def write_index(title, refs, subitems): + # type: (unicode, List[Tuple[unicode, unicode]], List[Tuple[unicode, List[Tuple[unicode, unicode]]]]) -> None # NOQA def write_param(name, value): + # type: (unicode, unicode) -> None item = ' <param name="%s" value="%s">\n' % \ (name, value) f.write(item) @@ -316,6 +331,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.setup_extension('sphinx.builders.html') app.add_builder(HTMLHelpBuilder) diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index 22d2717d4..d3bc50322 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -38,6 +38,7 @@ if False: # For type annotation from typing import Any, Iterable, Tuple, Union # NOQA from sphinx.application import Sphinx # NOQA + from sphinx.config import Config # NOQA logger = logging.getLogger(__name__) @@ -259,6 +260,7 @@ def validate_config_values(app): def default_latex_engine(config): + # type: (Config) -> unicode """ Better default latex_engine settings for specific languages. """ if config.language == 'ja': return 'platex' @@ -267,6 +269,7 @@ def default_latex_engine(config): def default_latex_docclass(config): + # type: (Config) -> Dict[unicode, unicode] """ Better default latex_docclass settings for specific languages. 
""" if config.language == 'ja': return {'manual': 'jsbook', diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py index 25dec7586..eeb3fdb73 100644 --- a/sphinx/builders/qthelp.py +++ b/sphinx/builders/qthelp.py @@ -149,6 +149,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder): prune_toctrees=False) def istoctree(node): + # type: (nodes.Node) -> bool return isinstance(node, addnodes.compact_paragraph) and \ 'toctree' in node sections = [] diff --git a/sphinx/builders/text.py b/sphinx/builders/text.py index 7147baa5c..bf70f6fdb 100644 --- a/sphinx/builders/text.py +++ b/sphinx/builders/text.py @@ -19,6 +19,12 @@ from sphinx.util import logging from sphinx.util.osutil import ensuredir, os_path from sphinx.writers.text import TextWriter +if False: + # For type annotation + from typing import Any, Iterator # NOQA + from docutils import nodes # NOQA + from sphinx.application import Sphinx # NOQA + logger = logging.getLogger(__name__) @@ -31,9 +37,11 @@ class TextBuilder(Builder): current_docname = None # type: unicode def init(self): + # type: () -> None pass def get_outdated_docs(self): + # type: () -> Iterator[unicode] for docname in self.env.found_docs: if docname not in self.env.all_docs: yield docname @@ -53,28 +61,33 @@ class TextBuilder(Builder): pass def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode return '' def prepare_writing(self, docnames): + # type: (Set[unicode]) -> None self.writer = TextWriter(self) def write_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None self.current_docname = docname destination = StringOutput(encoding='utf-8') self.writer.write(doctree, destination) outfilename = path.join(self.outdir, os_path(docname) + self.out_suffix) ensuredir(path.dirname(outfilename)) try: - with codecs.open(outfilename, 'w', 'utf-8') as f: + with codecs.open(outfilename, 'w', 'utf-8') as f: # type: ignore f.write(self.writer.output) except (IOError, OSError) as err: logger.warning("error writing file %s: %s", outfilename, err) def finish(self): + # type: () -> None pass def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(TextBuilder) app.add_config_value('text_sectionchars', '*=-~"+`', 'env') diff --git a/sphinx/builders/websupport.py b/sphinx/builders/websupport.py index 2ed37a697..1982b92cd 100644 --- a/sphinx/builders/websupport.py +++ b/sphinx/builders/websupport.py @@ -20,6 +20,12 @@ from sphinx.util.osutil import os_path, relative_uri, ensuredir, copyfile from sphinx.builders.html import PickleHTMLBuilder from sphinx.writers.websupport import WebSupportTranslator +if False: + # For type annotation + from typing import Any, Iterable, Tuple # NOQA + from docutils import nodes # NOQA + from sphinx.application import Sphinx # NOQA + class WebSupportBuilder(PickleHTMLBuilder): """ @@ -30,6 +36,7 @@ class WebSupportBuilder(PickleHTMLBuilder): versioning_compare = True # for commentable node's uuid stability. 
def init(self): + # type: () -> None PickleHTMLBuilder.init(self) # templates are needed for this builder, but the serializing # builder does not initialize them @@ -41,20 +48,24 @@ class WebSupportBuilder(PickleHTMLBuilder): self.script_files.append('_static/websupport.js') def set_webinfo(self, staticdir, virtual_staticdir, search, storage): + # type: (unicode, unicode, Any, unicode) -> None self.staticdir = staticdir self.virtual_staticdir = virtual_staticdir self.search = search self.storage = storage def init_translator_class(self): + # type: () -> None if self.translator_class is None: self.translator_class = WebSupportTranslator def prepare_writing(self, docnames): + # type: (Iterable[unicode]) -> None PickleHTMLBuilder.prepare_writing(self, docnames) self.globalcontext['no_search_suffix'] = True def write_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None destination = StringOutput(encoding='utf-8') doctree.settings = self.docsettings @@ -72,6 +83,7 @@ class WebSupportBuilder(PickleHTMLBuilder): self.handle_page(docname, ctx, event_arg=doctree) def write_doc_serialized(self, docname, doctree): + # type: (unicode, nodes.Node) -> None self.imgpath = '/' + posixpath.join(self.virtual_staticdir, self.imagedir) self.post_process_images(doctree) title = self.env.longtitles.get(docname) @@ -79,10 +91,12 @@ class WebSupportBuilder(PickleHTMLBuilder): self.index_page(docname, doctree, title) def load_indexer(self, docnames): - self.indexer = self.search - self.indexer.init_indexing(changed=docnames) + # type: (Iterable[unicode]) -> None + self.indexer = self.search # type: ignore + self.indexer.init_indexing(changed=docnames) # type: ignore def _render_page(self, pagename, addctx, templatename, event_arg=None): + # type: (unicode, Dict, unicode, unicode) -> Tuple[Dict, Dict] # This is mostly copied from StandaloneHTMLBuilder. However, instead # of rendering the template and saving the html, create a context # dict and pickle it. 
@@ -91,6 +105,7 @@ class WebSupportBuilder(PickleHTMLBuilder): def pathto(otheruri, resource=False, baseuri=self.get_target_uri(pagename)): + # type: (unicode, bool, unicode) -> unicode if resource and '://' in otheruri: return otheruri elif not resource: @@ -128,6 +143,7 @@ class WebSupportBuilder(PickleHTMLBuilder): def handle_page(self, pagename, addctx, templatename='page.html', outfilename=None, event_arg=None): + # type: (unicode, Dict, unicode, unicode, unicode) -> None ctx, doc_ctx = self._render_page(pagename, addctx, templatename, event_arg) @@ -146,6 +162,7 @@ class WebSupportBuilder(PickleHTMLBuilder): copyfile(self.env.doc2path(pagename), source_name) def handle_finish(self): + # type: () -> None # get global values for css and script files _, doc_ctx = self._render_page('tmp', {}, 'page.html') self.globalcontext['css'] = doc_ctx['css'] @@ -164,10 +181,12 @@ class WebSupportBuilder(PickleHTMLBuilder): shutil.move(src, dst) def dump_search_index(self): - self.indexer.finish_indexing() + # type: () -> None + self.indexer.finish_indexing() # type: ignore def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(WebSupportBuilder) return { diff --git a/sphinx/builders/xml.py b/sphinx/builders/xml.py index fc43b4c12..c149df83c 100644 --- a/sphinx/builders/xml.py +++ b/sphinx/builders/xml.py @@ -20,6 +20,11 @@ from sphinx.util import logging from sphinx.util.osutil import ensuredir, os_path from sphinx.writers.xml import XMLWriter, PseudoXMLWriter +if False: + # For type annotation + from typing import Any, Iterator # NOQA + from sphinx.application import Sphinx # NOQA + logger = logging.getLogger(__name__) @@ -35,9 +40,11 @@ class XMLBuilder(Builder): _writer_class = XMLWriter def init(self): + # type: () -> None pass def get_outdated_docs(self): + # type: () -> Iterator[unicode] for docname in self.env.found_docs: if docname not in self.env.all_docs: yield docname @@ -57,12 +64,15 @@ class XMLBuilder(Builder): pass def get_target_uri(self, docname, typ=None): + # type: (unicode, unicode) -> unicode return docname def prepare_writing(self, docnames): + # type: (Set[unicode]) -> None self.writer = self._writer_class(self) def write_doc(self, docname, doctree): + # type: (unicode, nodes.Node) -> None # work around multiple string % tuple issues in docutils; # replace tuples in attribute values with lists doctree = doctree.deepcopy() @@ -80,12 +90,13 @@ class XMLBuilder(Builder): outfilename = path.join(self.outdir, os_path(docname) + self.out_suffix) ensuredir(path.dirname(outfilename)) try: - with codecs.open(outfilename, 'w', 'utf-8') as f: + with codecs.open(outfilename, 'w', 'utf-8') as f: # type: ignore f.write(self.writer.output) except (IOError, OSError) as err: logger.warning("error writing file %s: %s", outfilename, err) def finish(self): + # type: () -> None pass @@ -101,6 +112,7 @@ class PseudoXMLBuilder(XMLBuilder): def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] app.add_builder(XMLBuilder) app.add_builder(PseudoXMLBuilder) diff --git a/sphinx/directives/patches.py b/sphinx/directives/patches.py index 8b14ba2b0..2501df417 100644 --- a/sphinx/directives/patches.py +++ b/sphinx/directives/patches.py @@ -13,6 +13,10 @@ from docutils.parsers.rst.directives import images, html from sphinx import addnodes +if False: + # For type annotation + from sphinx.application import Sphinx # NOQA + class Figure(images.Figure): """The figure directive which applies `:name:` option to the figure node @@ -20,6 +24,7 @@ class Figure(images.Figure): """ def run(self): + 
# type: () -> List[nodes.Node] name = self.options.pop('name', None) result = images.Figure.run(self) if len(result) == 2 or isinstance(result[0], nodes.system_message): @@ -39,6 +44,7 @@ class Figure(images.Figure): class Meta(html.Meta): def run(self): + # type: () -> List[nodes.Node] env = self.state.document.settings.env result = html.Meta.run(self) for node in result: @@ -56,6 +62,7 @@ class Meta(html.Meta): def setup(app): + # type: (Sphinx) -> Dict directives.register_directive('figure', Figure) directives.register_directive('meta', Meta) diff --git a/sphinx/errors.py b/sphinx/errors.py index 01f29d7aa..837bd5cff 100644 --- a/sphinx/errors.py +++ b/sphinx/errors.py @@ -10,6 +10,10 @@ :license: BSD, see LICENSE for details. """ +if False: + # For type annotation + from typing import Any # NOQA + class SphinxError(Exception): """ @@ -29,16 +33,19 @@ class ExtensionError(SphinxError): category = 'Extension error' def __init__(self, message, orig_exc=None): + # type: (unicode, Exception) -> None SphinxError.__init__(self, message) self.orig_exc = orig_exc def __repr__(self): + # type: () -> str if self.orig_exc: return '%s(%r, %r)' % (self.__class__.__name__, self.message, self.orig_exc) return '%s(%r)' % (self.__class__.__name__, self.message) def __str__(self): + # type: () -> str parent_str = SphinxError.__str__(self) if self.orig_exc: return '%s (exception: %s)' % (parent_str, self.orig_exc) @@ -59,6 +66,7 @@ class VersionRequirementError(SphinxError): class PycodeError(Exception): def __str__(self): + # type: () -> str res = self.args[0] if len(self.args) > 1: res += ' (exception was: %r)' % self.args[1] @@ -70,8 +78,10 @@ class SphinxParallelError(SphinxError): category = 'Sphinx parallel build error' def __init__(self, message, traceback): + # type: (str, Any) -> None self.message = message self.traceback = traceback def __str__(self): + # type: () -> str return self.message diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index cab900421..7f77d3a3f 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -1193,7 +1193,7 @@ class ClassLevelDocumenter(Documenter): # ... 
if still None, there's no way to know if mod_cls is None: return None, [] - modname, cls = rpartition(mod_cls, '.') + modname, cls = rpartition(mod_cls, '.') # type: ignore parents = [cls] # if the module name is still missing, get it like above if not modname: diff --git a/sphinx/io.py b/sphinx/io.py index e29420d97..52b5b1729 100644 --- a/sphinx/io.py +++ b/sphinx/io.py @@ -28,7 +28,7 @@ from sphinx.util.docutils import LoggingReporter if False: # For type annotation - from typing import Any, Union # NOQA + from typing import Any, Tuple, Union # NOQA from docutils import nodes # NOQA from docutils.io import Input # NOQA from docutils.parsers import Parser # NOQA @@ -75,6 +75,7 @@ class SphinxBaseReader(standalone.Reader): return standalone.Reader.get_transforms(self) + self.transforms def new_document(self): + # type: () -> nodes.document document = standalone.Reader.new_document(self) reporter = document.reporter document.reporter = LoggingReporter(reporter.source, reporter.report_level, @@ -122,6 +123,7 @@ class SphinxI18nReader(SphinxBaseReader): reporter = document.reporter def get_source_and_line(lineno=None): + # type: (int) -> Tuple[unicode, int] return reporter.source, self.lineno reporter.get_source_and_line = get_source_and_line @@ -153,6 +155,7 @@ class SphinxFileInput(FileInput): def read(self): # type: () -> unicode def get_parser_type(source_path): + # type: (unicode) -> Tuple[unicode] for suffix in self.env.config.source_parsers: if source_path.endswith(suffix): parser_class = self.env.config.source_parsers[suffix] diff --git a/sphinx/parsers.py b/sphinx/parsers.py index 926de9f1c..1c0a75b06 100644 --- a/sphinx/parsers.py +++ b/sphinx/parsers.py @@ -11,6 +11,10 @@ import docutils.parsers +if False: + # For type annotation + from sphinx.application import Sphinx # NOQA + class Parser(docutils.parsers.Parser): """ @@ -33,6 +37,7 @@ class Parser(docutils.parsers.Parser): """ def set_application(self, app): + # type: (Sphinx) -> None """set_application will be called from Sphinx to set app and other instance variables :param sphinx.application.Sphinx app: Sphinx application object diff --git a/sphinx/pycode/pgen2/pgen.py b/sphinx/pycode/pgen2/pgen.py index 3fe91e57e..0106763d0 100644 --- a/sphinx/pycode/pgen2/pgen.py +++ b/sphinx/pycode/pgen2/pgen.py @@ -193,7 +193,7 @@ class ParserGenerator(object): if state in base: return base[state] = 1 - for label, next in state.arcs: # type: ignore + for label, next in state.arcs: if label is None: addclosure(next, base) states = [DFAState(closure(start), finish)] diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py index a096795f8..cbe64a581 100644 --- a/sphinx/pycode/pgen2/tokenize.py +++ b/sphinx/pycode/pgen2/tokenize.py @@ -298,12 +298,12 @@ def generate_tokens(readline): endmatch = endprog.match(line) # type: ignore if endmatch: pos = end = endmatch.end(0) - yield (STRING, contstr + line[:end], # type: ignore + yield (STRING, contstr + line[:end], strstart, (lnum, end), contline + line) # type: ignore contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': - yield (ERRORTOKEN, contstr + line, # type: ignore + yield (ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) # type: ignore contstr = '' contline = None diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index 3b1f076a9..8206c911d 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -45,7 +45,7 @@ from sphinx.util.matching import patfilter # noqa if False: # 
For type annotation - from typing import Any, Callable, Iterable, Iterator, Pattern, Sequence, Tuple, Union # NOQA + from typing import Any, Callable, IO, Iterable, Iterator, Pattern, Sequence, Tuple, Union # NOQA logger = logging.getLogger(__name__) @@ -121,6 +121,7 @@ class FilenameUniqDict(dict): appear in. Used for images and downloadable files in the environment. """ def __init__(self): + # type: () -> None self._existing = set() # type: Set[unicode] def add_file(self, docname, newfile): @@ -153,9 +154,11 @@ class FilenameUniqDict(dict): self.add_file(doc, filename) def __getstate__(self): + # type: () -> Set[unicode] return self._existing def __setstate__(self, state): + # type: (Set[unicode]) -> None self._existing = state @@ -214,7 +217,7 @@ def save_traceback(app): last_msgs = '' if app is not None: last_msgs = '\n'.join( - '# %s' % strip_colors(force_decode(s, 'utf-8')).strip() + '# %s' % strip_colors(force_decode(s, 'utf-8')).strip() # type: ignore for s in app.messagelog) os.write(fd, (_DEBUG_HEADER % (sphinx.__display_version__, @@ -301,12 +304,14 @@ def detect_encoding(readline): """Like tokenize.detect_encoding() from Py3k, but a bit simplified.""" def read_or_stop(): + # type: () -> unicode try: return readline() except StopIteration: return None def get_normal_name(orig_enc): + # type: (str) -> str """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace('_', '-') @@ -318,12 +323,13 @@ def detect_encoding(readline): return orig_enc def find_cookie(line): + # type: (unicode) -> unicode try: line_string = line.decode('ascii') except UnicodeDecodeError: return None - matches = _coding_re.findall(line_string) + matches = _coding_re.findall(line_string) # type: ignore if not matches: return None return get_normal_name(matches[0]) @@ -354,14 +360,17 @@ class Tee(object): File-like object writing to two streams. """ def __init__(self, stream1, stream2): + # type: (IO, IO) -> None self.stream1 = stream1 self.stream2 = stream2 def write(self, text): + # type: (unicode) -> None self.stream1.write(text) self.stream2.write(text) def flush(self): + # type: () -> None if hasattr(self.stream1, 'flush'): self.stream1.flush() if hasattr(self.stream2, 'flush'): @@ -369,6 +378,7 @@ class Tee(object): def parselinenos(spec, total): + # type: (unicode, int) -> List[int] """Parse a line number spec (such as "1,2,4-6") and return a list of wanted line numbers. 
""" @@ -391,6 +401,7 @@ def parselinenos(spec, total): def force_decode(string, encoding): + # type: (unicode, unicode) -> unicode """Forcibly get a unicode string out of a bytestring.""" if isinstance(string, binary_type): try: @@ -407,16 +418,20 @@ def force_decode(string, encoding): class attrdict(dict): def __getattr__(self, key): + # type: (unicode) -> unicode return self[key] def __setattr__(self, key, val): + # type: (unicode, unicode) -> None self[key] = val def __delattr__(self, key): + # type: (unicode) -> None del self[key] def rpartition(s, t): + # type: (unicode, unicode) -> Tuple[unicode, unicode] """Similar to str.rpartition from 2.5, but doesn't return the separator.""" i = s.rfind(t) if i != -1: @@ -425,6 +440,7 @@ def rpartition(s, t): def split_into(n, type, value): + # type: (int, unicode, unicode) -> List[unicode] """Split an index entry into a given number of parts at semicolons.""" parts = [x.strip() for x in value.split(';', n - 1)] if sum(1 for part in parts if part) < n: @@ -433,6 +449,7 @@ def split_into(n, type, value): def split_index_msg(type, value): + # type: (unicode, unicode) -> List[unicode] # new entry types must be listed in directives/other.py! if type == 'single': try: @@ -471,13 +488,16 @@ class PeekableIterator(object): what's the next item. """ def __init__(self, iterable): + # type: (Iterable) -> None self.remaining = deque() # type: deque self._iterator = iter(iterable) def __iter__(self): + # type: () -> PeekableIterator return self def __next__(self): + # type: () -> Any """Return the next item from the iterator.""" if self.remaining: return self.remaining.popleft() @@ -486,14 +506,16 @@ class PeekableIterator(object): next = __next__ # Python 2 compatibility def push(self, item): + # type: (Any) -> None """Push the `item` on the internal stack, it will be returned on the next :meth:`next` call. 
""" self.remaining.append(item) def peek(self): + # type: () -> Any """Return the next item without changing the state of the iterator.""" - item = next(self) + item = next(self) # type: ignore self.push(item) return item diff --git a/sphinx/util/compat.py b/sphinx/util/compat.py index f47237d11..42406afe7 100644 --- a/sphinx/util/compat.py +++ b/sphinx/util/compat.py @@ -20,13 +20,19 @@ from sphinx.deprecation import RemovedInSphinx17Warning docutils_version = tuple(int(x) for x in _du_version.split('.')[:2]) +if False: + # For type annotation + from typing import Any # NOQA + class _DeprecationWrapper(object): def __init__(self, mod, deprecated): + # type: (Any, Dict) -> None self._mod = mod self._deprecated = deprecated def __getattr__(self, attr): + # type: (str) -> Any if attr in self._deprecated: warnings.warn("sphinx.util.compat.%s is deprecated and will be " "removed in Sphinx 1.7, please use the standard " diff --git a/sphinx/util/console.py b/sphinx/util/console.py index b418edfb8..ac3d2282f 100644 --- a/sphinx/util/console.py +++ b/sphinx/util/console.py @@ -95,6 +95,7 @@ def strip_colors(s): def create_color_func(name): # type: (str) -> None def inner(text): + # type: (unicode) -> unicode return colorize(name, text) globals()[name] = inner diff --git a/sphinx/util/docfields.py b/sphinx/util/docfields.py index 81b8347c6..f0af0c59d 100644 --- a/sphinx/util/docfields.py +++ b/sphinx/util/docfields.py @@ -81,7 +81,7 @@ class Field(object): return (fieldarg, content) def make_field(self, types, domain, item): - # type: (List, unicode, Tuple) -> nodes.field + # type: (Dict[unicode, List[nodes.Node]], unicode, Tuple) -> nodes.field fieldarg, content = item fieldname = nodes.field_name('', self.label) if fieldarg: @@ -122,7 +122,7 @@ class GroupedField(Field): self.can_collapse = can_collapse def make_field(self, types, domain, items): - # type: (List, unicode, Tuple) -> nodes.field + # type: (Dict[unicode, List[nodes.Node]], unicode, Tuple) -> nodes.field fieldname = nodes.field_name('', self.label) listnode = self.list_type() for fieldarg, content in items: @@ -170,8 +170,9 @@ class TypedField(GroupedField): self.typerolename = typerolename def make_field(self, types, domain, items): - # type: (List, unicode, Tuple) -> nodes.field + # type: (Dict[unicode, List[nodes.Node]], unicode, Tuple) -> nodes.field def handle_item(fieldarg, content): + # type: (unicode, unicode) -> nodes.paragraph par = nodes.paragraph() par.extend(self.make_xrefs(self.rolename, domain, fieldarg, addnodes.literal_strong)) diff --git a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py index fba9bf490..e79408da2 100644 --- a/sphinx/util/docstrings.py +++ b/sphinx/util/docstrings.py @@ -13,6 +13,7 @@ import sys def prepare_docstring(s, ignore=1): + # type: (unicode, int) -> List[unicode] """Convert a docstring into lines of parseable reST. Remove common leading indentation, where the indentation of a given number of lines (usually just one) is ignored. @@ -46,6 +47,7 @@ def prepare_docstring(s, ignore=1): def prepare_commentdoc(s): + # type: (unicode) -> List[unicode] """Extract documentation comment lines (starting with #:) and return them as a list of lines. Returns an empty list if there is no documentation. 
""" diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py index ae106c190..38fdcde28 100644 --- a/sphinx/util/docutils.py +++ b/sphinx/util/docutils.py @@ -63,12 +63,15 @@ class sphinx_domains(object): self.roles_func = None # type: Callable def __enter__(self): + # type: () -> None self.enable() def __exit__(self, type, value, traceback): + # type: (unicode, unicode, unicode) -> None self.disable() def enable(self): + # type: () -> None self.directive_func = directives.directive self.role_func = roles.role @@ -76,6 +79,7 @@ class sphinx_domains(object): roles.role = self.lookup_role def disable(self): + # type: () -> None directives.directive = self.directive_func roles.role = self.role_func @@ -125,7 +129,8 @@ class sphinx_domains(object): class WarningStream(object): def write(self, text): - matched = report_re.search(text) + # type: (unicode) -> None + matched = report_re.search(text) # type: ignore if not matched: logger.warning(text.rstrip("\r\n")) else: @@ -136,9 +141,11 @@ class WarningStream(object): class LoggingReporter(Reporter): def __init__(self, source, report_level, halt_level, debug=False, error_handler='backslashreplace'): + # type: (unicode, int, int, bool, unicode) -> None stream = WarningStream() Reporter.__init__(self, source, report_level, halt_level, stream, debug, error_handler=error_handler) def set_conditions(self, category, report_level, halt_level, debug=False): + # type: (unicode, int, int, bool) -> None Reporter.set_conditions(self, category, report_level, halt_level, debug=debug) diff --git a/sphinx/util/fileutil.py b/sphinx/util/fileutil.py index b258c2039..ddfb61e6b 100644 --- a/sphinx/util/fileutil.py +++ b/sphinx/util/fileutil.py @@ -16,8 +16,15 @@ import posixpath from docutils.utils import relative_path from sphinx.util.osutil import copyfile, ensuredir, walk +if False: + # For type annotation + from typing import Callable, Union # NOQA + from sphinx.util.matching import Matcher # NOQA + from sphinx.util.template import BaseRenderer # NOQA + def copy_asset_file(source, destination, context=None, renderer=None): + # type: (unicode, unicode, Dict, BaseRenderer) -> None """Copy an asset file to destination. On copying, it expands the template variables if context argument is given and @@ -40,16 +47,17 @@ def copy_asset_file(source, destination, context=None, renderer=None): from sphinx.util.template import SphinxRenderer renderer = SphinxRenderer() - with codecs.open(source, 'r', encoding='utf-8') as fsrc: + with codecs.open(source, 'r', encoding='utf-8') as fsrc: # type: ignore if destination.lower().endswith('_t'): destination = destination[:-2] - with codecs.open(destination, 'w', encoding='utf-8') as fdst: - fdst.write(renderer.render_string(fsrc.read(), context)) + with codecs.open(destination, 'w', encoding='utf-8') as fdst: # type: ignore + fdst.write(renderer.render_string(fsrc.read(), context)) # type: ignore else: copyfile(source, destination) def copy_asset(source, destination, excluded=lambda path: False, context=None, renderer=None): + # type: (unicode, unicode, Union[Callable[[unicode], bool], Matcher], Dict, BaseRenderer) -> None # NOQA """Copy asset files to destination recursively. 
On copying, it expands the template variables if context argument is given and diff --git a/sphinx/util/images.py b/sphinx/util/images.py index 698a030ad..81dfaf681 100644 --- a/sphinx/util/images.py +++ b/sphinx/util/images.py @@ -21,14 +21,19 @@ except ImportError: except ImportError: Image = None +if False: + # For type annotation + from typing import Tuple # NOQA + mime_suffixes = { '.pdf': 'application/pdf', '.svg': 'image/svg+xml', '.svgz': 'image/svg+xml', -} +} # type: Dict[unicode, unicode] def get_image_size(filename): + # type: (unicode) -> Tuple[int, int] try: size = imagesize.get(filename) if size[0] == -1: @@ -48,6 +53,7 @@ def get_image_size(filename): def guess_mimetype(filename): + # type: (unicode) -> unicode _, ext = path.splitext(filename) if ext in mime_suffixes: return mime_suffixes[ext] diff --git a/sphinx/util/inspect.py b/sphinx/util/inspect.py index 28bc877b3..3cb795339 100644 --- a/sphinx/util/inspect.py +++ b/sphinx/util/inspect.py @@ -18,7 +18,7 @@ from sphinx.util import force_decode if False: # For type annotation - from typing import Any, Callable, Tuple # NOQA + from typing import Any, Callable, Tuple, Type # NOQA # this imports the standard library inspect module without resorting to # relatively import this module @@ -68,6 +68,7 @@ else: # 2.7 from functools import partial def getargspec(func): + # type: (Any) -> Any """Like inspect.getargspec but supports functools.partial as well.""" if inspect.ismethod(func): func = func.__func__ @@ -105,6 +106,7 @@ except ImportError: def isenumclass(x): + # type: (Type) -> bool """Check if the object is subclass of enum.""" if enum is None: return False @@ -174,7 +176,7 @@ def object_description(object): except Exception: raise ValueError if isinstance(s, binary_type): - s = force_decode(s, None) + s = force_decode(s, None) # type: ignore # Strip non-deterministic memory addresses such as # ``<__main__.A at 0x7f68cb685710>`` s = memory_address_re.sub('', s) diff --git a/sphinx/util/jsdump.py b/sphinx/util/jsdump.py index 7c60a1b70..592a4565f 100644 --- a/sphinx/util/jsdump.py +++ b/sphinx/util/jsdump.py @@ -18,7 +18,7 @@ from sphinx.util.pycompat import u if False: # For type annotation - from typing import Any, IO, Union # NOQA + from typing import Any, IO, Match, Union # NOQA _str_re = re.compile(r'"(\\\\|\\"|[^"])*"') _int_re = re.compile(r'\d+') @@ -43,6 +43,7 @@ ESCAPED = re.compile(r'\\u.{4}|\\.') def encode_string(s): # type: (str) -> str def replace(match): + # type: (Match) -> unicode s = match.group(0) try: return ESCAPE_DICT[s] @@ -56,7 +57,7 @@ def encode_string(s): s1 = 0xd800 | ((n >> 10) & 0x3ff) s2 = 0xdc00 | (n & 0x3ff) return '\\u%04x\\u%04x' % (s1, s2) - return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' # type: ignore def decode_string(s): diff --git a/sphinx/util/jsonimpl.py b/sphinx/util/jsonimpl.py index 215dfe44d..e5f6a0e72 100644 --- a/sphinx/util/jsonimpl.py +++ b/sphinx/util/jsonimpl.py @@ -14,28 +14,37 @@ import json from six import text_type from six.moves import UserString +if False: + # For type annotation + from typing import Any, IO # NOQA + class SphinxJSONEncoder(json.JSONEncoder): """JSONEncoder subclass that forces translation proxies.""" def default(self, obj): + # type: (Any) -> unicode if isinstance(obj, UserString): return text_type(obj) return json.JSONEncoder.default(self, obj) def dump(obj, fp, *args, **kwds): + # type: (Any, IO, Any, Any) -> unicode kwds['cls'] = SphinxJSONEncoder - return json.dump(obj, fp, *args, 
**kwds) + json.dump(obj, fp, *args, **kwds) def dumps(obj, *args, **kwds): + # type: (Any, Any, Any) -> unicode kwds['cls'] = SphinxJSONEncoder return json.dumps(obj, *args, **kwds) def load(*args, **kwds): + # type: (Any, Any) -> Any return json.load(*args, **kwds) def loads(*args, **kwds): + # type: (Any, Any) -> Any return json.loads(*args, **kwds) diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py index b4f8f5796..bc85df6b5 100644 --- a/sphinx/util/logging.py +++ b/sphinx/util/logging.py @@ -63,6 +63,7 @@ def getLogger(name): def convert_serializable(records): + # type: (List[logging.LogRecord]) -> None """Convert LogRecord serializable.""" for r in records: # extract arguments to a message and clear them @@ -174,6 +175,7 @@ class MemoryHandler(logging.handlers.BufferingHandler): """Handler buffering all logs.""" def __init__(self): + # type: () -> None super(MemoryHandler, self).__init__(-1) def shouldFlush(self, record): @@ -248,10 +250,12 @@ def pending_logging(): class LogCollector(object): def __init__(self): - self.logs = [] # type: logging.LogRecord + # type: () -> None + self.logs = [] # type: List[logging.LogRecord] @contextmanager def collect(self): + # type: () -> Generator with pending_logging() as memhandler: yield @@ -368,13 +372,14 @@ class WarningLogRecordTranslator(logging.Filter): class ColorizeFormatter(logging.Formatter): def format(self, record): + # type: (logging.LogRecord) -> str message = super(ColorizeFormatter, self).format(record) color = getattr(record, 'color', None) if color is None: color = COLOR_MAP.get(record.levelno) if color: - return colorize(color, message) + return colorize(color, message) # type: ignore else: return message @@ -382,10 +387,12 @@ class ColorizeFormatter(logging.Formatter): class SafeEncodingWriter(object): """Stream writer which ignores UnicodeEncodeError silently""" def __init__(self, stream): + # type: (IO) -> None self.stream = stream self.encoding = getattr(stream, 'encoding', 'ascii') or 'ascii' def write(self, data): + # type: (unicode) -> None try: self.stream.write(data) except UnicodeEncodeError: @@ -394,6 +401,7 @@ class SafeEncodingWriter(object): self.stream.write(data.encode(self.encoding, 'replace').decode(self.encoding)) def flush(self): + # type: () -> None if hasattr(self.stream, 'flush'): self.stream.flush() diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py index 17584e7c9..44c28f6b2 100644 --- a/sphinx/util/nodes.py +++ b/sphinx/util/nodes.py @@ -196,6 +196,7 @@ def traverse_translatable_index(doctree): # type: (nodes.Node) -> Iterable[Tuple[nodes.Node, List[unicode]]] """Traverse translatable index node from a document tree.""" def is_block_index(node): + # type: (nodes.Node) -> bool return isinstance(node, addnodes.index) and \ node.get('inline') is False for node in doctree.traverse(is_block_index): diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py index ba2e1abf4..acf236027 100644 --- a/sphinx/util/osutil.py +++ b/sphinx/util/osutil.py @@ -21,7 +21,7 @@ import filecmp from os import path import contextlib from io import BytesIO, StringIO -from six import PY2, text_type +from six import PY2, PY3, text_type if False: # For type annotation @@ -33,6 +33,9 @@ ENOENT = getattr(errno, 'ENOENT', 0) EPIPE = getattr(errno, 'EPIPE', 0) EINVAL = getattr(errno, 'EINVAL', 0) +if PY3: + unicode = str # special alias for static typing... 
+ # SEP separates path elements in the canonical file names # # Define SEP as a manifest constant, not so much because we expect it to change @@ -256,14 +259,14 @@ class FileAvoidWrite(object): self._io = None # type: Union[StringIO, BytesIO] def write(self, data): - # type: (Union[str, bytes]) -> None + # type: (Union[str, unicode]) -> None if not self._io: if isinstance(data, text_type): self._io = StringIO() else: self._io = BytesIO() - self._io.write(data) + self._io.write(data) # type: ignore def close(self): # type: () -> None @@ -294,12 +297,15 @@ class FileAvoidWrite(object): f.write(buf) def __enter__(self): + # type: () -> FileAvoidWrite return self def __exit__(self, type, value, traceback): + # type: (unicode, unicode, unicode) -> None self.close() def __getattr__(self, name): + # type: (str) -> Any # Proxy to _io instance. if not self._io: raise Exception('Must write to FileAvoidWrite before other ' diff --git a/sphinx/util/png.py b/sphinx/util/png.py index 476d45ccd..1543e4a5b 100644 --- a/sphinx/util/png.py +++ b/sphinx/util/png.py @@ -22,6 +22,7 @@ IEND_CHUNK = b'\x00\x00\x00\x00IEND\xAE\x42\x60\x82' def read_png_depth(filename): + # type: (unicode) -> int """Read the special tEXt chunk indicating the depth from a PNG file.""" with open(filename, 'rb') as f: f.seek(- (LEN_IEND + LEN_DEPTH), 2) @@ -34,6 +35,7 @@ def read_png_depth(filename): def write_png_depth(filename, depth): + # type: (unicode, int) -> None """Write the special tEXt chunk indicating the depth to a PNG file. The chunk is placed immediately before the special IEND chunk. diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py index d4be08267..69a4351bf 100644 --- a/sphinx/util/pycompat.py +++ b/sphinx/util/pycompat.py @@ -16,7 +16,7 @@ from six import PY3, text_type, exec_ if False: # For type annotation - from typing import Any, Callable # NOQA + from typing import Any, Callable, Generator # NOQA NoneType = type(None) @@ -103,7 +103,8 @@ else: methods in Python 2 or 3.""" def __str__(self): - return self.__unicode__().encode('utf8') + # type: () -> str + return self.__unicode__().encode('utf8') # type: ignore # indent() @@ -115,9 +116,11 @@ else: # type: (unicode, unicode, Callable) -> unicode if predicate is None: def predicate(line): + # type: (unicode) -> unicode return line.strip() def prefixed_lines(): + # type: () -> Generator for line in text.splitlines(True): yield (prefix + line if predicate(line) else line) return ''.join(prefixed_lines()) diff --git a/sphinx/util/requests.py b/sphinx/util/requests.py index 14264e318..ec5aa232e 100644 --- a/sphinx/util/requests.py +++ b/sphinx/util/requests.py @@ -61,11 +61,17 @@ except pkg_resources.UnknownExtra: 'install requests-2.4.1+.' ) +if False: + # For type annotation + from typing import Any, Generator, Union # NOQA + from sphinx.config import Config # NOQA + useragent_header = [('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:25.0) Gecko/20100101 Firefox/25.0')] def is_ssl_error(exc): + # type: (Exception) -> bool """Check an exception is SSLError.""" if isinstance(exc, SSLError): return True @@ -79,6 +85,7 @@ def is_ssl_error(exc): @contextmanager def ignore_insecure_warning(**kwargs): + # type: (Any) -> Generator with warnings.catch_warnings(): if not kwargs.get('verify') and InsecureRequestWarning: # ignore InsecureRequestWarning if verify=False @@ -87,6 +94,7 @@ def ignore_insecure_warning(**kwargs): def _get_tls_cacert(url, config): + # type: (unicode, Config) -> Union[str, bool] """Get addiotinal CA cert for a specific URL. 
This also returns ``False`` if verification is disabled. @@ -99,7 +107,7 @@ def _get_tls_cacert(url, config): if not certs: return True elif isinstance(certs, (string_types, tuple)): # type: ignore - return certs + return certs # type: ignore else: hostname = urlsplit(url)[1] if '@' in hostname: @@ -109,6 +117,7 @@ def _get_tls_cacert(url, config): def get(url, **kwargs): + # type: (unicode, Any) -> requests.Response """Sends a GET request like requests.get(). This sets up User-Agent header and TLS verification automatically.""" @@ -122,6 +131,7 @@ def get(url, **kwargs): def head(url, **kwargs): + # type: (unicode, Any) -> requests.Response """Sends a HEAD request like requests.head(). This sets up User-Agent header and TLS verification automatically.""" diff --git a/sphinx/util/rst.py b/sphinx/util/rst.py index 437ba516b..afb452f45 100644 --- a/sphinx/util/rst.py +++ b/sphinx/util/rst.py @@ -15,4 +15,5 @@ symbols_re = re.compile('([!-/:-@\[-`{-~])') def escape(text): - return symbols_re.sub(r'\\\1', text) + # type: (unicode) -> unicode + return symbols_re.sub(r'\\\1', text) # type: ignore diff --git a/sphinx/util/smartypants.py b/sphinx/util/smartypants.py index dee2f50ba..0146ba6e9 100644 --- a/sphinx/util/smartypants.py +++ b/sphinx/util/smartypants.py @@ -73,11 +73,16 @@ smartypants.py license:: import re +if False: + # For type annotation + from typing import Tuple # NOQA + def sphinx_smarty_pants(t): + # type: (unicode) -> unicode t = t.replace('"', '"') t = educate_dashes_oldschool(t) - t = educate_quotes(t) + t = educate_quotes(t) # type: ignore t = t.replace('"', '"') return t @@ -155,6 +160,7 @@ closing_single_quotes_regex_2 = re.compile(r""" def educate_quotes(s): + # type: (str) -> str """ Parameter: String. @@ -194,6 +200,7 @@ def educate_quotes(s): def educate_quotes_latex(s, dquotes=("``", "''")): + # type: (str, Tuple[str, str]) -> unicode """ Parameter: String. @@ -237,6 +244,7 @@ def educate_quotes_latex(s, dquotes=("``", "''")): def educate_backticks(s): + # type: (unicode) -> unicode """ Parameter: String. Returns: The string, with ``backticks'' -style double quotes @@ -248,6 +256,7 @@ def educate_backticks(s): def educate_single_backticks(s): + # type: (unicode) -> unicode """ Parameter: String. Returns: The string, with `backticks' -style single quotes @@ -260,6 +269,7 @@ def educate_single_backticks(s): def educate_dashes_oldschool(s): + # type: (unicode) -> unicode """ Parameter: String. @@ -271,6 +281,7 @@ def educate_dashes_oldschool(s): def educate_dashes_oldschool_inverted(s): + # type: (unicode) -> unicode """ Parameter: String. @@ -289,6 +300,7 @@ def educate_dashes_oldschool_inverted(s): def educate_ellipses(s): + # type: (unicode) -> unicode """ Parameter: String. Returns: The string, with each instance of "..." translated to diff --git a/sphinx/util/stemmer/porter.py b/sphinx/util/stemmer/porter.py index 5f8d14ed6..beb860c9e 100644 --- a/sphinx/util/stemmer/porter.py +++ b/sphinx/util/stemmer/porter.py @@ -32,6 +32,7 @@ class PorterStemmer(object): def __init__(self): + # type: () -> None """The main part of the stemming algorithm starts here. b is a buffer holding a word to be stemmed. The letters are in b[k0], b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is @@ -42,12 +43,14 @@ class PorterStemmer(object): should be done before stem(...) is called. 
""" - self.b = "" # buffer for word to be stemmed + self.b = "" # type: unicode + # buffer for word to be stemmed self.k = 0 self.k0 = 0 - self.j = 0 # j is a general offset into the string + self.j = 0 # j is a general offset into the string def cons(self, i): + # type: (int) -> int """cons(i) is TRUE <=> b[i] is a consonant.""" if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' \ or self.b[i] == 'o' or self.b[i] == 'u': @@ -60,6 +63,7 @@ class PorterStemmer(object): return 1 def m(self): + # type: () -> int """m() measures the number of consonant sequences between k0 and j. if c is a consonant sequence and v a vowel sequence, and <..> indicates arbitrary presence, @@ -97,6 +101,7 @@ class PorterStemmer(object): i = i + 1 def vowelinstem(self): + # type: () -> int """vowelinstem() is TRUE <=> k0,...j contains a vowel""" for i in range(self.k0, self.j + 1): if not self.cons(i): @@ -104,6 +109,7 @@ class PorterStemmer(object): return 0 def doublec(self, j): + # type: (int) -> int """doublec(j) is TRUE <=> j,(j-1) contain a double consonant.""" if j < (self.k0 + 1): return 0 @@ -112,6 +118,7 @@ class PorterStemmer(object): return self.cons(j) def cvc(self, i): + # type: (int) -> int """cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant and also if the second c is not w,x or y. this is used when trying to @@ -129,6 +136,7 @@ class PorterStemmer(object): return 1 def ends(self, s): + # type: (unicode) -> int """ends(s) is TRUE <=> k0,...k ends with the string s.""" length = len(s) if s[length - 1] != self.b[self.k]: # tiny speed-up @@ -141,6 +149,7 @@ class PorterStemmer(object): return 1 def setto(self, s): + # type: (unicode) -> None """setto(s) sets (j+1),...k to the characters in the string s, readjusting k.""" length = len(s) @@ -148,11 +157,13 @@ class PorterStemmer(object): self.k = self.j + length def r(self, s): + # type: (unicode) -> None """r(s) is used further down.""" if self.m() > 0: self.setto(s) def step1ab(self): + # type: () -> None """step1ab() gets rid of plurals and -ed or -ing. e.g. caresses -> caress @@ -200,12 +211,14 @@ class PorterStemmer(object): self.setto("e") def step1c(self): + # type: () -> None """step1c() turns terminal y to i when there is another vowel in the stem.""" if (self.ends("y") and self.vowelinstem()): self.b = self.b[:self.k] + 'i' + self.b[self.k + 1:] def step2(self): + # type: () -> None """step2() maps double suffices to single ones. so -ization ( = -ize plus -ation) maps to -ize etc. note that the string before the suffix must give m() > 0. @@ -265,6 +278,7 @@ class PorterStemmer(object): # To match the published algorithm, delete this phrase def step3(self): + # type: () -> None """step3() dels with -ic-, -full, -ness etc. similar strategy to step2.""" if self.b[self.k] == 'e': @@ -287,6 +301,7 @@ class PorterStemmer(object): self.r("") def step4(self): + # type: () -> None """step4() takes off -ant, -ence etc., in context <c>vcvc<v>.""" if self.b[self.k - 1] == 'a': if self.ends("al"): @@ -370,6 +385,7 @@ class PorterStemmer(object): self.k = self.j def step5(self): + # type: () -> None """step5() removes a final -e if m() > 1, and changes -ll to -l if m() > 1. """ @@ -382,6 +398,7 @@ class PorterStemmer(object): self.k = self.k - 1 def stem(self, p, i, j): + # type: (unicode, int, int) -> unicode """In stem(p,i,j), p is a char pointer, and the string to be stemmed is from p[i] to p[j] inclusive. Typically i is zero and j is the offset to the last character of a string, (p[j+1] == '\0'). 
The diff --git a/sphinx/util/tags.py b/sphinx/util/tags.py index 180cb49ec..78ef7a22f 100644 --- a/sphinx/util/tags.py +++ b/sphinx/util/tags.py @@ -14,6 +14,10 @@ from jinja2.environment import Environment env = Environment() +if False: + # For type annotation + from typing import Iterator # NOQA + class BooleanParser(Parser): """ @@ -21,6 +25,7 @@ class BooleanParser(Parser): """ def parse_compare(self): + # type: () -> None token = self.stream.current if token.type == 'name': if token.value in ('true', 'false', 'True', 'False'): @@ -42,23 +47,29 @@ class BooleanParser(Parser): class Tags(object): def __init__(self, tags=None): + # type: (List[unicode]) -> None self.tags = dict.fromkeys(tags or [], True) def has(self, tag): + # type: (unicode) -> bool return tag in self.tags __contains__ = has def __iter__(self): + # type: () -> Iterator[unicode] return iter(self.tags) def add(self, tag): + # type: (unicode) -> None self.tags[tag] = True def remove(self, tag): + # type: (unicode) -> None self.tags.pop(tag, None) def eval_condition(self, condition): + # type: (unicode) -> bool # exceptions are handled by the caller parser = BooleanParser(env, condition, state='variable') expr = parser.parse_expression() @@ -66,6 +77,7 @@ class Tags(object): raise ValueError('chunk after expression') def eval_node(node): + # type: (nodes.Node) -> bool if isinstance(node, nodes.CondExpr): if eval_node(node.test): return eval_node(node.expr1) diff --git a/sphinx/util/template.py b/sphinx/util/template.py index f6db8034b..c742bbd17 100644 --- a/sphinx/util/template.py +++ b/sphinx/util/template.py @@ -16,27 +16,36 @@ from sphinx import package_dir from sphinx.jinja2glue import SphinxFileSystemLoader from sphinx.locale import get_translator +if False: + # For type annotation + from jinja2.loaders import BaseLoader # NOQA + class BaseRenderer(object): def __init__(self, loader=None): + # type: (BaseLoader) -> None self.env = SandboxedEnvironment(loader=loader, extensions=['jinja2.ext.i18n']) self.env.filters['repr'] = repr self.env.install_gettext_translations(get_translator()) def render(self, template_name, context): + # type: (unicode, Dict) -> unicode return self.env.get_template(template_name).render(context) def render_string(self, source, context): + # type: (unicode, Dict) -> unicode return self.env.from_string(source).render(context) class FileRenderer(BaseRenderer): def __init__(self, search_path): + # type: (unicode) -> None loader = SphinxFileSystemLoader(search_path) super(FileRenderer, self).__init__(loader) @classmethod def render_from_file(cls, filename, context): + # type: (unicode, Dict) -> unicode dirname = os.path.dirname(filename) basename = os.path.basename(filename) return cls(dirname).render(basename, context) @@ -44,17 +53,20 @@ class FileRenderer(BaseRenderer): class SphinxRenderer(FileRenderer): def __init__(self, template_path=None): + # type: (unicode) -> None if template_path is None: template_path = os.path.join(package_dir, 'templates') super(SphinxRenderer, self).__init__(template_path) @classmethod def render_from_file(cls, filename, context): + # type: (unicode, Dict) -> unicode return FileRenderer.render_from_file(filename, context) class LaTeXRenderer(SphinxRenderer): def __init__(self): + # type: () -> None template_path = os.path.join(package_dir, 'templates', 'latex') super(LaTeXRenderer, self).__init__(template_path) diff --git a/sphinx/util/texescape.py b/sphinx/util/texescape.py index 60f3d8322..2a6d7233d 100644 --- a/sphinx/util/texescape.py +++ 
b/sphinx/util/texescape.py @@ -126,6 +126,7 @@ tex_hl_escape_map_new = {} def init(): + # type: () -> None for a, b in tex_replacements: tex_escape_map[ord(a)] = b tex_replace_map[ord(a)] = '_' diff --git a/sphinx/util/websupport.py b/sphinx/util/websupport.py index f91cca97a..a416fa48c 100644 --- a/sphinx/util/websupport.py +++ b/sphinx/util/websupport.py @@ -7,7 +7,12 @@ :license: BSD, see LICENSE for details. """ +if False: + # For type annotation + from docutils import nodes # NOQA + def is_commentable(node): + # type: (nodes.Node) -> bool # return node.__class__.__name__ in ('paragraph', 'literal_block') return node.__class__.__name__ == 'paragraph' diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 7b27eb3ab..8292367d8 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -2472,7 +2472,7 @@ class LaTeXTranslator(nodes.NodeVisitor): # type: (nodes.Node) -> None text = self.encode(node.astext()) if not self.no_contractions and not self.in_parsed_literal: - text = educate_quotes_latex(text) + text = educate_quotes_latex(text) # type: ignore self.body.append(text) def depart_Text(self, node): From c106c2f1f051baa0dc6af6bf568dcf64b33b4013 Mon Sep 17 00:00:00 2001 From: Colin Marquardt <github@marquardt-home.de> Date: Wed, 8 Feb 2017 11:10:37 +0100 Subject: [PATCH 175/190] Fix link to Google Python Style. Small typofixes. --- doc/develop.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/develop.rst b/doc/develop.rst index f2f336cfa..2f14c945f 100644 --- a/doc/develop.rst +++ b/doc/develop.rst @@ -55,17 +55,17 @@ This is the current list of contributed extensions in that repository: - hyphenator: client-side hyphenation of HTML using hyphenator_ - inlinesyntaxhighlight_: inline syntax highlighting - lassodomain: a domain for documenting Lasso_ source code -- libreoffice: an extension to include any drawing supported by LibreOffice (e.g. odg, vsd...). +- libreoffice: an extension to include any drawing supported by LibreOffice (e.g. odg, vsd, ...). - lilypond: an extension inserting music scripts from Lilypond_ in PNG format. - makedomain_: a domain for `GNU Make`_ - matlabdomain: document MATLAB_ code. - mockautodoc: mock imports. - mscgen: embed mscgen-formatted MSC (Message Sequence Chart)s. - napoleon: supports `Google style`_ and `NumPy style`_ docstrings. -- nicoviceo: embed videos from nicovideo +- nicovideo: embed videos from nicovideo - nwdiag: embed network diagrams by using nwdiag_ - omegat: support tools to collaborate with OmegaT_ (Sphinx 1.1 needed) -- osaka: convert standard Japanese doc to Osaka dialect (it is joke extension) +- osaka: convert standard Japanese doc to Osaka dialect (this is a joke extension) - paverutils: an alternate integration of Sphinx with Paver_. - phpdomain: an extension for PHP support - plantuml: embed UML diagram by using PlantUML_ @@ -113,7 +113,7 @@ own extensions. .. _Google Analytics: http://www.google.com/analytics/ .. _Google Chart: https://developers.google.com/chart/ .. _Google Maps: https://www.google.com/maps -.. _Google style: http://google-styleguide.googlecode.com/svn/trunk/pyguide.html +.. _Google style: https://google.github.io/styleguide/pyguide.html .. _NumPy style: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt .. _hyphenator: https://github.com/mnater/hyphenator .. 
_exceltable: http://pythonhosted.org/sphinxcontrib-exceltable/ From 2968ef3c2656c0ba3a81c28e430a44cf67921414 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Thu, 9 Feb 2017 12:55:07 +0900 Subject: [PATCH 176/190] Fix #3402: Allow to suppress "download file not readable" warnings using ``suppress_warnings`` --- CHANGES | 2 ++ doc/config.rst | 2 ++ sphinx/environment/collectors/asset.py | 6 +++--- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index c3f6e2852..05bf86137 100644 --- a/CHANGES +++ b/CHANGES @@ -46,6 +46,8 @@ Features added * #3303: Add ``:pyversion:`` option to the doctest directive. * #3378: (latex) support for ``:widths:`` option of table directives (refs: #3379, #3381) +* #3402: Allow to suppress "download file not readable" warnings using + :confval:`suppress_warnings`. Bugs fixed ---------- diff --git a/doc/config.rst b/doc/config.rst index c082dba2f..9b1079f47 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -226,8 +226,10 @@ General configuration * app.add_role * app.add_generic_role * app.add_source_parser + * download.not_readable * image.data_uri * image.nonlocal_uri + * image.not_readable * ref.term * ref.ref * ref.numref diff --git a/sphinx/environment/collectors/asset.py b/sphinx/environment/collectors/asset.py index 54283790c..38deeeca4 100644 --- a/sphinx/environment/collectors/asset.py +++ b/sphinx/environment/collectors/asset.py @@ -96,7 +96,7 @@ class ImageCollector(EnvironmentCollector): app.env.dependencies[docname].add(imgpath) if not os.access(path.join(app.srcdir, imgpath), os.R_OK): logger.warning('image file not readable: %s' % imgpath, - location=node) + location=node, type='image', subtype='not_readable') continue app.env.images.add_file(docname, imgpath) @@ -112,7 +112,7 @@ class ImageCollector(EnvironmentCollector): globbed.setdefault(mimetype, []).append(new_imgpath) except (OSError, IOError) as err: logger.warning('image file %s not readable: %s' % (filename, err), - location=node) + location=node, type='image', subtype='not_readable') for key, files in iteritems(globbed): candidates[key] = sorted(files, key=len)[0] # select by similarity @@ -137,7 +137,7 @@ class DownloadFileCollector(EnvironmentCollector): app.env.dependencies[app.env.docname].add(rel_filename) if not os.access(filename, os.R_OK): logger.warning('download file not readable: %s' % filename, - location=node) + location=node, type='download', subtype='not_readable') continue node['filename'] = app.env.dlfiles.add_file(app.env.docname, filename) From 82e011cae615ea9bb716ff0a1c2f1482e557f275 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Tue, 7 Feb 2017 23:43:56 +0900 Subject: [PATCH 177/190] Refactor: split .rst file for text-latex-table --- tests/roots/test-latex-table/index.rst | 237 +-------------------- tests/roots/test-latex-table/longtable.rst | 117 ++++++++++ tests/roots/test-latex-table/tabular.rst | 123 +++++++++++ tests/test_build_latex.py | 8 +- 4 files changed, 247 insertions(+), 238 deletions(-) create mode 100644 tests/roots/test-latex-table/longtable.rst create mode 100644 tests/roots/test-latex-table/tabular.rst diff --git a/tests/roots/test-latex-table/index.rst b/tests/roots/test-latex-table/index.rst index 129d024a0..a0003b740 100644 --- a/tests/roots/test-latex-table/index.rst +++ b/tests/roots/test-latex-table/index.rst @@ -1,238 +1,7 @@ test-latex-table ================ -simple table ------------- +.. 
toctree:: -======= ======= -header1 header2 -======= ======= -cell1-1 cell1-2 -cell2-1 cell2-2 -cell3-1 cell3-2 -======= ======= - -grid table ----------- - -+---------+---------+---------+ -| header1 | header2 | header3 | -+=========+=========+=========+ -| cell1-1 | cell1-2 | cell1-3 | -+---------+ +---------+ -| cell2-1 | | cell2-2 | -+ +---------+---------+ -| | cell3-2 | -+---------+ | -| cell4-1 | | -+---------+---------+---------+ -| cell5-1 | -+---------+---------+---------+ - -table having :widths: option ----------------------------- - -.. table:: - :widths: 30,70 - - ======= ======= - header1 header2 - ======= ======= - cell1-1 cell1-2 - cell2-1 cell2-2 - cell3-1 cell3-2 - ======= ======= - -table with tabularcolumn ------------------------- - -.. tabularcolumns:: |c|c| - -======= ======= -header1 header2 -======= ======= -cell1-1 cell1-2 -cell2-1 cell2-2 -cell3-1 cell3-2 -======= ======= - -table having caption --------------------- - -.. list-table:: caption for table - :header-rows: 1 - - * - header1 - - header2 - * - cell1-1 - - cell1-2 - * - cell2-1 - - cell2-2 - * - cell3-1 - - cell3-2 - -table having verbatim ---------------------- - -.. list-table:: - :header-rows: 1 - - * - header1 - - header2 - * - :: - - hello world - - - cell1-2 - * - cell2-1 - - cell2-2 - * - cell3-1 - - cell3-2 - -table having both :widths: and problematic cell ------------------------------------------------ - -.. list-table:: - :header-rows: 1 - :widths: 30,70 - - * - header1 - - header2 - * - + item1 - + item2 - - cell1-2 - * - cell2-1 - - cell2-2 - * - cell3-1 - - cell3-2 - -table having problematic cell ------------------------------ - -.. list-table:: - :header-rows: 1 - - * - header1 - - header2 - * - + item1 - + item2 - - cell1-2 - * - cell2-1 - - cell2-2 - * - cell3-1 - - cell3-2 - -longtable ---------- - -.. table:: - :class: longtable - - ======= ======= - header1 header2 - ======= ======= - cell1-1 cell1-2 - cell2-1 cell2-2 - cell3-1 cell3-2 - ======= ======= - -longtable having :widths: option --------------------------------- - -.. table:: - :class: longtable - :widths: 30,70 - - ======= ======= - header1 header2 - ======= ======= - cell1-1 cell1-2 - cell2-1 cell2-2 - cell3-1 cell3-2 - ======= ======= - -longtable with tabularcolumn ----------------------------- - -.. tabularcolumns:: |c|c| - -.. table:: - :class: longtable - - ======= ======= - header1 header2 - ======= ======= - cell1-1 cell1-2 - cell2-1 cell2-2 - cell3-1 cell3-2 - ======= ======= - -longtable having caption ------------------------- - -.. list-table:: caption for longtable - :class: longtable - :header-rows: 1 - - * - header1 - - header2 - * - cell1-1 - - cell1-2 - * - cell2-1 - - cell2-2 - * - cell3-1 - - cell3-2 - -longtable having verbatim -------------------------- - -.. list-table:: - :class: longtable - :header-rows: 1 - - * - header1 - - header2 - * - :: - - hello world - - - cell1-2 - * - cell2-1 - - cell2-2 - * - cell3-1 - - cell3-2 - -longtable having both :widths: and problematic cell ---------------------------------------------------- - -.. list-table:: - :class: longtable - :header-rows: 1 - :widths: 30,70 - - * - header1 - - header2 - * - + item1 - + item2 - - cell1-2 - * - cell2-1 - - cell2-2 - * - cell3-1 - - cell3-2 - -longtable having problematic cell ---------------------------------- - -.. 
list-table:: - :class: longtable - :header-rows: 1 - - * - header1 - - header2 - * - + item1 - + item2 - - cell1-2 - * - cell2-1 - - cell2-2 - * - cell3-1 - - cell3-2 + tabular + longtable diff --git a/tests/roots/test-latex-table/longtable.rst b/tests/roots/test-latex-table/longtable.rst new file mode 100644 index 000000000..333c5b544 --- /dev/null +++ b/tests/roots/test-latex-table/longtable.rst @@ -0,0 +1,117 @@ +longtables +========== + +longtable +--------- + +.. table:: + :class: longtable + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= + +longtable having :widths: option +-------------------------------- + +.. table:: + :class: longtable + :widths: 30,70 + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= + +longtable with tabularcolumn +---------------------------- + +.. tabularcolumns:: |c|c| + +.. table:: + :class: longtable + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= + +longtable having caption +------------------------ + +.. list-table:: caption for longtable + :class: longtable + :header-rows: 1 + + * - header1 + - header2 + * - cell1-1 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +longtable having verbatim +------------------------- + +.. list-table:: + :class: longtable + :header-rows: 1 + + * - header1 + - header2 + * - :: + + hello world + + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +longtable having both :widths: and problematic cell +--------------------------------------------------- + +.. list-table:: + :class: longtable + :header-rows: 1 + :widths: 30,70 + + * - header1 + - header2 + * - + item1 + + item2 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +longtable having problematic cell +--------------------------------- + +.. list-table:: + :class: longtable + :header-rows: 1 + + * - header1 + - header2 + * - + item1 + + item2 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 diff --git a/tests/roots/test-latex-table/tabular.rst b/tests/roots/test-latex-table/tabular.rst new file mode 100644 index 000000000..f7fdaf44a --- /dev/null +++ b/tests/roots/test-latex-table/tabular.rst @@ -0,0 +1,123 @@ +taburar and taburary +==================== + +simple table +------------ + +======= ======= +header1 header2 +======= ======= +cell1-1 cell1-2 +cell2-1 cell2-2 +cell3-1 cell3-2 +======= ======= + +grid table +---------- + ++---------+---------+---------+ +| header1 | header2 | header3 | ++=========+=========+=========+ +| cell1-1 | cell1-2 | cell1-3 | ++---------+ +---------+ +| cell2-1 | | cell2-2 | ++ +---------+---------+ +| | cell3-2 | ++---------+ | +| cell4-1 | | ++---------+---------+---------+ +| cell5-1 | ++---------+---------+---------+ + +table having :widths: option +---------------------------- + +.. table:: + :widths: 30,70 + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= + +table with tabularcolumn +------------------------ + +.. tabularcolumns:: |c|c| + +======= ======= +header1 header2 +======= ======= +cell1-1 cell1-2 +cell2-1 cell2-2 +cell3-1 cell3-2 +======= ======= + +table having caption +-------------------- + +.. 
list-table:: caption for table + :header-rows: 1 + + * - header1 + - header2 + * - cell1-1 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +table having verbatim +--------------------- + +.. list-table:: + :header-rows: 1 + + * - header1 + - header2 + * - :: + + hello world + + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +table having both :widths: and problematic cell +----------------------------------------------- + +.. list-table:: + :header-rows: 1 + :widths: 30,70 + + * - header1 + - header2 + * - + item1 + + item2 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 + +table having problematic cell +----------------------------- + +.. list-table:: + :header-rows: 1 + + * - header1 + - header2 + * - + item1 + + item2 + - cell1-2 + * - cell2-1 + - cell2-2 + * - cell3-1 + - cell3-2 diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 17fa61957..30ca73eda 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -824,9 +824,9 @@ def test_latex_table(app, status, warning): app.builder.build_all() result = (app.outdir / 'test.tex').text(encoding='utf8') tables = {} - for chap in re.split(r'\\chapter(?={.*})', result)[1:]: + for chap in re.split(r'\\section{', result)[1:]: sectname, content = chap.split('}', 1) - tables[sectname[1:]] = content.strip() + tables[sectname] = content.strip() # simple_table table = tables['simple table'] @@ -874,7 +874,7 @@ def test_latex_table(app, status, warning): # table having caption table = tables['table having caption'] assert ('\\begin{threeparttable}\n\\capstart\\caption{caption for table}' - '\\label{\\detokenize{index:id1}}' in table) + '\\label{\\detokenize{tabular:id1}}' in table) assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|}' in table) assert ('\\hline\n' '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' @@ -929,7 +929,7 @@ def test_latex_table(app, status, warning): # longtable having caption table = tables['longtable having caption'] assert ('\\begin{longtable}{|l|l|}\n\\caption{caption for longtable}' - '\\label{\\detokenize{index:id2}}\\\\\n\\hline' in table) + '\\label{\\detokenize{longtable:id1}}\\\\\n\\hline' in table) # longtable having verbatim table = tables['longtable having verbatim'] From da6f4c00199ac39990bffaf40e00057996d94455 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Fri, 10 Feb 2017 15:53:42 +0900 Subject: [PATCH 178/190] Add mypy annotations --- sphinx/__init__.py | 9 +- sphinx/builders/html.py | 2 +- sphinx/builders/latex.py | 2 +- sphinx/domains/__init__.py | 6 +- sphinx/domains/std.py | 1 + sphinx/environment/__init__.py | 3 +- sphinx/environment/adapters/indexentries.py | 3 + sphinx/environment/adapters/toctree.py | 5 +- sphinx/ext/autodoc.py | 3 + sphinx/ext/doctest.py | 1 + sphinx/highlighting.py | 22 ++- sphinx/jinja2glue.py | 1 + sphinx/locale/__init__.py | 45 ++++++- sphinx/quickstart.py | 30 ++++- sphinx/roles.py | 33 ++++- sphinx/transforms/i18n.py | 1 + sphinx/util/nodes.py | 4 +- sphinx/writers/html.py | 142 +++++++++++++++++++- sphinx/writers/latex.py | 1 + sphinx/writers/manpage.py | 87 ++++++++++++ sphinx/writers/texinfo.py | 2 + sphinx/writers/text.py | 1 + 22 files changed, 368 insertions(+), 36 deletions(-) diff --git a/sphinx/__init__.py b/sphinx/__init__.py index 0c931cd4d..7823cc9de 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -60,6 +60,7 @@ if __version__.endswith('+'): def main(argv=sys.argv): + # type: (List[str]) -> None if sys.argv[1:2] == ['-M']: 
sys.exit(make_main(argv)) else: @@ -67,6 +68,7 @@ def main(argv=sys.argv): def build_main(argv=sys.argv): + # type: (List[str]) -> int """Sphinx build "main" command-line entry.""" if (sys.version_info[:3] < (2, 7, 0) or (3, 0, 0) <= sys.version_info[:3] < (3, 4, 0)): @@ -104,14 +106,15 @@ def build_main(argv=sys.argv): sys.stderr.write('Error: Sphinx requires at least Docutils 0.10 to ' 'run.\n') return 1 - return cmdline.main(argv) + return cmdline.main(argv) # type: ignore def make_main(argv=sys.argv): + # type: (List[str]) -> int """Sphinx build "make mode" entry.""" from sphinx import make_mode - return make_mode.run_make_mode(argv[2:]) + return make_mode.run_make_mode(argv[2:]) # type: ignore if __name__ == '__main__': - sys.exit(main(sys.argv)) + sys.exit(main(sys.argv)) # type: ignore diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 027c06205..7914d41bd 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -621,7 +621,7 @@ class StandaloneHTMLBuilder(Builder): ensuredir(path.join(self.outdir, '_static')) # first, create pygments style file with open(path.join(self.outdir, '_static', 'pygments.css'), 'w') as f: - f.write(self.highlighter.get_stylesheet()) + f.write(self.highlighter.get_stylesheet()) # type: ignore # then, copy translations JavaScript file if self.config.language is not None: jsfile = self._get_translations_js() diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index d3bc50322..493da2ccb 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -104,7 +104,7 @@ class LaTeXBuilder(Builder): f.write('\\NeedsTeXFormat{LaTeX2e}[1995/12/01]\n') f.write('\\ProvidesPackage{sphinxhighlight}' '[2016/05/29 stylesheet for highlighting with pygments]\n\n') - f.write(highlighter.get_stylesheet()) + f.write(highlighter.get_stylesheet()) # type: ignore def write(self, *ignored): # type: (Any) -> None diff --git a/sphinx/domains/__init__.py b/sphinx/domains/__init__.py index a0fcafcc6..f0c866afb 100644 --- a/sphinx/domains/__init__.py +++ b/sphinx/domains/__init__.py @@ -21,6 +21,7 @@ if False: # For type annotation from typing import Any, Callable, Iterable, Tuple, Type, Union # NOQA from docutils import nodes # NOQA + from docutils.parsers.rst.states import Inliner # NOQA from sphinx.builders import Builder # NOQA from sphinx.environment import BuildEnvironment # NOQA @@ -189,8 +190,8 @@ class Domain(object): return None fullname = '%s:%s' % (self.name, name) - def role_adapter(typ, rawtext, text, lineno, inliner, - options={}, content=[]): + def role_adapter(typ, rawtext, text, lineno, inliner, options={}, content=[]): + # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> nodes.Node # NOQA return self.roles[name](fullname, rawtext, text, lineno, inliner, options, content) self._role_cache[name] = role_adapter @@ -210,6 +211,7 @@ class Domain(object): class DirectiveAdapter(BaseDirective): # type: ignore def run(self): + # type: () -> List[nodes.Node] self.name = fullname return BaseDirective.run(self) self._directive_cache[name] = DirectiveAdapter diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py index 1c064ee65..fbe779b10 100644 --- a/sphinx/domains/std.py +++ b/sphinx/domains/std.py @@ -895,6 +895,7 @@ class StandardDomain(Domain): # type: (nodes.Node) -> unicode """Get figure type of nodes.""" def has_child(node, cls): + # type: (nodes.Node, Type) -> bool return any(isinstance(child, cls) for child in node) if isinstance(node, nodes.section): diff --git 
a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py index 2140193ef..3e6e49ff3 100644 --- a/sphinx/environment/__init__.py +++ b/sphinx/environment/__init__.py @@ -746,7 +746,7 @@ class BuildEnvironment(object): @property def currmodule(self): - # type () -> None + # type: () -> None """Backwards compatible alias. Will be removed.""" logger.warning('env.currmodule is being referenced by an ' 'extension; this API will be removed in the future', @@ -1024,6 +1024,7 @@ class BuildEnvironment(object): traversed = set() def traverse_toctree(parent, docname): + # type: (unicode, unicode) -> Iterator[Tuple[unicode, unicode]] if parent == docname: logger.warning('self referenced toctree found. Ignored.', location=docname) return diff --git a/sphinx/environment/adapters/indexentries.py b/sphinx/environment/adapters/indexentries.py index 9eeb50833..d298d0973 100644 --- a/sphinx/environment/adapters/indexentries.py +++ b/sphinx/environment/adapters/indexentries.py @@ -42,6 +42,7 @@ class IndexEntries(object): new = {} # type: Dict[unicode, List] def add_entry(word, subword, main, link=True, dic=new, key=None): + # type: (unicode, unicode, unicode, bool, Dict, unicode) -> None # Force the word to be unicode if it's a ASCII bytestring. # This will solve problems with unicode normalization later. # For instance the RFC role will add bytestrings at the moment @@ -96,6 +97,7 @@ class IndexEntries(object): # sort the index entries; put all symbols at the front, even those # following the letters in ASCII, this is where the chr(127) comes from def keyfunc(entry, lcletters=string.ascii_lowercase + '_'): + # type: (Tuple[unicode, List], unicode) -> Tuple[unicode, unicode] key, (void, void, category_key) = entry if category_key: # using specified category key to sort @@ -140,6 +142,7 @@ class IndexEntries(object): # group the entries by letter def keyfunc2(item, letters=string.ascii_uppercase + '_'): + # type: (Tuple[unicode, List], unicode) -> unicode # hack: mutating the subitems dicts to a list in the keyfunc k, v = item v[1] = sorted((si, se) for (si, (se, void, void)) in iteritems(v[1])) diff --git a/sphinx/environment/adapters/toctree.py b/sphinx/environment/adapters/toctree.py index 1ab3e229f..bde8d1ac4 100644 --- a/sphinx/environment/adapters/toctree.py +++ b/sphinx/environment/adapters/toctree.py @@ -85,6 +85,7 @@ class TocTree(object): toctree_ancestors = self.get_toctree_ancestors(docname) def _toctree_add_classes(node, depth): + # type: (nodes.Node, int) -> None """Add 'toctree-l%d' and 'current' classes to the toctree.""" for subnode in node.children: if isinstance(subnode, (addnodes.compact_paragraph, @@ -114,8 +115,8 @@ class TocTree(object): subnode['iscurrent'] = True subnode = subnode.parent - def _entries_from_toctree(toctreenode, parents, - separate=False, subtree=False): + def _entries_from_toctree(toctreenode, parents, separate=False, subtree=False): + # type: (addnodes.toctree, List[nodes.Node], bool, bool) -> List[nodes.Node] """Return TOC entries for a toctree node.""" refs = [(e[0], e[1]) for e in toctreenode['entries']] entries = [] diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 7f77d3a3f..606953811 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -975,6 +975,7 @@ class Documenter(object): tagorder = self.analyzer.tagorder def keyfunc(entry): + # type: (Tuple[Documenter, bool]) -> int fullname = entry[0].name.split('::')[1] return tagorder.get(fullname, len(tagorder)) memberdocumenters.sort(key=keyfunc) @@ -1828,7 +1829,9 @@ class 
testcls: """test doc string""" def __getattr__(self, x): + # type: (Any) -> Any return x def __setattr__(self, x, y): + # type: (Any, Any) -> None """Attr setter.""" diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index a918a925c..9cc1a06af 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -58,6 +58,7 @@ else: def compare_version(ver1, ver2, operand): + # type: (unicode, unicode, unicode) -> bool """Compare `ver1` to `ver2`, relying on `operand`. Some examples: diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py index 493ecb7a7..8996fb4a4 100644 --- a/sphinx/highlighting.py +++ b/sphinx/highlighting.py @@ -27,6 +27,12 @@ from pygments.styles import get_style_by_name from pygments.util import ClassNotFound from sphinx.pygments_styles import SphinxStyle, NoneStyle +if False: + # For type annotation + from typing import Any # NOQA + from pygments.formatter import Formatter # NOQA + + logger = logging.getLogger(__name__) lexers = dict( @@ -59,8 +65,8 @@ class PygmentsBridge(object): html_formatter = HtmlFormatter latex_formatter = LatexFormatter - def __init__(self, dest='html', stylename='sphinx', - trim_doctest_flags=False): + def __init__(self, dest='html', stylename='sphinx', trim_doctest_flags=False): + # type: (unicode, unicode, bool) -> None self.dest = dest if stylename is None or stylename == 'sphinx': style = SphinxStyle @@ -73,7 +79,7 @@ class PygmentsBridge(object): else: style = get_style_by_name(stylename) self.trim_doctest_flags = trim_doctest_flags - self.formatter_args = {'style': style} + self.formatter_args = {'style': style} # type: Dict[unicode, Any] if dest == 'html': self.formatter = self.html_formatter else: @@ -81,10 +87,12 @@ class PygmentsBridge(object): self.formatter_args['commandprefix'] = 'PYG' def get_formatter(self, **kwargs): - kwargs.update(self.formatter_args) + # type: (Any) -> Formatter + kwargs.update(self.formatter_args) # type: ignore return self.formatter(**kwargs) def unhighlighted(self, source): + # type: (unicode) -> unicode if self.dest == 'html': return '<pre>' + htmlescape(source) + '</pre>\n' else: @@ -96,6 +104,7 @@ class PygmentsBridge(object): source + '\\end{Verbatim}\n' def highlight_block(self, source, lang, opts=None, location=None, force=False, **kwargs): + # type: (unicode, unicode, Any, Any, bool, Any) -> unicode if not isinstance(source, text_type): source = source.decode() @@ -131,8 +140,8 @@ class PygmentsBridge(object): # trim doctest options if wanted if isinstance(lexer, PythonConsoleLexer) and self.trim_doctest_flags: - source = doctest.blankline_re.sub('', source) - source = doctest.doctestopt_re.sub('', source) + source = doctest.blankline_re.sub('', source) # type: ignore + source = doctest.doctestopt_re.sub('', source) # type: ignore # highlight via Pygments formatter = self.get_formatter(**kwargs) @@ -157,6 +166,7 @@ class PygmentsBridge(object): return hlsource.translate(tex_hl_escape_map_new) def get_stylesheet(self): + # type: () -> unicode formatter = self.get_formatter() if self.dest == 'html': return formatter.get_style_defs('.highlight') diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py index 6ebb1353f..f879771e7 100644 --- a/sphinx/jinja2glue.py +++ b/sphinx/jinja2glue.py @@ -113,6 +113,7 @@ class SphinxFileSystemLoader(FileSystemLoader): mtime = path.getmtime(filename) def uptodate(): + # type: () -> bool try: return path.getmtime(filename) == mtime except OSError: diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py index 135b76b9d..f814b665c 100644 --- 
a/sphinx/locale/__init__.py +++ b/sphinx/locale/__init__.py @@ -16,7 +16,7 @@ from six.moves import UserString if False: # For type annotation - from typing import Any, Tuple # NOQA + from typing import Any, Callable, Iterator, Tuple # NOQA class _TranslationProxy(UserString, object): @@ -35,24 +35,31 @@ class _TranslationProxy(UserString, object): __slots__ = ('_func', '_args') def __new__(cls, func, *args): + # type: (Callable, unicode) -> object if not args: # not called with "function" and "arguments", but a plain string return text_type(func) - return object.__new__(cls) + return object.__new__(cls) # type: ignore def __getnewargs__(self): - return (self._func,) + self._args + # type: () -> Tuple + return (self._func,) + self._args # type: ignore def __init__(self, func, *args): + # type: (Callable, unicode) -> None self._func = func self._args = args - data = property(lambda x: x._func(*x._args)) + @property + def data(self): + # type: () -> unicode + return self._func(*self._args) # replace function from UserString; it instantiates a self.__class__ # for the encoding result def encode(self, encoding=None, errors=None): + # type: (unicode, unicode) -> str if encoding: if errors: return self.data.encode(encoding, errors) @@ -62,81 +69,106 @@ class _TranslationProxy(UserString, object): return self.data.encode() def __contains__(self, key): + # type: (Any) -> bool return key in self.data def __bool__(self): + # type: () -> bool return bool(self.data) __nonzero__ = __bool__ # for python2 compatibility def __dir__(self): + # type: () -> List[str] return dir(text_type) def __iter__(self): + # type: () -> Iterator[unicode] return iter(self.data) def __len__(self): + # type: () -> int return len(self.data) def __str__(self): + # type: () -> str return str(self.data) def __unicode__(self): + # type: () -> unicode return text_type(self.data) def __add__(self, other): + # type: (unicode) -> unicode return self.data + other def __radd__(self, other): + # type: (unicode) -> unicode return other + self.data def __mod__(self, other): + # type: (unicode) -> unicode return self.data % other def __rmod__(self, other): + # type: (unicode) -> unicode return other % self.data def __mul__(self, other): + # type: (Any) -> unicode return self.data * other def __rmul__(self, other): + # type: (Any) -> unicode return other * self.data def __lt__(self, other): + # type: (unicode) -> bool return self.data < other def __le__(self, other): + # type: (unicode) -> bool return self.data <= other def __eq__(self, other): + # type: (Any) -> bool return self.data == other def __ne__(self, other): + # type: (Any) -> bool return self.data != other def __gt__(self, other): + # type: (unicode) -> bool return self.data > other def __ge__(self, other): + # type: (unicode) -> bool return self.data >= other def __getattr__(self, name): + # type: (unicode) -> Any if name == '__members__': return self.__dir__() return getattr(self.data, name) def __getstate__(self): + # type: () -> Tuple[Callable, Tuple[unicode, ...]] return self._func, self._args def __setstate__(self, tup): + # type: (Tuple[Callable, Tuple[unicode]]) -> None self._func, self._args = tup def __getitem__(self, key): + # type: (Any) -> unicode return self.data[key] def __copy__(self): + # type: () -> _TranslationProxy return self def __repr__(self): + # type: () -> str try: return 'i' + repr(text_type(self.data)) except: @@ -173,13 +205,13 @@ admonitionlabels = { 'seealso': l_('See also'), 'tip': l_('Tip'), 'warning': l_('Warning'), -} +} # type: Dict[unicode, 
unicode] versionlabels = { 'versionadded': l_('New in version %s'), 'versionchanged': l_('Changed in version %s'), 'deprecated': l_('Deprecated since version %s'), -} +} # type: Dict[unicode, unicode] # XXX Python specific pairindextypes = { @@ -242,6 +274,7 @@ def init(locale_dirs, language, catalog='sphinx'): def get_translator(catalog='sphinx'): + # type: (unicode) -> gettext.NullTranslations global translators translator = translators.get(catalog) if translator is None: diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py index 1d128b878..c599f079a 100644 --- a/sphinx/quickstart.py +++ b/sphinx/quickstart.py @@ -42,6 +42,10 @@ from sphinx.util.console import ( # type: ignore from sphinx.util.template import SphinxRenderer from sphinx.util import texescape +if False: + # For type annotation + from typing import Any, Callable, Pattern # NOQA + TERM_ENCODING = getattr(sys.stdin, 'encoding', None) DEFAULT_VALUE = { @@ -66,6 +70,7 @@ PROMPT_PREFIX = '> ' def mkdir_p(dir): + # type: (unicode) -> None if path.isdir(dir): return os.makedirs(dir) @@ -73,6 +78,7 @@ def mkdir_p(dir): # function to get input from terminal -- overridden by the test suite def term_input(prompt): + # type: (unicode) -> unicode print(prompt, end='') return input('') @@ -82,6 +88,7 @@ class ValidationError(Exception): def is_path(x): + # type: (unicode) -> unicode x = path.expanduser(x) if path.exists(x) and not path.isdir(x): raise ValidationError("Please enter a valid path name.") @@ -89,30 +96,36 @@ def is_path(x): def allow_empty(x): + # type: (unicode) -> unicode return x def nonempty(x): + # type: (unicode) -> unicode if not x: raise ValidationError("Please enter some text.") return x def choice(*l): + # type: (List[unicode]) -> Callable[[unicode], unicode] def val(x): + # type: (unicode) -> unicode if x not in l: - raise ValidationError('Please enter one of %s.' % ', '.join(l)) + raise ValidationError('Please enter one of %s.' % ', '.join(l)) # type: ignore return x return val def boolean(x): + # type: (unicode) -> bool if x.upper() not in ('Y', 'YES', 'N', 'NO'): raise ValidationError("Please enter either 'y' or 'n'.") return x.upper() in ('Y', 'YES') def suffix(x): + # type: (unicode) -> unicode if not (x[0:1] == '.' and len(x) > 1): raise ValidationError("Please enter a file suffix, " "e.g. 
'.rst' or '.txt'.") @@ -120,10 +133,12 @@ def suffix(x): def ok(x): + # type: (unicode) -> unicode return x def term_decode(text): + # type: (unicode) -> unicode if isinstance(text, text_type): return text @@ -145,9 +160,10 @@ def term_decode(text): def do_prompt(d, key, text, default=None, validator=nonempty): + # type: (Dict, unicode, unicode, unicode, Callable[[unicode], Any]) -> None while True: if default is not None: - prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default) + prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default) # type: unicode else: prompt = PROMPT_PREFIX + text + ': ' if PY2: @@ -179,6 +195,7 @@ def do_prompt(d, key, text, default=None, validator=nonempty): def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")): + # type: (unicode, Pattern) -> unicode # remove Unicode literal prefixes if PY3: return rex.sub('\\1', source) @@ -188,10 +205,12 @@ def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")): class QuickstartRenderer(SphinxRenderer): def __init__(self, templatedir): + # type: (unicode) -> None self.templatedir = templatedir or '' super(QuickstartRenderer, self).__init__() def render(self, template_name, context): + # type: (unicode, Dict) -> unicode user_template = path.join(self.templatedir, path.basename(template_name)) if self.templatedir and path.exists(user_template): return self.render_from_file(user_template, context) @@ -200,6 +219,7 @@ class QuickstartRenderer(SphinxRenderer): def ask_user(d): + # type: (Dict) -> None """Ask the user for quickstart values missing from *d*. Values are: @@ -375,6 +395,7 @@ directly.''') def generate(d, overwrite=True, silent=False, templatedir=None): + # type: (Dict, bool, bool, unicode) -> None """Generate project based on values in *d*.""" template = QuickstartRenderer(templatedir=templatedir) @@ -432,6 +453,7 @@ def generate(d, overwrite=True, silent=False, templatedir=None): mkdir_p(path.join(srcdir, d['dot'] + 'static')) def write_file(fpath, content, newline=None): + # type: (unicode, unicode, unicode) -> None if overwrite or not path.isfile(fpath): print('Creating file %s.' % fpath) with open(fpath, 'wt', encoding='utf-8', newline=newline) as f: @@ -488,6 +510,7 @@ where "builder" is one of the supported builders, e.g. html, latex or linkcheck. def usage(argv, msg=None): + # type: (List[unicode], unicode) -> None if msg: print(msg, file=sys.stderr) print(file=sys.stderr) @@ -504,6 +527,7 @@ For more information, visit <http://sphinx-doc.org/>. 
def valid_dir(d): + # type: (Dict) -> bool dir = d['path'] if not path.exists(dir): return True @@ -534,6 +558,7 @@ def valid_dir(d): class MyFormatter(optparse.IndentedHelpFormatter): def format_usage(self, usage): + # type: (str) -> str return usage def format_help(self, formatter): @@ -546,6 +571,7 @@ class MyFormatter(optparse.IndentedHelpFormatter): def main(argv=sys.argv): + # type: (List[str]) -> int if not color_terminal(): nocolor() diff --git a/sphinx/roles.py b/sphinx/roles.py index 32894a8e7..5dd815547 100644 --- a/sphinx/roles.py +++ b/sphinx/roles.py @@ -21,6 +21,13 @@ from sphinx.util import ws_re from sphinx.util.nodes import split_explicit_title, process_index_entry, \ set_role_source_info +if False: + # For type annotation + from typing import Any, Tuple, Type # NOQA + from docutils.parsers.rst.states import Inliner # NOQA + from sphinx.application import Sphinx # NOQA + from sphinx.environment import BuildEnvironment # NOQA + generic_docroles = { 'command': addnodes.literal_strong, @@ -67,6 +74,7 @@ class XRefRole(object): def __init__(self, fix_parens=False, lowercase=False, nodeclass=None, innernodeclass=None, warn_dangling=False): + # type: (bool, bool, Type[nodes.Node], Type[nodes.Node], bool) -> None self.fix_parens = fix_parens self.lowercase = lowercase self.warn_dangling = warn_dangling @@ -76,6 +84,7 @@ class XRefRole(object): self.innernodeclass = innernodeclass def _fix_parens(self, env, has_explicit_title, title, target): + # type: (BuildEnvironment, bool, unicode, unicode) -> Tuple[unicode, unicode] if not has_explicit_title: if title.endswith('()'): # remove parentheses @@ -90,6 +99,7 @@ class XRefRole(object): def __call__(self, typ, rawtext, text, lineno, inliner, options={}, content=[]): + # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA env = inliner.document.settings.env if not typ: typ = env.temp_data.get('default_role') @@ -100,7 +110,7 @@ class XRefRole(object): else: typ = typ.lower() if ':' not in typ: - domain, role = '', typ + domain, role = '', typ # type: unicode, unicode classes = ['xref', role] else: domain, role = typ.split(':', 1) @@ -127,7 +137,7 @@ class XRefRole(object): refnode = self.nodeclass(rawtext, reftype=role, refdomain=domain, refexplicit=has_explicit_title) # we may need the line number for warnings - set_role_source_info(inliner, lineno, refnode) + set_role_source_info(inliner, lineno, refnode) # type: ignore title, target = self.process_link( env, refnode, has_explicit_title, title, target) # now that the target and title are finally determined, set them @@ -142,6 +152,7 @@ class XRefRole(object): # methods that can be overwritten def process_link(self, env, refnode, has_explicit_title, title, target): + # type: (BuildEnvironment, nodes.reference, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA """Called after parsing title and target text, and creating the reference node (given in *refnode*). This method can alter the reference node and must return a new (or the same) ``(title, target)`` @@ -150,6 +161,7 @@ class XRefRole(object): return title, ws_re.sub(' ', target) def result_nodes(self, document, env, node, is_ref): + # type: (nodes.document, BuildEnvironment, nodes.Node, bool) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA """Called before returning the finished nodes. *node* is the reference node if one was created (*is_ref* is then true), else the content node. 
This method can add other nodes and must return a ``(nodes, messages)`` @@ -160,6 +172,7 @@ class XRefRole(object): class AnyXRefRole(XRefRole): def process_link(self, env, refnode, has_explicit_title, title, target): + # type: (BuildEnvironment, nodes.reference, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA result = XRefRole.process_link(self, env, refnode, has_explicit_title, title, target) # add all possible context info (i.e. std:program, py:module etc.) @@ -169,13 +182,14 @@ class AnyXRefRole(XRefRole): def indexmarkup_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): + # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA """Role for PEP/RFC references that generate an index entry.""" env = inliner.document.settings.env if not typ: typ = env.config.default_role else: typ = typ.lower() - has_explicit_title, title, target = split_explicit_title(text) # type: bool, unicode, unicode # NOQA + has_explicit_title, title, target = split_explicit_title(text) title = utils.unescape(title) target = utils.unescape(target) targetid = 'index-%s' % env.new_serialno('index') @@ -233,10 +247,11 @@ _amp_re = re.compile(r'(?<!&)&(?![&\s])') def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): + # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA text = utils.unescape(text) if typ == 'menuselection': text = text.replace('-->', u'\N{TRIANGULAR BULLET}') - spans = _amp_re.split(text) + spans = _amp_re.split(text) # type: ignore node = nodes.inline(rawtext=rawtext) for i, span in enumerate(spans): @@ -263,10 +278,11 @@ _litvar_re = re.compile('{([^}]+)}') def emph_literal_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): + # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA text = utils.unescape(text) pos = 0 retnode = nodes.literal(role=typ.lower(), classes=[typ]) - for m in _litvar_re.finditer(text): + for m in _litvar_re.finditer(text): # type: ignore if m.start() > pos: txt = text[pos:m.start()] retnode += nodes.Text(txt, txt) @@ -281,8 +297,9 @@ _abbr_re = re.compile('\((.*)\)$', re.S) def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): + # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA text = utils.unescape(text) - m = _abbr_re.search(text) + m = _abbr_re.search(text) # type: ignore if m is None: return [addnodes.abbreviation(text, text, **options)], [] abbr = text[:m.start()].strip() @@ -293,6 +310,7 @@ def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): + # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA # create new reference target env = inliner.document.settings.env targetid = 'index-%s' % env.new_serialno('index') @@ -315,7 +333,7 @@ def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): entries = [('single', target, targetid, main, None)] indexnode = addnodes.index() indexnode['entries'] = entries - set_role_source_info(inliner, lineno, indexnode) + set_role_source_info(inliner, lineno, indexnode) # type: ignore textnode = nodes.Text(title, title) return [indexnode, targetnode, textnode], [] @@ -338,6 +356,7 @@ 
specific_docroles = { def setup(app): + # type: (Sphinx) -> Dict[unicode, Any] from docutils.parsers.rst import roles for rolename, nodeclass in iteritems(generic_docroles): diff --git a/sphinx/transforms/i18n.py b/sphinx/transforms/i18n.py index 04bc4f8ee..bd49077d8 100644 --- a/sphinx/transforms/i18n.py +++ b/sphinx/transforms/i18n.py @@ -304,6 +304,7 @@ class Locale(Transform): # * use translated refname for section refname. # * inline reference "`Python <...>`_" has no 'refname'. def is_refnamed_ref(node): + # type: (nodes.Node) -> bool return isinstance(node, nodes.reference) and \ 'refname' in node old_refs = node.traverse(is_refnamed_ref) diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py index 44c28f6b2..f5ddd036c 100644 --- a/sphinx/util/nodes.py +++ b/sphinx/util/nodes.py @@ -239,9 +239,9 @@ def clean_astext(node): def split_explicit_title(text): - # type: (str) -> Tuple[bool, unicode, unicode] + # type: (unicode) -> Tuple[bool, unicode, unicode] """Split role content into title and target, if given.""" - match = explicit_title_re.match(text) + match = explicit_title_re.match(text) # type: ignore if match: return True, match.group(1), match.group(2) return False, text, text diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py index bc46b5b92..b76758ae7 100644 --- a/sphinx/writers/html.py +++ b/sphinx/writers/html.py @@ -24,6 +24,12 @@ from sphinx.util import logging from sphinx.util.images import get_image_size from sphinx.util.smartypants import sphinx_smarty_pants +if False: + # For type annotation + from typing import Any # NOQA + from sphinx.builders.html import StandaloneHTMLBuilder # NOQA + + logger = logging.getLogger(__name__) # A good overview of the purpose behind these classes can be found here: @@ -39,10 +45,12 @@ class HTMLWriter(Writer): _setting[2]['default'] = 0 def __init__(self, builder): + # type: (StandaloneHTMLBuilder) -> None Writer.__init__(self) self.builder = builder def translate(self): + # type: () -> None # sadly, this is mostly copied from parent class self.visitor = visitor = self.builder.translator_class(self.builder, self.document) @@ -63,6 +71,7 @@ class HTMLTranslator(BaseTranslator): """ def __init__(self, builder, *args, **kwds): + # type: (StandaloneHTMLBuilder, Any, Any) -> None BaseTranslator.__init__(self, *args, **kwds) self.highlighter = builder.highlighter self.no_smarty = 0 @@ -82,22 +91,28 @@ class HTMLTranslator(BaseTranslator): self.param_separator = '' self.optional_param_level = 0 self._table_row_index = 0 + self.required_params_left = 0 def visit_start_of_file(self, node): + # type: (nodes.Node) -> None # only occurs in the single-file builder self.docnames.append(node['docname']) self.body.append('<span id="document-%s"></span>' % node['docname']) def depart_start_of_file(self, node): + # type: (nodes.Node) -> None self.docnames.pop() def visit_desc(self, node): + # type: (nodes.Node) -> None self.body.append(self.starttag(node, 'dl', CLASS=node['objtype'])) def depart_desc(self, node): + # type: (nodes.Node) -> None self.body.append('</dl>\n\n') def visit_desc_signature(self, node): + # type: (nodes.Node) -> None # the id is set automatically self.body.append(self.starttag(node, 'dt')) # anchor for per-desc interactive data @@ -106,44 +121,56 @@ class HTMLTranslator(BaseTranslator): self.body.append('<!--[%s]-->' % node['ids'][0]) def depart_desc_signature(self, node): + # type: (nodes.Node) -> None if not node.get('is_multiline'): self.add_permalink_ref(node, _('Permalink to this definition')) 
self.body.append('</dt>\n') def visit_desc_signature_line(self, node): + # type: (nodes.Node) -> None pass def depart_desc_signature_line(self, node): + # type: (nodes.Node) -> None if node.get('add_permalink'): # the permalink info is on the parent desc_signature node self.add_permalink_ref(node.parent, _('Permalink to this definition')) self.body.append('<br />') def visit_desc_addname(self, node): + # type: (nodes.Node) -> None self.body.append(self.starttag(node, 'code', '', CLASS='descclassname')) def depart_desc_addname(self, node): + # type: (nodes.Node) -> None self.body.append('</code>') def visit_desc_type(self, node): + # type: (nodes.Node) -> None pass def depart_desc_type(self, node): + # type: (nodes.Node) -> None pass def visit_desc_returns(self, node): + # type: (nodes.Node) -> None self.body.append(' → ') def depart_desc_returns(self, node): + # type: (nodes.Node) -> None pass def visit_desc_name(self, node): + # type: (nodes.Node) -> None self.body.append(self.starttag(node, 'code', '', CLASS='descname')) def depart_desc_name(self, node): + # type: (nodes.Node) -> None self.body.append('</code>') def visit_desc_parameterlist(self, node): + # type: (nodes.Node) -> None self.body.append('<span class="sig-paren">(</span>') self.first_param = 1 self.optional_param_level = 0 @@ -153,6 +180,7 @@ class HTMLTranslator(BaseTranslator): self.param_separator = node.child_text_separator def depart_desc_parameterlist(self, node): + # type: (nodes.Node) -> None self.body.append('<span class="sig-paren">)</span>') # If required parameters are still to come, then put the comma after @@ -162,6 +190,7 @@ class HTMLTranslator(BaseTranslator): # foo([a, ]b, c[, d]) # def visit_desc_parameter(self, node): + # type: (nodes.Node) -> None if self.first_param: self.first_param = 0 elif not self.required_params_left: @@ -172,39 +201,49 @@ class HTMLTranslator(BaseTranslator): self.body.append('<em>') def depart_desc_parameter(self, node): + # type: (nodes.Node) -> None if not node.hasattr('noemph'): self.body.append('</em>') if self.required_params_left: self.body.append(self.param_separator) def visit_desc_optional(self, node): + # type: (nodes.Node) -> None self.optional_param_level += 1 self.body.append('<span class="optional">[</span>') def depart_desc_optional(self, node): + # type: (nodes.Node) -> None self.optional_param_level -= 1 self.body.append('<span class="optional">]</span>') def visit_desc_annotation(self, node): + # type: (nodes.Node) -> None self.body.append(self.starttag(node, 'em', '', CLASS='property')) def depart_desc_annotation(self, node): + # type: (nodes.Node) -> None self.body.append('</em>') def visit_desc_content(self, node): + # type: (nodes.Node) -> None self.body.append(self.starttag(node, 'dd', '')) def depart_desc_content(self, node): + # type: (nodes.Node) -> None self.body.append('</dd>') def visit_versionmodified(self, node): + # type: (nodes.Node) -> None self.body.append(self.starttag(node, 'div', CLASS=node['type'])) def depart_versionmodified(self, node): + # type: (nodes.Node) -> None self.body.append('</div>\n') # overwritten def visit_reference(self, node): + # type: (nodes.Node) -> None atts = {'class': 'reference'} if node.get('internal') or 'refuri' not in node: atts['class'] += ' internal' @@ -234,17 +273,21 @@ class HTMLTranslator(BaseTranslator): '.'.join(map(str, node['secnumber']))) def visit_number_reference(self, node): + # type: (nodes.Node) -> None self.visit_reference(node) def depart_number_reference(self, node): + # type: (nodes.Node) -> 
None self.depart_reference(node) # overwritten -- we don't want source comments to show up in the HTML def visit_comment(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode # overwritten def visit_admonition(self, node, name=''): + # type: (nodes.Node, unicode) -> None self.body.append(self.starttag( node, 'div', CLASS=('admonition ' + name))) if name: @@ -252,12 +295,15 @@ class HTMLTranslator(BaseTranslator): self.set_first_last(node) def visit_seealso(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, 'seealso') def depart_seealso(self, node): + # type: (nodes.Node) -> None self.depart_admonition(node) def add_secnumber(self, node): + # type: (nodes.Node) -> None if node.get('secnumber'): self.body.append('.'.join(map(str, node['secnumber'])) + self.secnumber_suffix) @@ -279,7 +325,9 @@ class HTMLTranslator(BaseTranslator): self.secnumber_suffix) def add_fignumber(self, node): + # type: (nodes.Node) -> None def append_fignumber(figtype, figure_id): + # type: (unicode, unicode) -> None if self.builder.name == 'singlehtml': key = (self.docnames[-1], figtype) else: @@ -296,7 +344,7 @@ class HTMLTranslator(BaseTranslator): self.body.append(prefix % '.'.join(map(str, numbers)) + ' ') self.body.append('</span>') - figtype = self.builder.env.domains['std'].get_figtype(node) + figtype = self.builder.env.domains['std'].get_figtype(node) # type: ignore if figtype: if len(node['ids']) == 0: msg = 'Any IDs not assigned for %s node' % node.tagname @@ -305,11 +353,13 @@ class HTMLTranslator(BaseTranslator): append_fignumber(figtype, node['ids'][0]) def add_permalink_ref(self, node, title): + # type: (nodes.Node, unicode) -> None if node['ids'] and self.permalink_text and self.builder.add_permalinks: format = u'<a class="headerlink" href="#%s" title="%s">%s</a>' self.body.append(format % (node['ids'][0], title, self.permalink_text)) def generate_targets_for_listing(self, node): + # type: (nodes.Node) -> None """Generate hyperlink targets for listings. 
Original visit_bullet_list(), visit_definition_list() and visit_enumerated_list() @@ -325,6 +375,7 @@ class HTMLTranslator(BaseTranslator): # overwritten def visit_bullet_list(self, node): + # type: (nodes.Node) -> None if len(node) == 1 and node[0].tagname == 'toctree': # avoid emitting empty <ul></ul> raise nodes.SkipNode @@ -333,11 +384,13 @@ class HTMLTranslator(BaseTranslator): # overwritten def visit_enumerated_list(self, node): + # type: (nodes.Node) -> None self.generate_targets_for_listing(node) BaseTranslator.visit_enumerated_list(self, node) # overwritten def visit_title(self, node): + # type: (nodes.Node) -> None BaseTranslator.visit_title(self, node) self.add_secnumber(node) self.add_fignumber(node.parent) @@ -345,6 +398,7 @@ class HTMLTranslator(BaseTranslator): self.body.append('<span class="caption-text">') def depart_title(self, node): + # type: (nodes.Node) -> None close_tag = self.context[-1] if (self.permalink_text and self.builder.add_permalinks and node.parent.hasattr('ids') and node.parent['ids']): @@ -367,6 +421,7 @@ class HTMLTranslator(BaseTranslator): # overwritten def visit_literal_block(self, node): + # type: (nodes.Node) -> None if node.rawsource != node.astext(): # most probably a parsed-literal block -- don't highlight return BaseTranslator.visit_literal_block(self, node) @@ -396,6 +451,7 @@ class HTMLTranslator(BaseTranslator): raise nodes.SkipNode def visit_caption(self, node): + # type: (nodes.Node) -> None if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'): self.body.append('<div class="code-block-caption">') else: @@ -404,6 +460,7 @@ class HTMLTranslator(BaseTranslator): self.body.append(self.starttag(node, 'span', '', CLASS='caption-text')) def depart_caption(self, node): + # type: (nodes.Node) -> None self.body.append('</span>') # append permalink if available @@ -422,26 +479,32 @@ class HTMLTranslator(BaseTranslator): BaseTranslator.depart_caption(self, node) def visit_doctest_block(self, node): + # type: (nodes.Node) -> None self.visit_literal_block(node) # overwritten to add the <div> (for XHTML compliance) def visit_block_quote(self, node): + # type: (nodes.Node) -> None self.body.append(self.starttag(node, 'blockquote') + '<div>') def depart_block_quote(self, node): + # type: (nodes.Node) -> None self.body.append('</div></blockquote>\n') # overwritten def visit_literal(self, node): + # type: (nodes.Node) -> None self.body.append(self.starttag(node, 'code', '', CLASS='docutils literal')) self.protect_literal_text += 1 def depart_literal(self, node): + # type: (nodes.Node) -> None self.protect_literal_text -= 1 self.body.append('</code>') def visit_productionlist(self, node): + # type: (nodes.Node) -> None self.body.append(self.starttag(node, 'pre')) names = [] for production in node: @@ -461,23 +524,29 @@ class HTMLTranslator(BaseTranslator): raise nodes.SkipNode def depart_productionlist(self, node): + # type: (nodes.Node) -> None pass def visit_production(self, node): + # type: (nodes.Node) -> None pass def depart_production(self, node): + # type: (nodes.Node) -> None pass def visit_centered(self, node): + # type: (nodes.Node) -> None self.body.append(self.starttag(node, 'p', CLASS="centered") + '<strong>') def depart_centered(self, node): + # type: (nodes.Node) -> None self.body.append('</strong></p>') # overwritten def should_be_compact_paragraph(self, node): + # type: (nodes.Node) -> bool """Determine if the <p> tags around paragraph can be omitted.""" if isinstance(node.parent, addnodes.desc_content): # Never 
compact desc_content items. @@ -488,19 +557,24 @@ class HTMLTranslator(BaseTranslator): return BaseTranslator.should_be_compact_paragraph(self, node) def visit_compact_paragraph(self, node): + # type: (nodes.Node) -> None pass def depart_compact_paragraph(self, node): + # type: (nodes.Node) -> None pass def visit_highlightlang(self, node): + # type: (nodes.Node) -> None self.highlightlang = node['lang'] self.highlightlinenothreshold = node['linenothreshold'] def depart_highlightlang(self, node): + # type: (nodes.Node) -> None pass def visit_download_reference(self, node): + # type: (nodes.Node) -> None if self.builder.download_support and node.hasattr('filename'): self.body.append( '<a class="reference download internal" href="%s" download="">' % @@ -510,10 +584,12 @@ class HTMLTranslator(BaseTranslator): self.context.append('') def depart_download_reference(self, node): + # type: (nodes.Node) -> None self.body.append(self.context.pop()) # overwritten def visit_image(self, node): + # type: (nodes.Node) -> None olduri = node['uri'] # rewrite the URI if the environment knows about it if olduri in self.builder.images: @@ -555,51 +631,65 @@ class HTMLTranslator(BaseTranslator): # overwritten def depart_image(self, node): + # type: (nodes.Node) -> None if node['uri'].lower().endswith(('svg', 'svgz')): self.body.append(self.context.pop()) else: BaseTranslator.depart_image(self, node) def visit_toctree(self, node): + # type: (nodes.Node) -> None # this only happens when formatting a toc from env.tocs -- in this # case we don't want to include the subtree raise nodes.SkipNode def visit_index(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_tabular_col_spec(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_glossary(self, node): + # type: (nodes.Node) -> None pass def depart_glossary(self, node): + # type: (nodes.Node) -> None pass def visit_acks(self, node): + # type: (nodes.Node) -> None pass def depart_acks(self, node): + # type: (nodes.Node) -> None pass def visit_hlist(self, node): + # type: (nodes.Node) -> None self.body.append('<table class="hlist"><tr>') def depart_hlist(self, node): + # type: (nodes.Node) -> None self.body.append('</tr></table>\n') def visit_hlistcol(self, node): + # type: (nodes.Node) -> None self.body.append('<td>') def depart_hlistcol(self, node): + # type: (nodes.Node) -> None self.body.append('</td>') def bulk_text_processor(self, text): + # type: (unicode) -> unicode return text # overwritten def visit_Text(self, node): + # type: (nodes.Node) -> None text = node.astext() encoded = self.encode(text) if self.protect_literal_text: @@ -623,94 +713,122 @@ class HTMLTranslator(BaseTranslator): self.body.append(encoded) def visit_note(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, 'note') def depart_note(self, node): + # type: (nodes.Node) -> None self.depart_admonition(node) def visit_warning(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, 'warning') def depart_warning(self, node): + # type: (nodes.Node) -> None self.depart_admonition(node) def visit_attention(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, 'attention') def depart_attention(self, node): + # type: (nodes.Node) -> None self.depart_admonition() def visit_caution(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, 'caution') def depart_caution(self, node): + # type: (nodes.Node) -> None self.depart_admonition() def visit_danger(self, node): + # type: (nodes.Node) -> 
None self.visit_admonition(node, 'danger') def depart_danger(self, node): + # type: (nodes.Node) -> None self.depart_admonition() def visit_error(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, 'error') def depart_error(self, node): + # type: (nodes.Node) -> None self.depart_admonition() def visit_hint(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, 'hint') def depart_hint(self, node): + # type: (nodes.Node) -> None self.depart_admonition() def visit_important(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, 'important') def depart_important(self, node): + # type: (nodes.Node) -> None self.depart_admonition() def visit_tip(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, 'tip') def depart_tip(self, node): + # type: (nodes.Node) -> None self.depart_admonition() # these are only handled specially in the SmartyPantsHTMLTranslator def visit_literal_emphasis(self, node): + # type: (nodes.Node) -> None return self.visit_emphasis(node) def depart_literal_emphasis(self, node): + # type: (nodes.Node) -> None return self.depart_emphasis(node) def visit_literal_strong(self, node): + # type: (nodes.Node) -> None return self.visit_strong(node) def depart_literal_strong(self, node): + # type: (nodes.Node) -> None return self.depart_strong(node) def visit_abbreviation(self, node): + # type: (nodes.Node) -> None attrs = {} if node.hasattr('explanation'): attrs['title'] = node['explanation'] self.body.append(self.starttag(node, 'abbr', '', **attrs)) def depart_abbreviation(self, node): + # type: (nodes.Node) -> None self.body.append('</abbr>') def visit_manpage(self, node): - return self.visit_literal_emphasis(node) + # type: (nodes.Node) -> None + self.visit_literal_emphasis(node) def depart_manpage(self, node): - return self.depart_literal_emphasis(node) + # type: (nodes.Node) -> None + self.depart_literal_emphasis(node) # overwritten to add even/odd classes def visit_table(self, node): + # type: (nodes.Node) -> None self._table_row_index = 0 return BaseTranslator.visit_table(self, node) def visit_row(self, node): + # type: (nodes.Node) -> None self._table_row_index += 1 if self._table_row_index % 2 == 0: node['classes'].append('row-even') @@ -720,10 +838,12 @@ class HTMLTranslator(BaseTranslator): node.column = 0 def visit_field_list(self, node): + # type: (nodes.Node) -> None self._fieldlist_row_index = 0 return BaseTranslator.visit_field_list(self, node) def visit_field(self, node): + # type: (nodes.Node) -> None self._fieldlist_row_index += 1 if self._fieldlist_row_index % 2 == 0: node['classes'].append('field-even') @@ -732,6 +852,7 @@ class HTMLTranslator(BaseTranslator): self.body.append(self.starttag(node, 'tr', '', CLASS='field')) def visit_math(self, node, math_env=''): + # type: (nodes.Node, unicode) -> None logger.warning('using "math" markup without a Sphinx math extension ' 'active, please use one of the math extensions ' 'described at http://sphinx-doc.org/ext/math.html', @@ -739,6 +860,7 @@ class HTMLTranslator(BaseTranslator): raise nodes.SkipNode def unknown_visit(self, node): + # type: (nodes.Node) -> None raise NotImplementedError('Unknown node: ' + node.__class__.__name__) @@ -749,10 +871,12 @@ class SmartyPantsHTMLTranslator(HTMLTranslator): """ def __init__(self, *args, **kwds): + # type: (Any, Any) -> None self.no_smarty = 0 HTMLTranslator.__init__(self, *args, **kwds) def visit_literal(self, node): + # type: (nodes.Node) -> None self.no_smarty += 1 try: # this raises SkipNode @@ -761,6 
+885,7 @@ class SmartyPantsHTMLTranslator(HTMLTranslator): self.no_smarty -= 1 def visit_literal_block(self, node): + # type: (nodes.Node) -> None self.no_smarty += 1 try: HTMLTranslator.visit_literal_block(self, node) @@ -771,34 +896,42 @@ class SmartyPantsHTMLTranslator(HTMLTranslator): raise def depart_literal_block(self, node): + # type: (nodes.Node) -> None HTMLTranslator.depart_literal_block(self, node) self.no_smarty -= 1 def visit_literal_emphasis(self, node): + # type: (nodes.Node) -> None self.no_smarty += 1 self.visit_emphasis(node) def depart_literal_emphasis(self, node): + # type: (nodes.Node) -> None self.depart_emphasis(node) self.no_smarty -= 1 def visit_literal_strong(self, node): + # type: (nodes.Node) -> None self.no_smarty += 1 self.visit_strong(node) def depart_literal_strong(self, node): + # type: (nodes.Node) -> None self.depart_strong(node) self.no_smarty -= 1 def visit_desc_signature(self, node): + # type: (nodes.Node) -> None self.no_smarty += 1 HTMLTranslator.visit_desc_signature(self, node) def depart_desc_signature(self, node): + # type: (nodes.Node) -> None self.no_smarty -= 1 HTMLTranslator.depart_desc_signature(self, node) def visit_productionlist(self, node): + # type: (nodes.Node) -> None self.no_smarty += 1 try: HTMLTranslator.visit_productionlist(self, node) @@ -806,14 +939,17 @@ class SmartyPantsHTMLTranslator(HTMLTranslator): self.no_smarty -= 1 def visit_option(self, node): + # type: (nodes.Node) -> None self.no_smarty += 1 HTMLTranslator.visit_option(self, node) def depart_option(self, node): + # type: (nodes.Node) -> None self.no_smarty -= 1 HTMLTranslator.depart_option(self, node) def bulk_text_processor(self, text): + # type: (unicode) -> unicode if self.no_smarty <= 0: return sphinx_smarty_pants(text) return text diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index a7fc0690f..5d24026f1 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -1794,6 +1794,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append('\\end{sphinxadmonition}\n') def _make_visit_admonition(name): + # type: (unicode) -> Callable[[LaTeXTranslator, nodes.Node], None] def visit_admonition(self, node): # type: (nodes.Node) -> None self.body.append(u'\n\\begin{sphinxadmonition}{%s}{%s:}' % diff --git a/sphinx/writers/manpage.py b/sphinx/writers/manpage.py index e3ce3ed7a..c0ba28bbf 100644 --- a/sphinx/writers/manpage.py +++ b/sphinx/writers/manpage.py @@ -22,17 +22,24 @@ from sphinx.util import logging import sphinx.util.docutils from sphinx.util.i18n import format_date +if False: + # For type annotation + from typing import Any # NOQA + from sphinx.builders import Builder # NOQA + logger = logging.getLogger(__name__) class ManualPageWriter(Writer): def __init__(self, builder): + # type: (Builder) -> None Writer.__init__(self) self.builder = builder self.translator_class = ( self.builder.translator_class or ManualPageTranslator) def translate(self): + # type: () -> None transform = NestedInlineTransform(self.document) transform.apply() visitor = self.translator_class(self.builder, self.document) @@ -53,10 +60,13 @@ class NestedInlineTransform(object): <strong>&bar=</strong><emphasis>2</emphasis> """ def __init__(self, document): + # type: (nodes.document) -> None self.document = document def apply(self): + # type: () -> None def is_inline(node): + # type: (nodes.Node) -> bool return isinstance(node, (nodes.literal, nodes.emphasis, nodes.strong)) for node in self.document.traverse(is_inline): @@ -77,6 +87,7 @@ class 
ManualPageTranslator(BaseTranslator): """ def __init__(self, builder, *args, **kwds): + # type: (Builder, Any, Any) -> None BaseTranslator.__init__(self, *args, **kwds) self.builder = builder @@ -114,6 +125,7 @@ class ManualPageTranslator(BaseTranslator): # overwritten -- added quotes around all .TH arguments def header(self): + # type: () -> unicode tmpl = (".TH \"%(title_upper)s\" \"%(manual_section)s\"" " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n" ".SH NAME\n" @@ -121,96 +133,125 @@ class ManualPageTranslator(BaseTranslator): return tmpl % self._docinfo def visit_start_of_file(self, node): + # type: (nodes.Node) -> None pass def depart_start_of_file(self, node): + # type: (nodes.Node) -> None pass def visit_desc(self, node): + # type: (nodes.Node) -> None self.visit_definition_list(node) def depart_desc(self, node): + # type: (nodes.Node) -> None self.depart_definition_list(node) def visit_desc_signature(self, node): + # type: (nodes.Node) -> None self.visit_definition_list_item(node) self.visit_term(node) def depart_desc_signature(self, node): + # type: (nodes.Node) -> None self.depart_term(node) def visit_desc_signature_line(self, node): + # type: (nodes.Node) -> None pass def depart_desc_signature_line(self, node): + # type: (nodes.Node) -> None self.body.append(' ') def visit_desc_addname(self, node): + # type: (nodes.Node) -> None pass def depart_desc_addname(self, node): + # type: (nodes.Node) -> None pass def visit_desc_type(self, node): + # type: (nodes.Node) -> None pass def depart_desc_type(self, node): + # type: (nodes.Node) -> None pass def visit_desc_returns(self, node): + # type: (nodes.Node) -> None self.body.append(' -> ') def depart_desc_returns(self, node): + # type: (nodes.Node) -> None pass def visit_desc_name(self, node): + # type: (nodes.Node) -> None pass def depart_desc_name(self, node): + # type: (nodes.Node) -> None pass def visit_desc_parameterlist(self, node): + # type: (nodes.Node) -> None self.body.append('(') self.first_param = 1 def depart_desc_parameterlist(self, node): + # type: (nodes.Node) -> None self.body.append(')') def visit_desc_parameter(self, node): + # type: (nodes.Node) -> None if not self.first_param: self.body.append(', ') else: self.first_param = 0 def depart_desc_parameter(self, node): + # type: (nodes.Node) -> None pass def visit_desc_optional(self, node): + # type: (nodes.Node) -> None self.body.append('[') def depart_desc_optional(self, node): + # type: (nodes.Node) -> None self.body.append(']') def visit_desc_annotation(self, node): + # type: (nodes.Node) -> None pass def depart_desc_annotation(self, node): + # type: (nodes.Node) -> None pass def visit_desc_content(self, node): + # type: (nodes.Node) -> None self.visit_definition(node) def depart_desc_content(self, node): + # type: (nodes.Node) -> None self.depart_definition(node) def visit_versionmodified(self, node): + # type: (nodes.Node) -> None self.visit_paragraph(node) def depart_versionmodified(self, node): + # type: (nodes.Node) -> None self.depart_paragraph(node) # overwritten -- don't make whole of term bold if it includes strong node def visit_term(self, node): + # type: (nodes.Node) -> None if node.traverse(nodes.strong): self.body.append('\n') else: @@ -218,15 +259,18 @@ class ManualPageTranslator(BaseTranslator): # overwritten -- we don't want source comments to show up def visit_comment(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode # overwritten -- added ensure_eol() def visit_footnote(self, node): + # type: (nodes.Node) -> None 
self.ensure_eol() BaseTranslator.visit_footnote(self, node) # overwritten -- handle footnotes rubric def visit_rubric(self, node): + # type: (nodes.Node) -> None self.ensure_eol() if len(node.children) == 1: rubtitle = node.children[0].astext() @@ -238,15 +282,19 @@ class ManualPageTranslator(BaseTranslator): self.body.append('.sp\n') def depart_rubric(self, node): + # type: (nodes.Node) -> None pass def visit_seealso(self, node): + # type: (nodes.Node) -> None self.visit_admonition(node, 'seealso') def depart_seealso(self, node): + # type: (nodes.Node) -> None self.depart_admonition(node) def visit_productionlist(self, node): + # type: (nodes.Node) -> None self.ensure_eol() names = [] self.in_productionlist += 1 @@ -271,13 +319,16 @@ class ManualPageTranslator(BaseTranslator): raise nodes.SkipNode def visit_production(self, node): + # type: (nodes.Node) -> None pass def depart_production(self, node): + # type: (nodes.Node) -> None pass # overwritten -- don't emit a warning for images def visit_image(self, node): + # type: (nodes.Node) -> None if 'alt' in node.attributes: self.body.append(_('[image: %s]') % node['alt'] + '\n') self.body.append(_('[image]') + '\n') @@ -285,6 +336,7 @@ class ManualPageTranslator(BaseTranslator): # overwritten -- don't visit inner marked up nodes def visit_reference(self, node): + # type: (nodes.Node) -> None self.body.append(self.defs['reference'][0]) # avoid repeating escaping code... fine since # visit_Text calls astext() and only works on that afterwards @@ -306,51 +358,66 @@ class ManualPageTranslator(BaseTranslator): raise nodes.SkipNode def visit_number_reference(self, node): + # type: (nodes.Node) -> None text = nodes.Text(node.get('title', '#')) self.visit_Text(text) raise nodes.SkipNode def visit_centered(self, node): + # type: (nodes.Node) -> None self.ensure_eol() self.body.append('.sp\n.ce\n') def depart_centered(self, node): + # type: (nodes.Node) -> None self.body.append('\n.ce 0\n') def visit_compact_paragraph(self, node): + # type: (nodes.Node) -> None pass def depart_compact_paragraph(self, node): + # type: (nodes.Node) -> None pass def visit_highlightlang(self, node): + # type: (nodes.Node) -> None pass def depart_highlightlang(self, node): + # type: (nodes.Node) -> None pass def visit_download_reference(self, node): + # type: (nodes.Node) -> None pass def depart_download_reference(self, node): + # type: (nodes.Node) -> None pass def visit_toctree(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_index(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_tabular_col_spec(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_glossary(self, node): + # type: (nodes.Node) -> None pass def depart_glossary(self, node): + # type: (nodes.Node) -> None pass def visit_acks(self, node): + # type: (nodes.Node) -> None self.ensure_eol() self.body.append(', '.join(n.astext() for n in node.children[0].children) + '.') @@ -358,43 +425,56 @@ class ManualPageTranslator(BaseTranslator): raise nodes.SkipNode def visit_hlist(self, node): + # type: (nodes.Node) -> None self.visit_bullet_list(node) def depart_hlist(self, node): + # type: (nodes.Node) -> None self.depart_bullet_list(node) def visit_hlistcol(self, node): + # type: (nodes.Node) -> None pass def depart_hlistcol(self, node): + # type: (nodes.Node) -> None pass def visit_literal_emphasis(self, node): + # type: (nodes.Node) -> None return self.visit_emphasis(node) def depart_literal_emphasis(self, node): + # type: (nodes.Node) -> None 
return self.depart_emphasis(node) def visit_literal_strong(self, node): + # type: (nodes.Node) -> None return self.visit_strong(node) def depart_literal_strong(self, node): + # type: (nodes.Node) -> None return self.depart_strong(node) def visit_abbreviation(self, node): + # type: (nodes.Node) -> None pass def depart_abbreviation(self, node): + # type: (nodes.Node) -> None pass def visit_manpage(self, node): + # type: (nodes.Node) -> None return self.visit_strong(node) def depart_manpage(self, node): + # type: (nodes.Node) -> None return self.depart_strong(node) # overwritten: handle section titles better than in 0.6 release def visit_title(self, node): + # type: (nodes.Node) -> None if isinstance(node.parent, addnodes.seealso): self.body.append('.IP "') return @@ -409,26 +489,32 @@ class ManualPageTranslator(BaseTranslator): return BaseTranslator.visit_title(self, node) def depart_title(self, node): + # type: (nodes.Node) -> None if isinstance(node.parent, addnodes.seealso): self.body.append('"\n') return return BaseTranslator.depart_title(self, node) def visit_raw(self, node): + # type: (nodes.Node) -> None if 'manpage' in node.get('format', '').split(): self.body.append(node.astext()) raise nodes.SkipNode def visit_meta(self, node): + # type: (nodes.Node) -> None raise nodes.SkipNode def visit_inline(self, node): + # type: (nodes.Node) -> None pass def depart_inline(self, node): + # type: (nodes.Node) -> None pass def visit_math(self, node): + # type: (nodes.Node) -> None logger.warning('using "math" markup without a Sphinx math extension ' 'active, please use one of the math extensions ' 'described at http://sphinx-doc.org/ext/math.html') @@ -437,4 +523,5 @@ class ManualPageTranslator(BaseTranslator): visit_math_block = visit_math def unknown_visit(self, node): + # type: (nodes.Node) -> None raise NotImplementedError('Unknown node: ' + node.__class__.__name__) diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py index 741009995..a6908d113 100644 --- a/sphinx/writers/texinfo.py +++ b/sphinx/writers/texinfo.py @@ -1055,6 +1055,7 @@ class TexinfoTranslator(nodes.NodeVisitor): pass def visit_term(self, node): + # type: (nodes.Node) -> None for id in node.get('ids'): self.add_anchor(id, node) # anchors and indexes need to go in front @@ -1209,6 +1210,7 @@ class TexinfoTranslator(nodes.NodeVisitor): '@end cartouche\n') def _make_visit_admonition(name): + # type: (unicode) -> Callable[[TexinfoTranslator, nodes.Node], None] def visit(self, node): # type: (nodes.Node) -> None self.visit_admonition(node, admonitionlabels[name]) diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py index 46de7a582..8b4296057 100644 --- a/sphinx/writers/text.py +++ b/sphinx/writers/text.py @@ -863,6 +863,7 @@ class TextTranslator(nodes.NodeVisitor): self.add_text(self.nl) def _make_depart_admonition(name): + # type: (unicode) -> Callable[[TextTranslator, nodes.Node], None] def depart_admonition(self, node): # type: (nodes.NodeVisitor, nodes.Node) -> None self.end_state(first=admonitionlabels[name] + ': ') From 78c7206deea808c4e5a3c4bd9cae2b246f0b640e Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 8 Feb 2017 10:48:12 +0900 Subject: [PATCH 179/190] Refactor testcase --- tests/test_build_latex.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 30ca73eda..e39d30599 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -820,7 +820,8 @@ 
def test_maxlistdepth_at_ten(app, status, warning): @pytest.mark.skipif(docutils.__version_info__ < (0, 13), reason='docutils-0.13 or above is required') @pytest.mark.sphinx('latex', testroot='latex-table') -def test_latex_table(app, status, warning): +@pytest.mark.test_params(shared_result='test_latex_table') +def test_latex_table_tabulars(app, status, warning): app.builder.build_all() result = (app.outdir / 'test.tex').text(encoding='utf8') tables = {} @@ -859,12 +860,6 @@ def test_latex_table(app, status, warning): # table having :widths: option table = tables['table having :widths: option'] assert ('\\noindent\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) - assert ('\\hline\n' - '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' - '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in table) - assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table) - assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) - assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) assert ('\\hline\n\\end{tabular}' in table) # table with tabularcolumn @@ -876,14 +871,7 @@ def test_latex_table(app, status, warning): assert ('\\begin{threeparttable}\n\\capstart\\caption{caption for table}' '\\label{\\detokenize{tabular:id1}}' in table) assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|}' in table) - assert ('\\hline\n' - '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' - '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in table) - assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table) - assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) - assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) - assert ('\\hline\n\\end{tabulary}' in table) - assert ('\\end{threeparttable}' in table) + assert ('\\hline\n\\end{tabulary}\n\\end{threeparttable}' in table) # table having verbatim table = tables['table having verbatim'] @@ -897,6 +885,19 @@ def test_latex_table(app, status, warning): table = tables['table having both :widths: and problematic cell'] assert ('\\noindent\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) + +@pytest.mark.skipif(docutils.__version_info__ < (0, 13), + reason='docutils-0.13 or above is required') +@pytest.mark.sphinx('latex', testroot='latex-table') +@pytest.mark.test_params(shared_result='test_latex_table') +def test_latex_table_longtable(app, status, warning): + app.builder.build_all() + result = (app.outdir / 'test.tex').text(encoding='utf8') + tables = {} + for chap in re.split(r'\\section{', result)[1:]: + sectname, content = chap.split('}', 1) + tables[sectname] = content.strip() + # longtable table = tables['longtable'] assert ('\\begin{longtable}{|l|l|}\n\\hline' in table) From fe2daffb4a854d79feb9434611bae886b695b84e Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Wed, 8 Feb 2017 10:34:57 +0900 Subject: [PATCH 180/190] Fix #3377: Add support for Docutils 0.13 ``:align:`` option for tables --- CHANGES | 1 + sphinx/templates/latex/longtable.tex_t | 10 +++++++- sphinx/templates/latex/tabular.tex_t | 14 +++++++++++ sphinx/templates/latex/tabulary.tex_t | 14 +++++++++++ sphinx/writers/latex.py | 1 + tests/roots/test-latex-table/longtable.rst | 15 +++++++++++ tests/roots/test-latex-table/tabular.rst | 29 ++++++++++++++++++++++ tests/test_build_latex.py | 16 ++++++++++++ 8 files changed, 99 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index c8e8c85e0..8e25b84ff 100644 --- a/CHANGES +++ b/CHANGES @@ -48,6 +48,7 @@ Features added (refs: #3379, #3381) * #3402: Allow to suppress "download file not 
readable" warnings using :confval:`suppress_warnings`. +* #3377: latex: Add support for Docutils 0.13 ``:align:`` option for tables Bugs fixed ---------- diff --git a/sphinx/templates/latex/longtable.tex_t b/sphinx/templates/latex/longtable.tex_t index 516f6d0fa..703a61418 100644 --- a/sphinx/templates/latex/longtable.tex_t +++ b/sphinx/templates/latex/longtable.tex_t @@ -1,4 +1,12 @@ -\begin{longtable}<%= table.get_colspec() %> +\begin{longtable} +<%- if table.align == 'center' -%> + [c] +<%- elif table.align == 'left' -%> + [l] +<%- elif table.align == 'right' -%> + [r] +<%- endif -%> +<%= table.get_colspec() %> <%- if table.caption -%> \caption{<%= ''.join(table.caption) %>}<%= labels %>\\ <% endif -%> diff --git a/sphinx/templates/latex/tabular.tex_t b/sphinx/templates/latex/tabular.tex_t index 27e5c30c0..7aeb7e85b 100644 --- a/sphinx/templates/latex/tabular.tex_t +++ b/sphinx/templates/latex/tabular.tex_t @@ -1,3 +1,10 @@ +<%- if table.align %> + <%- if table.align == 'center' -%> + \begin{center} + <%- elif table.align in ('left', 'right') -%> + \begin{flush<%= table.align%>} + <%- endif -%> +<% endif -%> <%- if table.caption -%> \begin{threeparttable} \capstart\caption{<%= ''.join(table.caption) %>}<%= labels %> @@ -10,3 +17,10 @@ <%- if table.caption %> \end{threeparttable} <%- endif %> +<%- if table.align %> + <%- if table.align == 'center' -%> + \end{center} + <%- elif table.align in ('left', 'right') -%> + \end{flush<%= table.align%>} + <% endif -%> +<% endif -%> diff --git a/sphinx/templates/latex/tabulary.tex_t b/sphinx/templates/latex/tabulary.tex_t index 11ec79b33..06b84ea7d 100644 --- a/sphinx/templates/latex/tabulary.tex_t +++ b/sphinx/templates/latex/tabulary.tex_t @@ -1,3 +1,10 @@ +<%- if table.align %> + <%- if table.align == 'center' -%> + \begin{center} + <%- elif table.align in ('left', 'right') -%> + \begin{flush<%= table.align%>} + <%- endif -%> +<% endif -%> <%- if table.caption -%> \begin{threeparttable} \capstart\caption{<%= ''.join(table.caption) %>}<%= labels %> @@ -7,6 +14,13 @@ <%= ''.join(table.header) %> <%=- ''.join(table.body) %> \end{tabulary} +<%- if table.align %> + <%- if table.align == 'center' -%> + \end{center} + <%- elif table.align in ('left', 'right') -%> + \end{flush<%= table.align%>} + <% endif -%> +<% endif -%> <%- if table.caption %> \end{threeparttable} <%- endif %> diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 5d24026f1..9547b5ef5 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -320,6 +320,7 @@ class Table(object): # type: (nodes.table) -> None self.header = [] # type: List[unicode] self.body = [] # type: List[unicode] + self.align = node.get('align') self.colcount = 0 self.colspec = None # type: unicode self.colwidths = [] # type: List[int] diff --git a/tests/roots/test-latex-table/longtable.rst b/tests/roots/test-latex-table/longtable.rst index 333c5b544..316dab775 100644 --- a/tests/roots/test-latex-table/longtable.rst +++ b/tests/roots/test-latex-table/longtable.rst @@ -30,6 +30,21 @@ longtable having :widths: option cell3-1 cell3-2 ======= ======= +longtable having :align: option +------------------------------- + +.. 
table:: + :align: right + :class: longtable + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= + longtable with tabularcolumn ---------------------------- diff --git a/tests/roots/test-latex-table/tabular.rst b/tests/roots/test-latex-table/tabular.rst index f7fdaf44a..d7bfbbd02 100644 --- a/tests/roots/test-latex-table/tabular.rst +++ b/tests/roots/test-latex-table/tabular.rst @@ -43,6 +43,35 @@ table having :widths: option cell3-1 cell3-2 ======= ======= +table having :align: option (tabulary) +-------------------------------------- + +.. table:: + :align: center + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= + +table having :align: option (tabular) +------------------------------------- + +.. table:: + :align: left + :widths: 30,70 + + ======= ======= + header1 header2 + ======= ======= + cell1-1 cell1-2 + cell2-1 cell2-2 + cell3-1 cell3-2 + ======= ======= + table with tabularcolumn ------------------------ diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index e39d30599..e0282501b 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -862,6 +862,17 @@ def test_latex_table_tabulars(app, status, warning): assert ('\\noindent\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) assert ('\\hline\n\\end{tabular}' in table) + # table having :align: option (tabulary) + table = tables['table having :align: option (tabulary)'] + assert ('\\begin{center}\\noindent\\begin{tabulary}{\\linewidth}{|L|L|}\n' in table) + assert ('\\hline\n\\end{tabulary}\\end{center}' in table) + + # table having :align: option (tabular) + table = tables['table having :align: option (tabular)'] + assert ('\\begin{flushleft}' + '\\noindent\\begin{tabular}{|\X{30}{100}|\X{70}{100}|}\n' in table) + assert ('\\hline\n\\end{tabular}\\end{flushleft}' in table) + # table with tabularcolumn table = tables['table with tabularcolumn'] assert ('\\noindent\\begin{tabulary}{\\linewidth}{|c|c|}' in table) @@ -923,6 +934,11 @@ def test_latex_table_longtable(app, status, warning): table = tables['longtable having :widths: option'] assert ('\\begin{longtable}{|\\X{30}{100}|\\X{70}{100}|}' in table) + # longtable having :align: option + table = tables['longtable having :align: option'] + assert ('\\begin{longtable}[r]{|l|l|}\n' in table) + assert ('\\hline\n\\end{longtable}' in table) + # longtable with tabularcolumn table = tables['longtable with tabularcolumn'] assert ('\\begin{longtable}{|c|c|}' in table) From 37f3050933e1828e485876b570235b95c5ac3edb Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 11 Feb 2017 12:19:12 +0900 Subject: [PATCH 181/190] latex: Fix assertion error for complex table (refs: #3395) --- sphinx/writers/latex.py | 34 ++++++++----- tests/roots/test-latex-table/complex.rst | 35 +++++++++++++ tests/roots/test-latex-table/index.rst | 1 + tests/roots/test-latex-table/tabular.rst | 17 ------- tests/test_build_latex.py | 63 +++++++++++++++++------- 5 files changed, 103 insertions(+), 47 deletions(-) create mode 100644 tests/roots/test-latex-table/complex.rst diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 9547b5ef5..8f2984637 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -1351,12 +1351,17 @@ class LaTeXTranslator(nodes.NodeVisitor): # type: (nodes.Node) -> None self.table.col = 0 - # fill column if first one is a wide-multirow - cell = 
self.table.cell(self.table.row, 0) - if cell and cell.row != self.table.row: # bottom part of multirow cell - self.table.col += cell.width - if cell.width > 1: # use \multicolumn for wide multirow cell - self.body.append('\\multicolumn{%d}{|l|}{}\\relax ' % cell.width) + # fill columns if the row starts with the bottom of multirow cell + while True: + cell = self.table.cell(self.table.row, self.table.col) + if cell is None: # not a bottom of multirow cell + break + else: # a bottom of multirow cell + self.table.col += cell.width + if cell.col != 0: + self.body.append('&') + if cell.width > 1: # use \multicolumn for wide multirow cell + self.body.append('\\multicolumn{%d}{|l|}{}\\relax ' % cell.width) def depart_row(self, node): # type: (nodes.Node) -> None @@ -1427,13 +1432,16 @@ class LaTeXTranslator(nodes.NodeVisitor): cell = self.table.cell() self.table.col += cell.width - # fill column if next one is a wide-multirow - nextcell = self.table.cell() - if nextcell and nextcell.row != self.table.row: # bottom part of multirow cell - self.table.col += nextcell.width - self.body.append('&') - if nextcell.width > 1: # use \multicolumn for wide multirow cell - self.body.append('\\multicolumn{%d}{l|}{}\\relax ' % nextcell.width) + # fill columns if next ones are a bottom of wide-multirow cell + while True: + nextcell = self.table.cell() + if nextcell is None: # not a bottom of multirow cell + break + else: # a bottom part of multirow cell + self.table.col += nextcell.width + self.body.append('&') + if nextcell.width > 1: # use \multicolumn for wide multirow cell + self.body.append('\\multicolumn{%d}{l|}{}\\relax ' % nextcell.width) def visit_acks(self, node): # type: (nodes.Node) -> None diff --git a/tests/roots/test-latex-table/complex.rst b/tests/roots/test-latex-table/complex.rst new file mode 100644 index 000000000..deca1e03f --- /dev/null +++ b/tests/roots/test-latex-table/complex.rst @@ -0,0 +1,35 @@ +complex tables +============== + +grid table +---------- + ++---------+---------+---------+ +| header1 | header2 | header3 | ++=========+=========+=========+ +| cell1-1 | cell1-2 | cell1-3 | ++---------+ +---------+ +| cell2-1 | | cell2-3 | ++ +---------+---------+ +| | cell3-2 | ++---------+ | +| cell4-1 | | ++---------+---------+---------+ +| cell5-1 | ++---------+---------+---------+ + +complex spanning cell +--------------------- + +table having ... 
+ +* consecutive multicol at top of row (1-1 and 1-2) +* consecutive multicol at end of row (1-4 and 1-5) + ++-----------+-----------+-----------+-----------+-----------+ +| | | cell1-3 | | | +| | +-----------+ | cell1-5 | +| cell1-1 | cell1-2 | | cell1-4 | | +| | | cell2-3 | +-----------+ +| | | | | cell3-5 | ++-----------+-----------+-----------+-----------+-----------+ diff --git a/tests/roots/test-latex-table/index.rst b/tests/roots/test-latex-table/index.rst index a0003b740..80dd11064 100644 --- a/tests/roots/test-latex-table/index.rst +++ b/tests/roots/test-latex-table/index.rst @@ -5,3 +5,4 @@ test-latex-table tabular longtable + complex diff --git a/tests/roots/test-latex-table/tabular.rst b/tests/roots/test-latex-table/tabular.rst index d7bfbbd02..9dad36edd 100644 --- a/tests/roots/test-latex-table/tabular.rst +++ b/tests/roots/test-latex-table/tabular.rst @@ -12,23 +12,6 @@ cell2-1 cell2-2 cell3-1 cell3-2 ======= ======= -grid table ----------- - -+---------+---------+---------+ -| header1 | header2 | header3 | -+=========+=========+=========+ -| cell1-1 | cell1-2 | cell1-3 | -+---------+ +---------+ -| cell2-1 | | cell2-2 | -+ +---------+---------+ -| | cell3-2 | -+---------+ | -| cell4-1 | | -+---------+---------+---------+ -| cell5-1 | -+---------+---------+---------+ - table having :widths: option ---------------------------- diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index e0282501b..81b9b79b1 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -840,23 +840,6 @@ def test_latex_table_tabulars(app, status, warning): assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) assert ('\\hline\n\\end{tabulary}' in table) - # grid table - table = tables['grid table'] - assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|L|}' in table) - assert ('\\hline\n' - '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' - '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax &' - '\\sphinxstylethead{\\relax \nheader3\n\\unskip}\\relax \\\\' in table) - assert ('\\hline\ncell1-1\n&\\multirow{2}{*}{\\relax \ncell1-2\n\\unskip}\\relax &\n' - 'cell1-3\n\\\\' in table) - assert ('\\cline{1-1}\\cline{3-3}\\multirow{2}{*}{\\relax \ncell2-1\n\\unskip}\\relax &&\n' - 'cell2-2\n\\\\' in table) - assert ('\\cline{2-3}&\\multicolumn{2}{l|}{\\relax \\multirow{2}{*}{\\relax \n' - 'cell3-2\n\\unskip}\\relax \\unskip}\\relax \\\\' in table) - assert ('\\cline{1-1}\ncell4-1\n&\\multicolumn{2}{l|}{}\\relax \\\\' in table) - assert ('\\hline\\multicolumn{3}{|l|}{\\relax \ncell5-1\n\\unskip}\\relax \\\\\n' - '\\hline\n\\end{tabulary}' in table) - # table having :widths: option table = tables['table having :widths: option'] assert ('\\noindent\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) @@ -959,3 +942,49 @@ def test_latex_table_longtable(app, status, warning): # longtable having both :widths: and problematic cell table = tables['longtable having both :widths: and problematic cell'] assert ('\\begin{longtable}{|\\X{30}{100}|\\X{70}{100}|}' in table) + + +@pytest.mark.skipif(docutils.__version_info__ < (0, 13), + reason='docutils-0.13 or above is required') +@pytest.mark.sphinx('latex', testroot='latex-table') +@pytest.mark.test_params(shared_result='test_latex_table') +def test_latex_table_complex_tables(app, status, warning): + app.builder.build_all() + result = (app.outdir / 'test.tex').text(encoding='utf8') + tables = {} + for chap in re.split(r'\\section{', result)[1:]: + sectname, content = chap.split('}', 1) + tables[sectname] = 
content.strip() + + # grid table + table = tables['grid table'] + assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|L|}' in table) + assert ('\\hline\n' + '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' + '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax &' + '\\sphinxstylethead{\\relax \nheader3\n\\unskip}\\relax \\\\' in table) + assert ('\\hline\ncell1-1\n&\\multirow{2}{*}{\\relax \ncell1-2\n\\unskip}\\relax &\n' + 'cell1-3\n\\\\' in table) + assert ('\\cline{1-1}\\cline{3-3}\\multirow{2}{*}{\\relax \ncell2-1\n\\unskip}\\relax &&\n' + 'cell2-3\n\\\\' in table) + assert ('\\cline{2-3}&\\multicolumn{2}{l|}{\\relax \\multirow{2}{*}{\\relax \n' + 'cell3-2\n\\unskip}\\relax \\unskip}\\relax \\\\' in table) + assert ('\\cline{1-1}\ncell4-1\n&\\multicolumn{2}{l|}{}\\relax \\\\' in table) + assert ('\\hline\\multicolumn{3}{|l|}{\\relax \ncell5-1\n\\unskip}\\relax \\\\\n' + '\\hline\n\\end{tabulary}' in table) + + # complex spanning cell + table = tables['complex spanning cell'] + assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|L|L|L|}' in table) + assert ('\\hline\n' + '\\multirow{3}{*}{\\relax \ncell1-1\n\\unskip}\\relax &' + '\\multirow{3}{*}{\\relax \ncell1-2\n\\unskip}\\relax &' + '\ncell1-3\n&' + '\\multirow{3}{*}{\\relax \ncell1-4\n\\unskip}\\relax &' + '\\multirow{2}{*}{\\relax \ncell1-5\n\\unskip}\\relax \\\\\n' + in table) + assert ('\\cline{3-3}&&' + '\\multirow{2}{*}{\\relax \ncell2-3\n\\unskip}\\relax &&\\\\\n' + in table) + assert ('\\cline{5-5}&&&&\ncell3-5\n\\\\\n' in table) + assert ('\\hline\n\\end{tabulary}' in table) From 424041f049c9cb87c3e6b41c606d9add3528edc9 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Sat, 11 Feb 2017 09:21:56 +0100 Subject: [PATCH 182/190] fix tabulary template closing environments in wrong order (refs: #3405) --- sphinx/templates/latex/tabulary.tex_t | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sphinx/templates/latex/tabulary.tex_t b/sphinx/templates/latex/tabulary.tex_t index 06b84ea7d..d09a4b14b 100644 --- a/sphinx/templates/latex/tabulary.tex_t +++ b/sphinx/templates/latex/tabulary.tex_t @@ -14,6 +14,9 @@ <%= ''.join(table.header) %> <%=- ''.join(table.body) %> \end{tabulary} +<%- if table.caption %> +\end{threeparttable} +<%- endif %> <%- if table.align %> <%- if table.align == 'center' -%> \end{center} @@ -21,6 +24,3 @@ \end{flush<%= table.align%>} <% endif -%> <% endif -%> -<%- if table.caption %> -\end{threeparttable} -<%- endif %> From 1d3f5c2c55dda7175bff8fe6d8e6fa8203d0d76d Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 11 Feb 2017 17:33:49 +0900 Subject: [PATCH 183/190] Fix typo --- tests/roots/test-latex-table/complex.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/roots/test-latex-table/complex.rst b/tests/roots/test-latex-table/complex.rst index deca1e03f..f3f927a3e 100644 --- a/tests/roots/test-latex-table/complex.rst +++ b/tests/roots/test-latex-table/complex.rst @@ -23,8 +23,8 @@ complex spanning cell table having ... 
-* consecutive multicol at top of row (1-1 and 1-2) -* consecutive multicol at end of row (1-4 and 1-5) +* consecutive multirow at top of row (1-1 and 1-2) +* consecutive multirow at end of row (1-4 and 1-5) +-----------+-----------+-----------+-----------+-----------+ | | | cell1-3 | | | From 58ea3caeac4a41e4fd7a823a107a8fd3edbecf95 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Sat, 11 Feb 2017 12:08:37 +0100 Subject: [PATCH 184/190] =?UTF-8?q?Revert=20"Improve=20docs=20about=20``:l?= =?UTF-8?q?ines:=CC=80``=20vs=20=CC=80``:start-after:``=20(refs=20#3412)"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 839e924808ad5c8cad8bed567e86eb7cd6970d7c. Indeed, it is better not to document how to use ``lines`` with ``start-after`` now if this is to be changed at next major release (refs --- doc/markup/code.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/doc/markup/code.rst b/doc/markup/code.rst index ff0f692fa..c7cb0f911 100644 --- a/doc/markup/code.rst +++ b/doc/markup/code.rst @@ -182,9 +182,7 @@ Includes ``start-after`` is given as a string option, only lines that follow the first line containing that string are included. If ``end-before`` is given as a string option, only lines that precede the first lines containing that string - are included. If used in combination with ``lines``, make sure the latter - allows the lines (whose count start at ``1`` at top of file) containing the - looked-for strings. + are included. When specifying particular parts of a file to display, it can be useful to display exactly which lines are being presented. From 8f4379ef91b4a63bbe6275d4bd09cff0a948b109 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 11 Feb 2017 23:42:39 +0900 Subject: [PATCH 185/190] Add testcase for parselinenos --- tests/test_util.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/tests/test_util.py b/tests/test_util.py index d97329668..2ee022604 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -8,7 +8,12 @@ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ -from sphinx.util import encode_uri, split_docinfo + +import pytest + +from sphinx.util import ( + encode_uri, parselinenos, split_docinfo +) def test_encode_uri(): @@ -46,3 +51,17 @@ def test_splitdocinfo(): docinfo, content = split_docinfo(source) assert docinfo == ":multiline: one\n\ttwo\n\tthree\n" assert content == '\nHello world.\n' + + +def test_parselinenos(): + assert parselinenos('1,2,3', 10) == [0, 1, 2] + assert parselinenos('4, 5, 6', 10) == [3, 4, 5] + assert parselinenos('7-9', 10) == [6, 7, 8] + assert parselinenos('7-', 10) == [6, 7, 8, 9] + assert parselinenos('1,7-', 10) == [0, 6, 7, 8, 9] + with pytest.raises(ValueError): + parselinenos('1-2-3', 10) + with pytest.raises(ValueError): + parselinenos('abc-def', 10) + with pytest.raises(ValueError): + parselinenos('-', 10) From 929e67ffca546af1e0aad2e04dc3ac18b02292c7 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Sat, 11 Feb 2017 23:43:28 +0900 Subject: [PATCH 186/190] Fix parselinenos() could not parse left half open range (cf. 
"-4") --- CHANGES | 1 + sphinx/util/__init__.py | 13 ++++++++----- tests/test_util.py | 1 + 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/CHANGES b/CHANGES index c21146bc7..1dc59cbc5 100644 --- a/CHANGES +++ b/CHANGES @@ -85,6 +85,7 @@ Bugs fixed * #3295: Could not import extension sphinx.builders.linkcheck * #3285: autosummary: asterisks are escaped twice * LaTeX, pass dvipdfm option to geometry package for Japanese documents (ref #3363) +* Fix parselinenos() could not parse left half open range (cf. "-4") Release 1.5.1 (released Dec 13, 2016) diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index dfe1f60b9..f619cd02e 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -357,14 +357,17 @@ def parselinenos(spec, total): for part in parts: try: begend = part.strip().split('-') - if len(begend) > 2: + if ['', ''] == begend: raise ValueError - if len(begend) == 1: + elif len(begend) == 1: items.append(int(begend[0]) - 1) + elif len(begend) == 2: + start, end = begend + start = start or 1 # left half open (cf. -10) + end = end or total # right half open (cf. 10-) + items.extend(range(int(start) - 1, int(end))) else: - start = (begend[0] == '') and 0 or int(begend[0]) - 1 - end = (begend[1] == '') and total or int(begend[1]) - items.extend(range(start, end)) + raise ValueError except Exception: raise ValueError('invalid line number spec: %r' % spec) return items diff --git a/tests/test_util.py b/tests/test_util.py index 2ee022604..f5d8af0f0 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -56,6 +56,7 @@ def test_splitdocinfo(): def test_parselinenos(): assert parselinenos('1,2,3', 10) == [0, 1, 2] assert parselinenos('4, 5, 6', 10) == [3, 4, 5] + assert parselinenos('-4', 10) == [0, 1, 2, 3] assert parselinenos('7-9', 10) == [6, 7, 8] assert parselinenos('7-', 10) == [6, 7, 8, 9] assert parselinenos('1,7-', 10) == [0, 6, 7, 8, 9] From dcb211f5de2fb18f7162b0fc261fa368b1ac7a18 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Sat, 11 Feb 2017 11:58:38 +0100 Subject: [PATCH 187/190] refactor latex table templates to avoid extra vertical spaces (refs #3405) also, make "center" the default for short table horizontal alignment (it is already so for longtable) --- sphinx/templates/latex/tabular.tex_t | 30 +++++++++++++------------- sphinx/templates/latex/tabulary.tex_t | 30 +++++++++++++------------- tests/test_build_latex.py | 31 ++++++++++++++------------- 3 files changed, 46 insertions(+), 45 deletions(-) diff --git a/sphinx/templates/latex/tabular.tex_t b/sphinx/templates/latex/tabular.tex_t index 7aeb7e85b..ea8649ccf 100644 --- a/sphinx/templates/latex/tabular.tex_t +++ b/sphinx/templates/latex/tabular.tex_t @@ -1,15 +1,20 @@ -<%- if table.align %> +\begingroup +<% if table.align -%> <%- if table.align == 'center' -%> - \begin{center} - <%- elif table.align in ('left', 'right') -%> - \begin{flush<%= table.align%>} - <%- endif -%> -<% endif -%> -<%- if table.caption -%> + \centering + <%- elif table.align == 'left' -%> + \raggedright + <%- else -%> + \raggedleft + <%- endif %> +<%- else -%> + \centering +<%- endif %> +<% if table.caption -%> \begin{threeparttable} \capstart\caption{<%= ''.join(table.caption) %>}<%= labels %> <% endif -%> -\noindent\begin{tabular}<%= table.get_colspec() -%> +\begin{tabular}<%= table.get_colspec() -%> \hline <%= ''.join(table.header) %> <%=- ''.join(table.body) %> @@ -17,10 +22,5 @@ <%- if table.caption %> \end{threeparttable} <%- endif %> -<%- if table.align %> - <%- if table.align == 'center' -%> - 
\end{center} - <%- elif table.align in ('left', 'right') -%> - \end{flush<%= table.align%>} - <% endif -%> -<% endif -%> +\par +\endgroup diff --git a/sphinx/templates/latex/tabulary.tex_t b/sphinx/templates/latex/tabulary.tex_t index d09a4b14b..fed923f8b 100644 --- a/sphinx/templates/latex/tabulary.tex_t +++ b/sphinx/templates/latex/tabulary.tex_t @@ -1,15 +1,20 @@ -<%- if table.align %> +\begingroup +<% if table.align -%> <%- if table.align == 'center' -%> - \begin{center} - <%- elif table.align in ('left', 'right') -%> - \begin{flush<%= table.align%>} - <%- endif -%> -<% endif -%> -<%- if table.caption -%> + \centering + <%- elif table.align == 'left' -%> + \raggedright + <%- else -%> + \raggedleft + <%- endif %> +<%- else -%> + \centering +<%- endif %> +<% if table.caption -%> \begin{threeparttable} \capstart\caption{<%= ''.join(table.caption) %>}<%= labels %> <% endif -%> -\noindent\begin{tabulary}{\linewidth}<%= table.get_colspec() -%> +\begin{tabulary}{\linewidth}<%= table.get_colspec() -%> \hline <%= ''.join(table.header) %> <%=- ''.join(table.body) %> @@ -17,10 +22,5 @@ <%- if table.caption %> \end{threeparttable} <%- endif %> -<%- if table.align %> - <%- if table.align == 'center' -%> - \end{center} - <%- elif table.align in ('left', 'right') -%> - \end{flush<%= table.align%>} - <% endif -%> -<% endif -%> +\par +\endgroup diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 81b9b79b1..95b7273f0 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -481,7 +481,7 @@ def test_footnote(app, status, warning): '\ncite\n}') in result assert '\\caption{Table caption \\sphinxfootnotemark[4]' in result assert 'name \\sphinxfootnotemark[5]' in result - assert ('\\end{threeparttable}\n%\n' + assert ('\\end{threeparttable}\n\\par\n\\endgroup\n%\n' '\\begin{footnotetext}[4]\sphinxAtStartFootnote\n' 'footnotes in table caption\n%\n\\end{footnotetext}%\n' '\\begin{footnotetext}[5]\sphinxAtStartFootnote\n' @@ -831,7 +831,7 @@ def test_latex_table_tabulars(app, status, warning): # simple_table table = tables['simple table'] - assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|}' in table) + assert ('\\begin{tabulary}{\\linewidth}{|L|L|}' in table) assert ('\\hline\n' '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in table) @@ -842,42 +842,43 @@ def test_latex_table_tabulars(app, status, warning): # table having :widths: option table = tables['table having :widths: option'] - assert ('\\noindent\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) + assert ('\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) assert ('\\hline\n\\end{tabular}' in table) # table having :align: option (tabulary) table = tables['table having :align: option (tabulary)'] - assert ('\\begin{center}\\noindent\\begin{tabulary}{\\linewidth}{|L|L|}\n' in table) - assert ('\\hline\n\\end{tabulary}\\end{center}' in table) + assert ('\\begingroup\n\\centering\n' + '\\begin{tabulary}{\\linewidth}{|L|L|}\n' in table) + assert ('\\hline\n\\end{tabulary}\n\\par\n\\endgroup' in table) # table having :align: option (tabular) table = tables['table having :align: option (tabular)'] - assert ('\\begin{flushleft}' - '\\noindent\\begin{tabular}{|\X{30}{100}|\X{70}{100}|}\n' in table) - assert ('\\hline\n\\end{tabular}\\end{flushleft}' in table) + assert ('\\begingroup\n\\raggedright\n' + '\\begin{tabular}{|\X{30}{100}|\X{70}{100}|}\n' in table) + assert ('\\hline\n\\end{tabular}\n\\par\n\\endgroup' in table) # table with 
tabularcolumn table = tables['table with tabularcolumn'] - assert ('\\noindent\\begin{tabulary}{\\linewidth}{|c|c|}' in table) + assert ('\\begin{tabulary}{\\linewidth}{|c|c|}' in table) # table having caption table = tables['table having caption'] assert ('\\begin{threeparttable}\n\\capstart\\caption{caption for table}' '\\label{\\detokenize{tabular:id1}}' in table) - assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|}' in table) + assert ('\\begin{tabulary}{\\linewidth}{|L|L|}' in table) assert ('\\hline\n\\end{tabulary}\n\\end{threeparttable}' in table) # table having verbatim table = tables['table having verbatim'] - assert ('\\noindent\\begin{tabular}{|*{2}{\\X{1}{2}|}}\n\\hline' in table) + assert ('\\begin{tabular}{|*{2}{\\X{1}{2}|}}\n\\hline' in table) # table having problematic cell table = tables['table having problematic cell'] - assert ('\\noindent\\begin{tabular}{|*{2}{\\X{1}{2}|}}\n\\hline' in table) + assert ('\\begin{tabular}{|*{2}{\\X{1}{2}|}}\n\\hline' in table) # table having both :widths: and problematic cell table = tables['table having both :widths: and problematic cell'] - assert ('\\noindent\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) + assert ('\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) @pytest.mark.skipif(docutils.__version_info__ < (0, 13), @@ -958,7 +959,7 @@ def test_latex_table_complex_tables(app, status, warning): # grid table table = tables['grid table'] - assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|L|}' in table) + assert ('\\begin{tabulary}{\\linewidth}{|L|L|L|}' in table) assert ('\\hline\n' '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax &' @@ -975,7 +976,7 @@ def test_latex_table_complex_tables(app, status, warning): # complex spanning cell table = tables['complex spanning cell'] - assert ('\\noindent\\begin{tabulary}{\\linewidth}{|L|L|L|L|L|}' in table) + assert ('\\begin{tabulary}{\\linewidth}{|L|L|L|L|L|}' in table) assert ('\\hline\n' '\\multirow{3}{*}{\\relax \ncell1-1\n\\unskip}\\relax &' '\\multirow{3}{*}{\\relax \ncell1-2\n\\unskip}\\relax &' From 1c38710ed282f5d3d38d027f9a5190e558b27be3 Mon Sep 17 00:00:00 2001 From: jfbu <jfbu@free.fr> Date: Sat, 11 Feb 2017 17:52:36 +0100 Subject: [PATCH 188/190] update latex table tests and CHANGES for PR#3415 --- CHANGES | 4 ++++ tests/roots/test-latex-table/tabular.rst | 2 +- tests/test_build_latex.py | 17 ++++++++++------- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index 8e25b84ff..64ad089dd 100644 --- a/CHANGES +++ b/CHANGES @@ -10,6 +10,9 @@ Incompatible changes has the custom code to fit image to available width if oversized. * The subclasses of ``sphinx.domains.Index`` should override ``generate()`` method. The default implementation raises NotImplementedError +* LaTeX positioned long tables horizontally centered, and short ones + flushed left (no text flow around table.) The position now defaults to center in + both cases, and it will obey Docutils 0.13 ``:align:`` option (refs #3415, #3377) Features removed ---------------- @@ -49,6 +52,7 @@ Features added * #3402: Allow to suppress "download file not readable" warnings using :confval:`suppress_warnings`. * #3377: latex: Add support for Docutils 0.13 ``:align:`` option for tables + (but does not implement text flow around table). 
Bugs fixed ---------- diff --git a/tests/roots/test-latex-table/tabular.rst b/tests/roots/test-latex-table/tabular.rst index 9dad36edd..5577c496e 100644 --- a/tests/roots/test-latex-table/tabular.rst +++ b/tests/roots/test-latex-table/tabular.rst @@ -30,7 +30,7 @@ table having :align: option (tabulary) -------------------------------------- .. table:: - :align: center + :align: right ======= ======= header1 header2 diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 95b7273f0..9a91bf0b5 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -831,23 +831,24 @@ def test_latex_table_tabulars(app, status, warning): # simple_table table = tables['simple table'] - assert ('\\begin{tabulary}{\\linewidth}{|L|L|}' in table) + assert ('\\begingroup\n\\centering\n\\begin{tabulary}{\\linewidth}{|L|L|}' in table) assert ('\\hline\n' '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &' '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in table) assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table) assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table) assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table) - assert ('\\hline\n\\end{tabulary}' in table) + assert ('\\hline\n\\end{tabulary}\n\\par\n\\endgroup' in table) # table having :widths: option table = tables['table having :widths: option'] - assert ('\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) - assert ('\\hline\n\\end{tabular}' in table) + assert ('\\begingroup\n\\centering\n' + '\\begin{tabular}{|\\X{30}{100}|\\X{70}{100}|}' in table) + assert ('\\hline\n\\end{tabular}\n\\par\n\\endgroup' in table) # table having :align: option (tabulary) table = tables['table having :align: option (tabulary)'] - assert ('\\begingroup\n\\centering\n' + assert ('\\begingroup\n\\raggedleft\n' '\\begin{tabulary}{\\linewidth}{|L|L|}\n' in table) assert ('\\hline\n\\end{tabulary}\n\\par\n\\endgroup' in table) @@ -863,10 +864,12 @@ def test_latex_table_tabulars(app, status, warning): # table having caption table = tables['table having caption'] - assert ('\\begin{threeparttable}\n\\capstart\\caption{caption for table}' + assert ('\\begingroup\n\\centering\n' + '\\begin{threeparttable}\n\\capstart\\caption{caption for table}' '\\label{\\detokenize{tabular:id1}}' in table) assert ('\\begin{tabulary}{\\linewidth}{|L|L|}' in table) - assert ('\\hline\n\\end{tabulary}\n\\end{threeparttable}' in table) + assert ('\\hline\n\\end{tabulary}\n\\end{threeparttable}' + '\n\\par\n\\endgroup' in table) # table having verbatim table = tables['table having verbatim'] From fb3e77bb78b30b7c8d026f51725397cdb61a2c99 Mon Sep 17 00:00:00 2001 From: Takeshi KOMIYA <i.tkomiya@gmail.com> Date: Mon, 13 Feb 2017 21:35:01 +0900 Subject: [PATCH 189/190] travis: Specify the version of pypy to avoid travis's issue --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index c787aa8d9..113daedc0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ cache: directories: - $HOME/.cache/pip python: - - "pypy" + - "pypy-5.4.1" - "2.7" - "3.4" - "3.5" @@ -27,7 +27,7 @@ matrix: env: DOCUTILS=0.12 - python: nightly env: DOCUTILS=0.12 - - python: pypy + - python: "pypy-5.4.1" env: DOCUTILS=0.12 addons: apt: From c76898f5a05d31b84d4b7f872905a85fee851aa5 Mon Sep 17 00:00:00 2001 From: Martin Drawitsch <mdraw.gh@gmail.com> Date: Mon, 13 Feb 2017 17:55:08 +0100 Subject: [PATCH 190/190] Fix "documenting" typo in reST primer --- doc/rest.rst | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/doc/rest.rst b/doc/rest.rst index 7b2b92ddc..f39ade669 100644 --- a/doc/rest.rst +++ b/doc/rest.rst @@ -226,7 +226,7 @@ as long as the text:: Normally, there are no heading levels assigned to certain characters as the structure is determined from the succession of headings. However, this -convention is used in `Python's Style Guide for documentating +convention is used in `Python's Style Guide for documenting <https://docs.python.org/devguide/documenting.html#style-guide>`_ which you may follow:
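
A quick, informal way to exercise the ``parselinenos()`` change from PATCH 186
is to replay the expectations added to ``tests/test_util.py``. The sketch below
is only illustrative and assumes a Python environment in which this Sphinx tree
(the 1.6 development series, with the patch applied) is importable::

    from sphinx.util import parselinenos

    # Line specs are 1-based and inclusive; the returned indices are 0-based.
    print(parselinenos('1,2,3', 10))   # [0, 1, 2]
    print(parselinenos('7-9', 10))     # [6, 7, 8]
    print(parselinenos('7-', 10))      # [6, 7, 8, 9]   right half-open range
    print(parselinenos('-4', 10))      # [0, 1, 2, 3]   left half-open range (the fixed case)

    # Malformed specs such as '1-2-3' or a bare '-' still raise ValueError.

The table ``:align:`` handling from PATCH 180/187/188 can likewise be checked by
building the ``tests/roots/test-latex-table`` root with the latex builder and
inspecting the resulting ``test.tex``, as the refactored ``test_latex_table_*``
tests do.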