Merge branch 'master' into 5770_doctest_refers_highlight_language

Takeshi KOMIYA 2018-12-16 00:32:10 +09:00 committed by GitHub
commit c70e65fc6c
179 changed files with 2014 additions and 2212 deletions


@ -81,6 +81,7 @@ Other contributors, listed alphabetically, are:
* Hong Xu -- svg support in imgmath extension and various bug fixes
* Stephen Finucane -- setup command improvements and documentation
* Daniel Pizetta -- inheritance diagram improvements
* KINEBUCHI Tomohiko -- typing Sphinx as well as docutils
Many thanks for all contributions!

CHANGES

@ -26,11 +26,14 @@ Incompatible changes
* LaTeX: Move message resources to ``sphinxmessage.sty``
* LaTeX: Stop using ``\captions<lang>`` macro for some labels
* LaTeX: for ``'xelatex'`` and ``'lualatex'``, use the ``FreeFont`` OpenType
fonts as default choice (refs #5645)
fonts as default choice (refs: #5645)
* LaTeX: Greek letters in text are not escaped to math mode mark-up, and they
will use the text font not the math font. The ``LGR`` font encoding must be
added to the ``'fontenc'`` key of :confval:`latex_elements` for this to work
(only if it is needed by the document, of course).
* LaTeX: setting the :confval:`language` to ``'en'`` triggered ``Sonny`` option
of ``fncychap``, now it is ``Bjarne`` to match case of no language specified.
(refs: #5772)
* #5770: doctest: Follow :confval:`highlight_language` on highlighting doctest
block. As a result, they are highlighted as python3 by default.
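As a quick illustration of the #5770 entry above, a minimal conf.py sketch (the value shown is only an example; python3 is also what you get when the option is left at its default):

# conf.py -- illustrative sketch for the #5770 behaviour above
extensions = ['sphinx.ext.doctest']

# Doctest blocks now follow this setting instead of a hard-coded lexer;
# leaving it unset gives the python3 default mentioned above.
highlight_language = 'python3'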
@ -48,6 +51,7 @@ Deprecated
is_meta_keywords()``
* The ``suffix`` argument of ``env.doc2path()`` is deprecated.
* The string style ``base`` argument of ``env.doc2path()`` is deprecated.
* ``sphinx.addnodes.abbreviation``
* ``sphinx.application.Sphinx._setting_up_extension``
* ``sphinx.config.check_unicode()``
* ``sphinx.ext.autodoc.importer._MockImporter``
@ -60,7 +64,8 @@ Deprecated
* ``sphinx.testing.util.remove_unicode_literal()``
* ``sphinx.util.attrdict``
* ``sphinx.util.force_decode()``
* ``sphinx.util.get_matching_docs()`` is deprecated
* ``sphinx.util.get_matching_docs()``
* ``sphinx.util.inspect.Parameter``
* ``sphinx.util.osutil.walk()``
* ``sphinx.util.PeekableIterator``
* ``sphinx.util.pycompat.u``
@ -103,6 +108,8 @@ Bugs fixed
language and ``'xelatex'`` or ``'lualatex'`` as :confval:`latex_engine`
(refs: #5251)
* #5248: LaTeX: Greek letters in section titles disappear from PDF bookmarks
* #5772: LaTeX: should the Bjarne style of fncychap be used for English also
if passed as language option?
Testing
--------


@ -147,6 +147,11 @@ The following is a list of deprecated interfaces.
- 4.0
- ``os.path.join()``
* - ``sphinx.addnodes.abbreviation``
- 2.0
- 4.0
- ``docutils.nodes.abbreviation``
* - ``sphinx.config.check_unicode()``
- 2.0
- 4.0
@ -197,6 +202,11 @@ The following is a list of deprecated interfaces.
- 4.0
- ``sphinx.util.get_matching_files()``
* - ``sphinx.util.inspect.Parameter``
- 2.0
- 3.0
- N/A
* - ``sphinx.util.osutil.walk()``
- 2.0
- 4.0
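For the ``sphinx.addnodes.abbreviation`` row above, a hedged migration sketch (the constructor call mirrors the signature shown later in this diff; storing the expansion in an ``explanation`` attribute follows Sphinx's existing usage and is an assumption here):

# Hypothetical extension code migrating off the deprecated node class.
from docutils import nodes

# Before (deprecated in 2.0, to be removed in 4.0):
#   from sphinx.addnodes import abbreviation
#   node = abbreviation('HTML', 'HTML', explanation='HyperText Markup Language')

# After: build the docutils node directly.
node = nodes.abbreviation('HTML', 'HTML', explanation='HyperText Markup Language')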


@ -2094,6 +2094,8 @@ information.
"fncychap" styles you can try are "Lenny", "Glenn", "Conny", "Rejne" and
"Bjornstrup". You can also set this to ``''`` to disable fncychap.
The default is ``''`` for Japanese documents.
``'preamble'``
Additional preamble content, default empty. See :doc:`/latex`.
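A conf.py sketch combining the two latex_elements keys documented above; the specific style and package are illustrative values, not recommendations:

# conf.py -- illustrative values only
latex_elements = {
    # Alternative fncychap style, or '' to disable fncychap entirely.
    'fncychap': r'\usepackage[Bjornstrup]{fncychap}',
    # Extra preamble content, empty by default.
    'preamble': r'\usepackage{amsmath}',
}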


@ -22,7 +22,6 @@ from .deprecation import RemovedInNextVersionWarning
if False:
# For type annotation
# note: Don't use typing.TYPE_CHECKING here (for py27 and py34).
from typing import Any # NOQA


@ -13,13 +13,12 @@ import warnings
from docutils import nodes
from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
if False:
# For type annotation
from typing import Any, Dict, List, Sequence # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
class translatable(nodes.Node):
@ -42,12 +41,12 @@ class translatable(nodes.Node):
raise NotImplementedError
def apply_translated_message(self, original_message, translated_message):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
"""Apply translated message."""
raise NotImplementedError
def extract_original_messages(self):
# type: () -> Sequence[unicode]
# type: () -> Sequence[str]
"""Extract translation messages.
:returns: list of extracted messages or messages generator
@ -69,12 +68,12 @@ class toctree(nodes.General, nodes.Element, translatable):
self['rawcaption'] = self['caption']
def apply_translated_message(self, original_message, translated_message):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
if self.get('rawcaption') == original_message:
self['caption'] = translated_message
def extract_original_messages(self):
# type: () -> List[unicode]
# type: () -> List[str]
if 'rawcaption' in self:
return [self['rawcaption']]
else:
@ -128,7 +127,7 @@ class desc_type(nodes.Part, nodes.Inline, nodes.FixedTextElement):
class desc_returns(desc_type):
"""Node for a "returns" annotation (a la -> in Python)."""
def astext(self):
# type: () -> unicode
# type: () -> str
return ' -> ' + super(desc_returns, self).astext()
@ -150,7 +149,7 @@ class desc_optional(nodes.Part, nodes.Inline, nodes.FixedTextElement):
child_text_separator = ', '
def astext(self):
# type: () -> unicode
# type: () -> str
return '[' + super(desc_optional, self).astext() + ']'
@ -344,8 +343,18 @@ class literal_strong(nodes.strong, not_smartquotable):
"""
class abbreviation(nodes.Inline, nodes.TextElement):
"""Node for abbreviations with explanations."""
class abbreviation(nodes.abbreviation):
"""Node for abbreviations with explanations.
.. deprecated:: 2.0
"""
def __init__(self, rawsource='', text='', *children, **attributes):
# type: (str, str, *nodes.Node, **Any) -> None
warnings.warn("abbreviation node for Sphinx was replaced by docutils'.",
RemovedInSphinx40Warning, stacklevel=2)
super(abbreviation, self).__init__(rawsource, text, *children, **attributes)
class manpage(nodes.Inline, nodes.FixedTextElement):
@ -353,7 +362,7 @@ class manpage(nodes.Inline, nodes.FixedTextElement):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_node(toctree)
app.add_node(desc)
app.add_node(desc_signature)
@ -389,7 +398,6 @@ def setup(app):
app.add_node(download_reference)
app.add_node(literal_emphasis)
app.add_node(literal_strong)
app.add_node(abbreviation, override=True)
app.add_node(manpage)
return {

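The bulk of this file, and of the files below, is a mechanical migration of Python 2-era comment annotations: ``unicode`` becomes ``str``. A before/after sketch with a made-up helper, just to make the pattern explicit:

# Hypothetical helper; only the type comment changes.

def make_label_old(name):
    # type: (unicode) -> unicode   # old py2/py3 spelling, removed in this commit
    return name.strip()

def make_label(name):
    # type: (str) -> str           # new spelling, same runtime behaviour
    return name.strip()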

@ -10,7 +10,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import pickle
@ -60,7 +59,7 @@ if False:
from sphinx.extension import Extension # NOQA
from sphinx.roles import XRefRole # NOQA
from sphinx.theming import Theme # NOQA
from sphinx.util.typing import RoleFunction, TitleGetter, unicode # NOQA
from sphinx.util.typing import RoleFunction, TitleGetter # NOQA
builtin_extensions = (
'sphinx.addnodes',
@ -113,7 +112,7 @@ builtin_extensions = (
# Strictly, alabaster theme is not a builtin extension,
# but it is loaded automatically to use it as default theme.
'alabaster',
) # type: Tuple[unicode, ...]
)
ENV_PICKLE_FILENAME = 'environment.pickle'
@ -133,20 +132,20 @@ class Sphinx:
confoverrides=None, status=sys.stdout, warning=sys.stderr,
freshenv=False, warningiserror=False, tags=None, verbosity=0,
parallel=0, keep_going=False):
# type: (unicode, unicode, unicode, unicode, unicode, Dict, IO, IO, bool, bool, List[unicode], int, int, bool) -> None # NOQA
# type: (str, str, str, str, str, Dict, IO, IO, bool, bool, List[str], int, int, bool) -> None # NOQA
self.phase = BuildPhase.INITIALIZATION
self.verbosity = verbosity
self.extensions = {} # type: Dict[unicode, Extension]
self.extensions = {} # type: Dict[str, Extension]
self.builder = None # type: Builder
self.env = None # type: BuildEnvironment
self.project = None # type: Project
self.registry = SphinxComponentRegistry()
self.html_themes = {} # type: Dict[unicode, unicode]
self.html_themes = {} # type: Dict[str, str]
# validate provided directories
self.srcdir = abspath(srcdir) # type: unicode
self.outdir = abspath(outdir) # type: unicode
self.doctreedir = abspath(doctreedir) # type: unicode
self.srcdir = abspath(srcdir)
self.outdir = abspath(outdir)
self.doctreedir = abspath(doctreedir)
self.confdir = confdir
if self.confdir: # confdir is optional
self.confdir = abspath(self.confdir)
@ -306,11 +305,11 @@ class Sphinx:
self._init_env(freshenv=True)
def preload_builder(self, name):
# type: (unicode) -> None
# type: (str) -> None
self.registry.preload_builder(self, name)
def create_builder(self, name):
# type: (unicode) -> Builder
# type: (str) -> Builder
if name is None:
logger.info(__('No builder selected, using default: html'))
name = 'html'
@ -326,7 +325,7 @@ class Sphinx:
# ---- main "build" method -------------------------------------------------
def build(self, force_all=False, filenames=None):
# type: (bool, List[unicode]) -> None
# type: (bool, List[str]) -> None
self.phase = BuildPhase.READING
try:
if force_all:
@ -371,7 +370,7 @@ class Sphinx:
# ---- general extensibility interface -------------------------------------
def setup_extension(self, extname):
# type: (unicode) -> None
# type: (str) -> None
"""Import and setup a Sphinx extension module.
Load the extension given by the module *name*. Use this if your
@ -382,7 +381,7 @@ class Sphinx:
self.registry.load_extension(self, extname)
def require_sphinx(self, version):
# type: (unicode) -> None
# type: (str) -> None
"""Check the Sphinx version if requested.
Compare *version* (which must be a ``major.minor`` version string, e.g.
@ -395,7 +394,7 @@ class Sphinx:
raise VersionRequirementError(version)
def import_object(self, objname, source=None):
# type: (str, unicode) -> Any
# type: (str, str) -> Any
"""Import an object from a ``module.name`` string.
.. deprecated:: 1.8
@ -408,7 +407,7 @@ class Sphinx:
# event interface
def connect(self, event, callback):
# type: (unicode, Callable) -> int
# type: (str, Callable) -> int
"""Register *callback* to be called when *event* is emitted.
For details on available core events and the arguments of callback
@ -428,7 +427,7 @@ class Sphinx:
self.events.disconnect(listener_id)
def emit(self, event, *args):
# type: (unicode, Any) -> List
# type: (str, Any) -> List
"""Emit *event* and pass *arguments* to the callback functions.
Return the return values of all callbacks as a list. Do not emit core
@ -443,7 +442,7 @@ class Sphinx:
return self.events.emit(event, self, *args)
def emit_firstresult(self, event, *args):
# type: (unicode, Any) -> Any
# type: (str, Any) -> Any
"""Emit *event* and pass *arguments* to the callback functions.
Return the result of the first callback that doesn't return ``None``.
@ -468,7 +467,7 @@ class Sphinx:
# TODO(stephenfin): Describe 'types' parameter
def add_config_value(self, name, default, rebuild, types=()):
# type: (unicode, Any, Union[bool, unicode], Any) -> None
# type: (str, Any, Union[bool, str], Any) -> None
"""Register a configuration value.
This is necessary for Sphinx to recognize new values and set default
@ -501,7 +500,7 @@ class Sphinx:
self.config.add(name, default, rebuild, types)
def add_event(self, name):
# type: (unicode) -> None
# type: (str) -> None
"""Register an event called *name*.
This is needed to be able to emit it.
@ -510,7 +509,7 @@ class Sphinx:
self.events.add(name)
def set_translator(self, name, translator_class, override=False):
# type: (unicode, Type[nodes.NodeVisitor], bool) -> None
# type: (str, Type[nodes.NodeVisitor], bool) -> None
"""Register or override a Docutils translator class.
This is used to register a custom output translator or to replace a
@ -563,7 +562,7 @@ class Sphinx:
self.registry.add_translation_handlers(node, **kwds)
def add_enumerable_node(self, node, figtype, title_getter=None, override=False, **kwds):
# type: (Type[nodes.Element], unicode, TitleGetter, bool, Any) -> None
# type: (Type[nodes.Element], str, TitleGetter, bool, Any) -> None
"""Register a Docutils node class as a numfig target.
Sphinx numbers the node automatically. And then the users can refer it
@ -592,14 +591,14 @@ class Sphinx:
@property
def enumerable_nodes(self):
# type: () -> Dict[Type[nodes.Node], Tuple[unicode, TitleGetter]]
# type: () -> Dict[Type[nodes.Node], Tuple[str, TitleGetter]]
warnings.warn('app.enumerable_nodes() is deprecated. '
'Use app.get_domain("std").enumerable_nodes instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.registry.enumerable_nodes
def add_directive(self, name, obj, content=None, arguments=None, override=False, **options): # NOQA
# type: (unicode, Any, bool, Tuple[int, int, bool], bool, Any) -> None
# type: (str, Any, bool, Tuple[int, int, bool], bool, Any) -> None
"""Register a Docutils directive.
*name* must be the prospective directive name. There are two possible
@ -663,7 +662,7 @@ class Sphinx:
docutils.register_directive(name, obj)
def add_role(self, name, role, override=False):
# type: (unicode, Any, bool) -> None
# type: (str, Any, bool) -> None
"""Register a Docutils role.
*name* must be the role name that occurs in the source, *role* the role
@ -681,7 +680,7 @@ class Sphinx:
docutils.register_role(name, role)
def add_generic_role(self, name, nodeclass, override=False):
# type: (unicode, Any, bool) -> None
# type: (str, Any, bool) -> None
"""Register a generic Docutils role.
Register a Docutils role that does nothing but wrap its contents in the
@ -732,7 +731,7 @@ class Sphinx:
def add_directive_to_domain(self, domain, name, obj, has_content=None, argument_spec=None,
override=False, **option_spec):
# type: (unicode, unicode, Any, bool, Any, bool, Any) -> None
# type: (str, str, Any, bool, Any, bool, Any) -> None
"""Register a Docutils directive in a domain.
Like :meth:`add_directive`, but the directive is added to the domain
@ -747,7 +746,7 @@ class Sphinx:
**option_spec)
def add_role_to_domain(self, domain, name, role, override=False):
# type: (unicode, unicode, Union[RoleFunction, XRefRole], bool) -> None
# type: (str, str, Union[RoleFunction, XRefRole], bool) -> None
"""Register a Docutils role in a domain.
Like :meth:`add_role`, but the role is added to the domain named
@ -760,7 +759,7 @@ class Sphinx:
self.registry.add_role_to_domain(domain, name, role, override=override)
def add_index_to_domain(self, domain, index, override=False):
# type: (unicode, Type[Index], bool) -> None
# type: (str, Type[Index], bool) -> None
"""Register a custom index for a domain.
Add a custom *index* class to the domain named *domain*. *index* must
@ -775,7 +774,7 @@ class Sphinx:
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[], override=False):
# type: (unicode, unicode, unicode, Callable, Type[nodes.TextElement], unicode, List, bool) -> None # NOQA
# type: (str, str, str, Callable, Type[nodes.TextElement], str, List, bool) -> None
"""Register a new object type.
This method is a very convenient way to add a new :term:`object` type
@ -841,7 +840,7 @@ class Sphinx:
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname='', override=False):
# type: (unicode, unicode, unicode, Type[nodes.TextElement], unicode, bool) -> None
# type: (str, str, str, Type[nodes.TextElement], str, bool) -> None
"""Register a new crossref object type.
This method is very similar to :meth:`add_object_type` except that the
@ -920,7 +919,7 @@ class Sphinx:
self.registry.add_post_transform(transform)
def add_javascript(self, filename, **kwargs):
# type: (unicode, **unicode) -> None
# type: (str, **str) -> None
"""An alias of :meth:`add_js_file`."""
warnings.warn('The app.add_javascript() is deprecated. '
'Please use app.add_js_file() instead.',
@ -928,7 +927,7 @@ class Sphinx:
self.add_js_file(filename, **kwargs)
def add_js_file(self, filename, **kwargs):
# type: (unicode, **unicode) -> None
# type: (str, **str) -> None
"""Register a JavaScript file to include in the HTML output.
Add *filename* to the list of JavaScript files that the default HTML
@ -955,7 +954,7 @@ class Sphinx:
self.builder.add_js_file(filename, **kwargs) # type: ignore
def add_css_file(self, filename, **kwargs):
# type: (unicode, **unicode) -> None
# type: (str, **str) -> None
"""Register a stylesheet to include in the HTML output.
Add *filename* to the list of CSS files that the default HTML template
@ -995,13 +994,13 @@ class Sphinx:
self.builder.add_css_file(filename, **kwargs) # type: ignore
def add_stylesheet(self, filename, alternate=False, title=None):
# type: (unicode, bool, unicode) -> None
# type: (str, bool, str) -> None
"""An alias of :meth:`add_css_file`."""
warnings.warn('The app.add_stylesheet() is deprecated. '
'Please use app.add_css_file() instead.',
RemovedInSphinx40Warning, stacklevel=2)
attributes = {} # type: Dict[unicode, unicode]
attributes = {} # type: Dict[str, str]
if alternate:
attributes['rel'] = 'alternate stylesheet'
else:
@ -1013,7 +1012,7 @@ class Sphinx:
self.add_css_file(filename, **attributes)
def add_latex_package(self, packagename, options=None):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
r"""Register a package to include in the LaTeX source code.
Add *packagename* to the list of packages that LaTeX source code will
@ -1032,7 +1031,7 @@ class Sphinx:
self.registry.add_latex_package(packagename, options)
def add_lexer(self, alias, lexer):
# type: (unicode, Any) -> None
# type: (str, Any) -> None
"""Register a new lexer for source code.
Use *lexer*, which must be an instance of a Pygments lexer class, to
@ -1066,7 +1065,7 @@ class Sphinx:
self.add_directive('auto' + cls.objtype, AutodocDirective)
def add_autodoc_attrgetter(self, typ, getter):
# type: (Type, Callable[[Any, unicode, Any], Any]) -> None
# type: (Type, Callable[[Any, str, Any], Any]) -> None
"""Register a new ``getattr``-like function for the autodoc extension.
Add *getter*, which must be a function with an interface compatible to
@ -1098,7 +1097,7 @@ class Sphinx:
languages[cls.lang] = cls
def add_source_suffix(self, suffix, filetype, override=False):
# type: (unicode, unicode, bool) -> None
# type: (str, str, bool) -> None
"""Register a suffix of source files.
Same as :confval:`source_suffix`. The users can override this
@ -1133,7 +1132,7 @@ class Sphinx:
collector().enable(self)
def add_html_theme(self, name, theme_path):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
"""Register a HTML Theme.
The *name* is a name of theme, and *path* is a full path to the theme
@ -1145,7 +1144,7 @@ class Sphinx:
self.html_themes[name] = theme_path
def add_html_math_renderer(self, name, inline_renderers=None, block_renderers=None):
# type: (unicode, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
# type: (str, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
"""Register a math renderer for HTML.
The *name* is a name of the math renderer. Both *inline_renderers* and
@ -1159,7 +1158,7 @@ class Sphinx:
self.registry.add_html_math_renderer(name, inline_renderers, block_renderers)
def add_message_catalog(self, catalog, locale_dir):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
"""Register a message catalog.
The *catalog* is a name of catalog, and *locale_dir* is a base path
@ -1173,7 +1172,7 @@ class Sphinx:
# ---- other methods -------------------------------------------------
def is_parallel_allowed(self, typ):
# type: (unicode) -> bool
# type: (str) -> bool
"""Check parallel processing is allowed or not.
``typ`` is a type of processing; ``'read'`` or ``'write'``.
@ -1206,7 +1205,7 @@ class Sphinx:
@property
def _setting_up_extension(self):
# type: () -> List[unicode]
# type: () -> List[str]
warnings.warn('app._setting_up_extension is deprecated.',
RemovedInSphinx30Warning)
return ['?']
@ -1219,7 +1218,7 @@ class TemplateBridge:
"""
def init(self, builder, theme=None, dirs=None):
# type: (Builder, Theme, List[unicode]) -> None
# type: (Builder, Theme, List[str]) -> None
"""Called by the builder to initialize the template system.
*builder* is the builder object; you'll probably want to look at the
@ -1239,14 +1238,14 @@ class TemplateBridge:
return 0
def render(self, template, context):
# type: (unicode, Dict) -> None
# type: (str, Dict) -> None
"""Called by the builder to render a template given as a filename with
a specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
def render_string(self, template, context):
# type: (unicode, Dict) -> unicode
# type: (str, Dict) -> str
"""Called by the builder to render a template given as a string with a
specified context (a Python dictionary).
"""

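The application.py hunks above keep add_javascript() and add_stylesheet() only as deprecated aliases of add_js_file() and add_css_file(). A small extension sketch using the current names (the module and asset file names are hypothetical):

# my_assets.py -- hypothetical extension module

if False:
    # For type annotation, mirroring the style used in this diff
    from typing import Any, Dict  # NOQA
    from sphinx.application import Sphinx  # NOQA


def setup(app):
    # type: (Sphinx) -> Dict[str, Any]
    # Prefer the non-deprecated asset APIs registered on the application.
    app.add_js_file('custom.js')
    app.add_css_file('custom.css')
    return {'version': '0.1', 'parallel_read_safe': True}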

@ -47,7 +47,6 @@ if False:
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.i18n import CatalogInfo # NOQA
from sphinx.util.tags import Tags # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -59,19 +58,19 @@ class Builder:
"""
#: The builder's name, for the -b command line option.
name = '' # type: unicode
name = ''
#: The builder's output format, or '' if no document output is produced.
format = '' # type: unicode
format = ''
#: The message emitted upon successful build completion. This can be a
#: printf-style template string with the following keys: ``outdir``,
#: ``project``
epilog = '' # type: unicode
epilog = ''
#: default translator class for the builder. This can be overrided by
#: :py:meth:`app.set_translator()`.
default_translator_class = None # type: Type[nodes.NodeVisitor]
# doctree versioning method
versioning_method = 'none' # type: unicode
versioning_method = 'none'
versioning_compare = False
# allow parallel write_doc() calls
allow_parallel = False
@ -80,7 +79,7 @@ class Builder:
#: The list of MIME types of image formats supported by the builder.
#: Image files are searched in the order in which they appear here.
supported_image_types = [] # type: List[unicode]
supported_image_types = [] # type: List[str]
#: The builder supports remote images or not.
supported_remote_images = False
#: The builder supports data URIs or not.
@ -104,11 +103,11 @@ class Builder:
self.tags.add("builder_%s" % self.name)
# images that need to be copied over (source -> dest)
self.images = {} # type: Dict[unicode, unicode]
self.images = {} # type: Dict[str, str]
# basename of images directory
self.imagedir = ""
# relative path to image directory from current docname (used at writing docs)
self.imgpath = "" # type: unicode
self.imgpath = ""
# these get set later
self.parallel_ok = False
@ -154,7 +153,7 @@ class Builder:
self.templates = BuiltinTemplateLoader()
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
"""Return the target URI for a document name.
*typ* can be used to qualify the link characteristic for individual
@ -163,7 +162,7 @@ class Builder:
raise NotImplementedError
def get_relative_uri(self, from_, to, typ=None):
# type: (unicode, unicode, unicode) -> unicode
# type: (str, str, str) -> str
"""Return a relative URI between two source filenames.
May raise environment.NoUri if there's no way to return a sensible URI.
@ -172,7 +171,7 @@ class Builder:
self.get_target_uri(to, typ))
def get_outdated_docs(self):
# type: () -> Union[unicode, Iterable[unicode]]
# type: () -> Union[str, Iterable[str]]
"""Return an iterable of output files that are outdated, or a string
describing what an update build will build.
@ -183,7 +182,7 @@ class Builder:
raise NotImplementedError
def get_asset_paths(self):
# type: () -> List[unicode]
# type: () -> List[str]
"""Return list of paths for assets (ex. templates, CSS, etc.)."""
return []
@ -222,12 +221,12 @@ class Builder:
# compile po methods
def compile_catalogs(self, catalogs, message):
# type: (Set[CatalogInfo], unicode) -> None
# type: (Set[CatalogInfo], str) -> None
if not self.config.gettext_auto_build:
return
def cat2relpath(cat):
# type: (CatalogInfo) -> unicode
# type: (CatalogInfo) -> str
return relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP)
logger.info(bold(__('building [mo]: ')) + message)
@ -248,9 +247,9 @@ class Builder:
self.compile_catalogs(catalogs, message)
def compile_specific_catalogs(self, specified_files):
# type: (List[unicode]) -> None
# type: (List[str]) -> None
def to_domain(fpath):
# type: (unicode) -> unicode
# type: (str) -> str
docname = self.env.path2doc(path.abspath(fpath))
if docname:
return find_catalog(docname, self.config.gettext_compact)
@ -286,13 +285,13 @@ class Builder:
self.build(None, summary=__('all source files'), method='all')
def build_specific(self, filenames):
# type: (List[unicode]) -> None
# type: (List[str]) -> None
"""Only rebuild as much as needed for changes in the *filenames*."""
# bring the filenames to the canonical format, that is,
# relative to the source directory and without source_suffix.
dirlen = len(self.srcdir) + 1
to_write = []
suffixes = None # type: Tuple[unicode]
suffixes = None # type: Tuple[str]
suffixes = tuple(self.config.source_suffix) # type: ignore
for filename in filenames:
filename = path.normpath(path.abspath(filename))
@ -328,7 +327,7 @@ class Builder:
len(to_build))
def build(self, docnames, summary=None, method='update'):
# type: (Iterable[unicode], unicode, unicode) -> None
# type: (Iterable[str], str, str) -> None
"""Main build method.
First updates the environment, and then calls :meth:`write`.
@ -399,7 +398,7 @@ class Builder:
self.finish_tasks.join()
def read(self):
# type: () -> List[unicode]
# type: () -> List[str]
"""(Re-)read all files new or changed since last update.
Store all environment docnames in the canonical format (ie using SEP as
@ -462,7 +461,7 @@ class Builder:
return sorted(docnames)
def _read_serial(self, docnames):
# type: (List[unicode]) -> None
# type: (List[str]) -> None
for docname in status_iterator(docnames, 'reading sources... ', "purple",
len(docnames), self.app.verbosity):
# remove all inventory entries for that file
@ -471,14 +470,14 @@ class Builder:
self.read_doc(docname)
def _read_parallel(self, docnames, nproc):
# type: (List[unicode], int) -> None
# type: (List[str], int) -> None
# clear all outdated docs at once
for docname in docnames:
self.app.emit('env-purge-doc', self.env, docname)
self.env.clear_doc(docname)
def read_process(docs):
# type: (List[unicode]) -> bytes
# type: (List[str]) -> bytes
self.env.app = self.app
for docname in docs:
self.read_doc(docname)
@ -486,7 +485,7 @@ class Builder:
return pickle.dumps(self.env, pickle.HIGHEST_PROTOCOL)
def merge(docs, otherenv):
# type: (List[unicode], bytes) -> None
# type: (List[str], bytes) -> None
env = pickle.loads(otherenv)
self.env.merge_info_from(docs, env, self.app)
@ -502,7 +501,7 @@ class Builder:
tasks.join()
def read_doc(self, docname):
# type: (unicode) -> None
# type: (str) -> None
"""Parse a file and add/update inventory entries for the doctree."""
self.env.prepare_settings(docname)
@ -528,7 +527,7 @@ class Builder:
self.write_doctree(docname, doctree)
def write_doctree(self, docname, doctree):
# type: (unicode, nodes.document) -> None
# type: (str, nodes.document) -> None
"""Write the doctree to a file."""
# make it picklable
doctree.reporter = None
@ -543,7 +542,7 @@ class Builder:
pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
def write(self, build_docnames, updated_docnames, method='update'):
# type: (Iterable[unicode], Sequence[unicode], unicode) -> None
# type: (Iterable[str], Sequence[str], str) -> None
if build_docnames is None or build_docnames == ['__all__']:
# build_all
build_docnames = self.env.found_docs
@ -574,7 +573,7 @@ class Builder:
self._write_serial(sorted(docnames))
def _write_serial(self, docnames):
# type: (Sequence[unicode]) -> None
# type: (Sequence[str]) -> None
with logging.pending_warnings():
for docname in status_iterator(docnames, __('writing output... '), "darkgreen",
len(docnames), self.app.verbosity):
@ -585,9 +584,9 @@ class Builder:
self.write_doc(docname, doctree)
def _write_parallel(self, docnames, nproc):
# type: (Sequence[unicode], int) -> None
# type: (Sequence[str], int) -> None
def write_process(docs):
# type: (List[Tuple[unicode, nodes.document]]) -> None
# type: (List[Tuple[str, nodes.document]]) -> None
self.app.phase = BuildPhase.WRITING
for docname, doctree in docs:
self.write_doc(docname, doctree)
@ -618,17 +617,17 @@ class Builder:
tasks.join()
def prepare_writing(self, docnames):
# type: (Set[unicode]) -> None
# type: (Set[str]) -> None
"""A place where you can add logic before :meth:`write_doc` is run"""
raise NotImplementedError
def write_doc(self, docname, doctree):
# type: (unicode, nodes.document) -> None
# type: (str, nodes.document) -> None
"""Where you actually write something to the filesystem."""
raise NotImplementedError
def write_doc_serialized(self, docname, doctree):
# type: (unicode, nodes.document) -> None
# type: (str, nodes.document) -> None
"""Handle parts of write_doc that must be called in the main process
if parallel build is active.
"""
@ -651,7 +650,7 @@ class Builder:
pass
def get_builder_config(self, option, default):
# type: (unicode, unicode) -> Any
# type: (str, str) -> Any
"""Return a builder specific option.
This method allows customization of common builder settings by

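The Builder base class above leaves get_target_uri(), get_outdated_docs(), prepare_writing() and write_doc() to subclasses. A skeletal no-op builder in the spirit of the DummyBuilder that appears further down in this diff (the class and its name are hypothetical):

from sphinx.builders import Builder

if False:
    # For type annotation
    from typing import Set  # NOQA
    from docutils import nodes  # NOQA


class NullBuilder(Builder):
    """Hypothetical builder that reads everything and writes nothing."""
    name = 'null'

    def get_outdated_docs(self):
        # type: () -> Set[str]
        return self.env.found_docs

    def get_target_uri(self, docname, typ=None):
        # type: (str, str) -> str
        return ''

    def prepare_writing(self, docnames):
        # type: (Set[str]) -> None
        pass

    def write_doc(self, docname, doctree):
        # type: (str, nodes.document) -> None
        pass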

@ -39,7 +39,6 @@ if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -80,7 +79,7 @@ MEDIA_TYPES = {
'.otf': 'application/x-font-otf',
'.ttf': 'application/x-font-ttf',
'.woff': 'application/font-woff',
} # type: Dict[unicode, unicode]
}
VECTOR_GRAPHICS_EXTENSIONS = ('.svg',)
@ -97,7 +96,7 @@ NavPoint = namedtuple('NavPoint', ['navpoint', 'playorder', 'text', 'refuri', 'c
def sphinx_smarty_pants(t, language='en'):
# type: (unicode, str) -> unicode
# type: (str, str) -> str
t = t.replace('&quot;', '"')
t = smartquotes.educateDashesOldSchool(t)
t = smartquotes.educateQuotes(t, language)
@ -158,21 +157,21 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.link_suffix = '.xhtml'
self.playorder = 0
self.tocid = 0
self.id_cache = {} # type: Dict[unicode, unicode]
self.id_cache = {} # type: Dict[str, str]
self.use_index = self.get_builder_config('use_index', 'epub')
self.refnodes = [] # type: List[Dict[unicode, Any]]
self.refnodes = [] # type: List[Dict[str, Any]]
def create_build_info(self):
# type: () -> BuildInfo
return BuildInfo(self.config, self.tags, ['html', 'epub'])
def get_theme_config(self):
# type: () -> Tuple[unicode, Dict]
# type: () -> Tuple[str, Dict]
return self.config.epub_theme, self.config.epub_theme_options
# generic support functions
def make_id(self, name):
# type: (unicode) -> unicode
# type: (str) -> str
# id_cache is intentionally mutable
"""Return a unique id for name."""
id = self.id_cache.get(name)
@ -182,7 +181,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return id
def esc(self, name):
# type: (unicode) -> unicode
# type: (str) -> str
"""Replace all characters not allowed in text an attribute values."""
# Like cgi.escape, but also replace apostrophe
name = name.replace('&', '&amp;')
@ -193,7 +192,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return name
def get_refnodes(self, doctree, result):
# type: (nodes.Node, List[Dict[unicode, Any]]) -> List[Dict[unicode, Any]]
# type: (nodes.Node, List[Dict[str, Any]]) -> List[Dict[str, Any]]
"""Collect section titles, their depth in the toc and the refuri."""
# XXX: is there a better way than checking the attribute
# toctree-l[1-8] on the parent node?
@ -233,7 +232,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.toc_add_files(self.refnodes)
def toc_add_files(self, refnodes):
# type: (List[Dict[unicode, Any]]) -> None
# type: (List[Dict[str, Any]]) -> None
"""Add the master_doc, pre and post files to a list of refnodes.
"""
refnodes.insert(0, {
@ -256,7 +255,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
})
def fix_fragment(self, prefix, fragment):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
"""Return a href/id attribute with colons replaced by hyphens."""
return prefix + fragment.replace(':', '-')
@ -294,11 +293,11 @@ class EpubBuilder(StandaloneHTMLBuilder):
desc_signature.attributes['ids'] = newids
def add_visible_links(self, tree, show_urls='inline'):
# type: (nodes.document, unicode) -> None
# type: (nodes.document, str) -> None
"""Add visible link targets for external links"""
def make_footnote_ref(doc, label):
# type: (nodes.document, unicode) -> nodes.footnote_reference
# type: (nodes.document, str) -> nodes.footnote_reference
"""Create a footnote_reference node with children"""
footnote_ref = nodes.footnote_reference('[#]_')
footnote_ref.append(nodes.Text(label))
@ -306,7 +305,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return footnote_ref
def make_footnote(doc, label, uri):
# type: (nodes.document, unicode, unicode) -> nodes.footnote
# type: (nodes.document, str, str) -> nodes.footnote
"""Create a footnote node with children"""
footnote = nodes.footnote(uri)
para = nodes.paragraph()
@ -366,7 +365,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
fn_idx += 1
def write_doc(self, docname, doctree):
# type: (unicode, nodes.document) -> None
# type: (str, nodes.document) -> None
"""Write one document file.
This method is overwritten in order to fix fragment identifiers
@ -377,7 +376,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
super(EpubBuilder, self).write_doc(docname, doctree)
def fix_genindex(self, tree):
# type: (List[Tuple[unicode, List[Tuple[unicode, Any]]]]) -> None
# type: (List[Tuple[str, List[Tuple[str, Any]]]]) -> None
"""Fix href attributes for genindex pages."""
# XXX: modifies tree inline
# Logic modeled from themes/basic/genindex.html
@ -396,7 +395,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.fix_fragment(m.group(1), m.group(2)))
def is_vector_graphics(self, filename):
# type: (unicode) -> bool
# type: (str) -> bool
"""Does the filename extension indicate a vector graphic format?"""
ext = path.splitext(filename)[-1]
return ext in VECTOR_GRAPHICS_EXTENSIONS
@ -461,7 +460,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
# type: (unicode, Dict, unicode, unicode, Any) -> None
# type: (str, Dict, str, str, Any) -> None
"""Create a rendered page.
This method is overwritten for genindex pages in order to fix href link
@ -476,14 +475,14 @@ class EpubBuilder(StandaloneHTMLBuilder):
outfilename, event_arg)
def build_mimetype(self, outdir, outname):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
"""Write the metainfo file mimetype."""
logger.info(__('writing %s file...'), outname)
copy_asset_file(path.join(self.template_dir, 'mimetype'),
path.join(outdir, outname))
def build_container(self, outdir, outname):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
"""Write the metainfo file META-INF/container.xml."""
logger.info(__('writing %s file...'), outname)
filename = path.join(outdir, outname)
@ -491,11 +490,11 @@ class EpubBuilder(StandaloneHTMLBuilder):
copy_asset_file(path.join(self.template_dir, 'container.xml'), filename)
def content_metadata(self):
# type: () -> Dict[unicode, Any]
# type: () -> Dict[str, Any]
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
metadata = {} # type: Dict[unicode, Any]
metadata = {} # type: Dict[str, Any]
metadata['title'] = self.esc(self.config.epub_title)
metadata['author'] = self.esc(self.config.epub_author)
metadata['uid'] = self.esc(self.config.epub_uid)
@ -511,7 +510,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return metadata
def build_content(self, outdir, outname):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
"""Write the metainfo file content.opf It contains bibliographic data,
a file list and the spine (the reading order).
"""
@ -522,7 +521,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
self.files = [] # type: List[unicode]
self.files = [] # type: List[str]
self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf',
'toc.ncx', 'META-INF/container.xml',
'Thumbs.db', 'ehthumbs.db', '.DS_Store',
@ -625,7 +624,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
metadata)
def new_navpoint(self, node, level, incr=True):
# type: (Dict[unicode, Any], int, bool) -> NavPoint
# type: (Dict[str, Any], int, bool) -> NavPoint
"""Create a new entry in the toc from the node at given level."""
# XXX Modifies the node
if incr:
@ -635,7 +634,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
node['text'], node['refuri'], [])
def build_navpoints(self, nodes):
# type: (List[Dict[unicode, Any]]) -> List[NavPoint]
# type: (List[Dict[str, Any]]) -> List[NavPoint]
"""Create the toc navigation structure.
Subelements of a node are nested inside the navpoint. For nested nodes
@ -680,11 +679,11 @@ class EpubBuilder(StandaloneHTMLBuilder):
return navstack[0].children
def toc_metadata(self, level, navpoints):
# type: (int, List[NavPoint]) -> Dict[unicode, Any]
# type: (int, List[NavPoint]) -> Dict[str, Any]
"""Create a dictionary with all metadata for the toc.ncx file
properly escaped.
"""
metadata = {} # type: Dict[unicode, Any]
metadata = {} # type: Dict[str, Any]
metadata['uid'] = self.config.epub_uid
metadata['title'] = self.esc(self.config.epub_title)
metadata['level'] = level
@ -692,7 +691,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return metadata
def build_toc(self, outdir, outname):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
"""Write the metainfo file toc.ncx."""
logger.info(__('writing %s file...'), outname)
@ -713,7 +712,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.toc_metadata(level, navpoints))
def build_epub(self, outdir, outname):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
"""Write the epub file.
It is a zip file with the mimetype file stored uncompressed as the first

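The content_metadata() method above pulls its values from a handful of epub confvals. A hedged conf.py sketch with placeholder values:

# conf.py -- placeholder epub metadata consumed by content_metadata() above
epub_title = 'My Project'
epub_author = 'Jane Doe'
epub_uid = 'unknown'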

@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import pipes
import plistlib
@ -31,7 +30,6 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -270,7 +268,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(AppleHelpBuilder)


@ -27,7 +27,6 @@ if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -48,22 +47,22 @@ class ChangesBuilder(Builder):
self.templates.init(self, self.theme)
def get_outdated_docs(self):
# type: () -> unicode
# type: () -> str
return self.outdir
typemap = {
'versionadded': 'added',
'versionchanged': 'changed',
'deprecated': 'deprecated',
} # type: Dict[unicode, unicode]
}
def write(self, *ignored):
# type: (Any) -> None
version = self.config.version
domain = cast(ChangeSetDomain, self.env.get_domain('changeset'))
libchanges = {} # type: Dict[unicode, List[Tuple[unicode, unicode, int]]]
apichanges = [] # type: List[Tuple[unicode, unicode, int]]
otherchanges = {} # type: Dict[Tuple[unicode, unicode], List[Tuple[unicode, unicode, int]]] # NOQA
libchanges = {} # type: Dict[str, List[Tuple[str, str, int]]]
apichanges = [] # type: List[Tuple[str, str, int]]
otherchanges = {} # type: Dict[Tuple[str, str], List[Tuple[str, str, int]]]
if version not in self.env.versionchanges:
logger.info(bold(__('no changes in version %s.') % version))
return
@ -123,7 +122,7 @@ class ChangesBuilder(Builder):
'.. deprecated:: %s' % version]
def hl(no, line):
# type: (int, unicode) -> unicode
# type: (int, str) -> str
line = '<a name="L%s"> </a>' % no + htmlescape(line)
for x in hltext:
if x in line:
@ -157,7 +156,7 @@ class ChangesBuilder(Builder):
self.outdir)
def hl(self, text, version):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
text = htmlescape(text)
for directive in ['versionchanged', 'versionadded', 'deprecated']:
text = text.replace('.. %s:: %s' % (directive, version),
@ -170,7 +169,7 @@ class ChangesBuilder(Builder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(ChangesBuilder)
return {


@ -36,7 +36,6 @@ if False:
# For type annotation
from typing import Dict, List # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -72,7 +71,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
self.build_devhelp(self.outdir, self.config.devhelp_basename)
def build_devhelp(self, outdir, outname):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
logger.info(__('dumping devhelp index...'))
# Basic info
@ -112,7 +111,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
index = IndexEntries(self.env).create_index(self)
def write_index(title, refs, subitems):
# type: (unicode, List[Any], Any) -> None
# type: (str, List[Any], Any) -> None
if len(refs) == 0:
pass
elif len(refs) == 1:
@ -141,7 +140,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(DevhelpBuilder)


@ -18,7 +18,6 @@ if False:
from typing import Any, Dict, Set # NOQA
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
class DummyBuilder(Builder):
@ -32,19 +31,19 @@ class DummyBuilder(Builder):
pass
def get_outdated_docs(self):
# type: () -> Set[unicode]
# type: () -> Set[str]
return self.env.found_docs
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
return ''
def prepare_writing(self, docnames):
# type: (Set[unicode]) -> None
# type: (Set[str]) -> None
pass
def write_doc(self, docname, doctree):
# type: (unicode, nodes.Node) -> None
# type: (str, nodes.Node) -> None
pass
def finish(self):
@ -53,7 +52,7 @@ class DummyBuilder(Builder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(DummyBuilder)
return {


@ -28,7 +28,6 @@ if False:
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -141,7 +140,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
return metadata
def prepare_writing(self, docnames):
# type: (Set[unicode]) -> None
# type: (Set[str]) -> None
super(Epub3Builder, self).prepare_writing(docnames)
writing_mode = self.config.epub_writing_mode
@ -151,7 +150,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
self.globalcontext['skip_ua_compatible'] = True
def build_navlist(self, navnodes):
# type: (List[Dict[unicode, Any]]) -> List[NavPoint]
# type: (List[Dict[str, Any]]) -> List[NavPoint]
"""Create the toc navigation structure.
This method is almost the same as the build_navpoints method in epub.py.
@ -205,7 +204,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
return metadata
def build_navigation_doc(self, outdir, outname):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
"""Write the metainfo file nav.xhtml."""
logger.info(__('writing %s file...'), outname)
@ -231,7 +230,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
def convert_epub_css_files(app, config):
# type: (Sphinx, Config) -> None
"""Convert string-style epub_css_files entries to the tuple style."""
epub_css_files = [] # type: List[Tuple[unicode, Dict]]
epub_css_files = [] # type: List[Tuple[str, Dict]]
for entry in config.epub_css_files:
if isinstance(entry, str):
epub_css_files.append((entry, {}))
@ -247,7 +246,7 @@ def convert_epub_css_files(app, config):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(Epub3Builder)
# config values

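convert_epub_css_files() above accepts plain strings and normalizes them to (filename, attributes) tuples. A conf.py sketch showing both accepted forms (the 'media' attribute is an assumption, shown only to illustrate the tuple form):

# conf.py -- illustrative only
epub_css_files = [
    'custom.css',                       # string form, converted to ('custom.css', {})
    ('print.css', {'media': 'print'}),  # (filename, attributes) tuple form
]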

@ -14,12 +14,11 @@ from __future__ import unicode_literals
from codecs import open
from collections import defaultdict, OrderedDict
from datetime import datetime, tzinfo, timedelta
from io import StringIO
from os import path, walk, getenv
from time import time
from uuid import uuid4
from six import StringIO
from sphinx.builders import Builder
from sphinx.domains.python import pairindextypes
from sphinx.errors import ThemeError
@ -37,7 +36,6 @@ if False:
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.i18n import CatalogInfo # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -69,13 +67,13 @@ class Catalog:
def __init__(self):
# type: () -> None
self.messages = [] # type: List[unicode]
self.messages = [] # type: List[str]
# retain insertion order, a la OrderedDict
self.metadata = OrderedDict() # type: Dict[unicode, List[Tuple[unicode, int, unicode]]] # NOQA
self.metadata = OrderedDict() # type: Dict[str, List[Tuple[str, int, str]]]
# msgid -> file, line, uid
def add(self, msg, origin):
# type: (unicode, Union[nodes.Element, MsgOrigin]) -> None
# type: (str, Union[nodes.Element, MsgOrigin]) -> None
if not hasattr(origin, 'uid'):
# Nodes that are replicated like todo don't have a uid,
# however i18n is also unnecessary.
@ -92,7 +90,7 @@ class MsgOrigin:
"""
def __init__(self, source, line):
# type: (unicode, int) -> None
# type: (str, int) -> None
self.source = source
self.line = line
self.uid = uuid4().hex
@ -125,26 +123,26 @@ class I18nBuilder(Builder):
self.env.set_versioning_method(self.versioning_method,
self.env.config.gettext_uuid)
self.tags = I18nTags()
self.catalogs = defaultdict(Catalog) # type: DefaultDict[unicode, Catalog]
self.catalogs = defaultdict(Catalog) # type: DefaultDict[str, Catalog]
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
return ''
def get_outdated_docs(self):
# type: () -> Set[unicode]
# type: () -> Set[str]
return self.env.found_docs
def prepare_writing(self, docnames):
# type: (Set[unicode]) -> None
# type: (Set[str]) -> None
return
def compile_catalogs(self, catalogs, message):
# type: (Set[CatalogInfo], unicode) -> None
# type: (Set[CatalogInfo], str) -> None
return
def write_doc(self, docname, doctree):
# type: (unicode, nodes.document) -> None
# type: (str, nodes.document) -> None
catalog = self.catalogs[find_catalog(docname, self.config.gettext_compact)]
for node, msg in extract_messages(doctree):
@ -194,7 +192,7 @@ ltz = LocalTimeZone()
def should_write(filepath, new_content):
# type: (unicode, unicode) -> bool
# type: (str, str) -> bool
if not path.exists(filepath):
return True
try:
@ -226,7 +224,7 @@ class MessageCatalogBuilder(I18nBuilder):
self.templates.init(self)
def _collect_templates(self):
# type: () -> Set[unicode]
# type: () -> Set[str]
template_files = set()
for template_path in self.config.templates_path:
tmpl_abs_path = path.join(self.app.srcdir, template_path)
@ -258,7 +256,7 @@ class MessageCatalogBuilder(I18nBuilder):
raise ThemeError('%s: %r' % (template, exc))
def build(self, docnames, summary=None, method='update'):
# type: (Iterable[unicode], unicode, unicode) -> None
# type: (Iterable[str], str, str) -> None
self._extract_from_template()
super(MessageCatalogBuilder, self).build(docnames, summary, method)
@ -310,7 +308,7 @@ class MessageCatalogBuilder(I18nBuilder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(MessageCatalogBuilder)
app.add_config_value('gettext_compact', True, 'gettext')

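The gettext builder section above reads two confvals worth calling out. An illustrative conf.py sketch; gettext_compact's default comes from the add_config_value() call above, while the gettext_uuid value is just an example:

# conf.py -- illustrative i18n settings read by the builders above
gettext_compact = True    # default registered via add_config_value() above
gettext_uuid = False      # consulted when setting the doctree versioning method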

@ -60,7 +60,6 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.domains import Domain, Index, IndexEntry # NOQA
from sphinx.util.tags import Tags # NOQA
from sphinx.util.typing import unicode # NOQA
# Experimental HTML5 Writer
if is_html5_writer_available():
@ -79,7 +78,7 @@ return_codes_re = re.compile('[\r\n]+')
def get_stable_hash(obj):
# type: (Any) -> unicode
# type: (Any) -> str
"""
Return a stable hash for a Python data structure. We can't just use
the md5 of str(obj) since for example dictionary items are enumerated
@ -99,11 +98,11 @@ class Stylesheet(text_type):
its filename (str).
"""
attributes = None # type: Dict[unicode, unicode]
filename = None # type: unicode
attributes = None # type: Dict[str, str]
filename = None # type: str
def __new__(cls, filename, *args, **attributes):
# type: (unicode, unicode, unicode) -> None
# type: (str, str, str) -> None
self = text_type.__new__(cls, filename) # type: ignore
self.filename = filename
self.attributes = attributes
@ -119,14 +118,14 @@ class Stylesheet(text_type):
class JSContainer(list):
"""The container for JavaScript scripts."""
def insert(self, index, obj):
# type: (int, unicode) -> None
# type: (int, str) -> None
warnings.warn('builder.script_files is deprecated. '
'Please use app.add_js_file() instead.',
RemovedInSphinx30Warning, stacklevel=2)
super(JSContainer, self).insert(index, obj)
def extend(self, other): # type: ignore
# type: (List[unicode]) -> None
# type: (List[str]) -> None
warnings.warn('builder.script_files is deprecated. '
'Please use app.add_js_file() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@ -134,7 +133,7 @@ class JSContainer(list):
self.append(item)
def __iadd__(self, other): # type: ignore
# type: (List[unicode]) -> JSContainer
# type: (List[str]) -> JSContainer
warnings.warn('builder.script_files is deprecated. '
'Please use app.add_js_file() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@ -143,7 +142,7 @@ class JSContainer(list):
return self
def __add__(self, other):
# type: (List[unicode]) -> JSContainer
# type: (List[str]) -> JSContainer
ret = JSContainer(self)
ret += other
return ret
@ -156,11 +155,11 @@ class JavaScript(text_type):
its filename (str).
"""
attributes = None # type: Dict[unicode, unicode]
filename = None # type: unicode
attributes = None # type: Dict[str, str]
filename = None # type: str
def __new__(cls, filename, **attributes):
# type: (unicode, **unicode) -> None
# type: (str, **str) -> None
self = text_type.__new__(cls, filename) # type: ignore
self.filename = filename
self.attributes = attributes
@ -193,7 +192,7 @@ class BuildInfo:
raise ValueError(__('build info file is broken: %r') % exc)
def __init__(self, config=None, tags=None, config_categories=[]):
# type: (Config, Tags, List[unicode]) -> None
# type: (Config, Tags, List[str]) -> None
self.config_hash = u''
self.tags_hash = u''
@ -249,8 +248,8 @@ class StandaloneHTMLBuilder(Builder):
# use html5 translator by default
default_html5_translator = False
imgpath = None # type: unicode
domain_indices = [] # type: List[Tuple[unicode, Type[Index], List[Tuple[unicode, List[IndexEntry]]], bool]] # NOQA
imgpath = None # type: str
domain_indices = [] # type: List[Tuple[str, Type[Index], List[Tuple[str, List[IndexEntry]]], bool]] # NOQA
# cached publisher object for snippets
_publisher = None
@ -260,7 +259,7 @@ class StandaloneHTMLBuilder(Builder):
super(StandaloneHTMLBuilder, self).__init__(app)
# CSS files
self.css_files = [] # type: List[Dict[unicode, unicode]]
self.css_files = [] # type: List[Dict[str, str]]
# JS files
self.script_files = JSContainer() # type: List[JavaScript]
@ -271,9 +270,9 @@ class StandaloneHTMLBuilder(Builder):
# basename of images directory
self.imagedir = '_images'
# section numbers for headings in the currently visited document
self.secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
self.secnumbers = {} # type: Dict[str, Tuple[int, ...]]
# currently written docname
self.current_docname = None # type: unicode
self.current_docname = None # type: str
self.init_templates()
self.init_highlighter()
@ -302,7 +301,7 @@ class StandaloneHTMLBuilder(Builder):
return BuildInfo(self.config, self.tags, ['html'])
def _get_translations_js(self):
# type: () -> unicode
# type: () -> str
candidates = [path.join(dir, self.config.language,
'LC_MESSAGES', 'sphinx.js')
for dir in self.config.locale_dirs] + \
@ -317,7 +316,7 @@ class StandaloneHTMLBuilder(Builder):
return None
def get_theme_config(self):
# type: () -> Tuple[unicode, Dict]
# type: () -> Tuple[str, Dict]
return self.config.html_theme, self.config.html_theme_options
def init_templates(self):
@ -349,7 +348,7 @@ class StandaloneHTMLBuilder(Builder):
self.add_css_file(filename, **attrs)
def add_css_file(self, filename, **kwargs):
# type: (unicode, **unicode) -> None
# type: (str, **str) -> None
if '://' not in filename:
filename = posixpath.join('_static', filename)
@ -372,7 +371,7 @@ class StandaloneHTMLBuilder(Builder):
self.add_js_file('translations.js')
def add_js_file(self, filename, **kwargs):
# type: (unicode, **unicode) -> None
# type: (str, **str) -> None
if filename and '://' not in filename:
filename = posixpath.join('_static', filename)
@ -392,7 +391,7 @@ class StandaloneHTMLBuilder(Builder):
@property
def math_renderer_name(self):
# type: () -> unicode
# type: () -> str
name = self.get_builder_config('math_renderer', 'html')
if name is not None:
# use given name
@ -412,14 +411,13 @@ class StandaloneHTMLBuilder(Builder):
return None
def get_outdated_docs(self):
# type: () -> Iterator[unicode]
# type: () -> Iterator[str]
try:
with open(path.join(self.outdir, '.buildinfo')) as fp:
buildinfo = BuildInfo.load(fp)
if self.build_info != buildinfo:
for docname in self.env.found_docs:
yield docname
yield from self.env.found_docs
return
except ValueError as exc:
logger.warning(__('Failed to read build info file: %r'), exc)
@ -450,11 +448,11 @@ class StandaloneHTMLBuilder(Builder):
pass
def get_asset_paths(self):
# type: () -> List[unicode]
# type: () -> List[str]
return self.config.html_extra_path + self.config.html_static_path
def render_partial(self, node):
# type: (nodes.Node) -> Dict[unicode, unicode]
# type: (nodes.Node) -> Dict[str, str]
"""Utility: Render a lone doctree node."""
if node is None:
return {'fragment': ''}
@ -480,7 +478,7 @@ class StandaloneHTMLBuilder(Builder):
return pub.writer.parts
def prepare_writing(self, docnames):
# type: (Set[unicode]) -> None
# type: (Set[str]) -> None
# create the search indexer
self.indexer = None
if self.search:
@ -509,7 +507,7 @@ class StandaloneHTMLBuilder(Builder):
domain = None # type: Domain
domain = self.env.domains[domain_name]
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name) # type: unicode
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
@ -538,7 +536,7 @@ class StandaloneHTMLBuilder(Builder):
self.relations = self.env.collect_relations()
rellinks = [] # type: List[Tuple[unicode, unicode, unicode, unicode]]
rellinks = [] # type: List[Tuple[str, str, str, str]]
if self.use_index:
rellinks.append(('genindex', _('General Index'), 'I', _('index')))
for indexname, indexcls, content, collapse in self.domain_indices:
@ -582,7 +580,7 @@ class StandaloneHTMLBuilder(Builder):
'logo': logo,
'favicon': favicon,
'html5_doctype': self.config.html_experimental_html5_writer and html5_ready,
} # type: Dict[unicode, Any]
}
if self.theme:
self.globalcontext.update(
('theme_' + key, val) for (key, val) in
@ -590,7 +588,7 @@ class StandaloneHTMLBuilder(Builder):
self.globalcontext.update(self.config.html_context)
def get_doc_context(self, docname, body, metatags):
# type: (unicode, unicode, unicode) -> Dict[unicode, Any]
# type: (str, str, str) -> Dict[str, Any]
"""Collect items for the template context of a page."""
# find out relations
prev = next = None
@ -671,14 +669,14 @@ class StandaloneHTMLBuilder(Builder):
}
def write_doc(self, docname, doctree):
# type: (unicode, nodes.document) -> None
# type: (str, nodes.document) -> None
destination = StringOutput(encoding='utf-8')
doctree.settings = self.docsettings
self.secnumbers = self.env.toc_secnumbers.get(docname, {})
self.fignumbers = self.env.toc_fignumbers.get(docname, {})
self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads') # type: unicode
self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads')
self.current_docname = docname
self.docwriter.write(doctree, destination)
self.docwriter.assemble_parts()
@ -689,7 +687,7 @@ class StandaloneHTMLBuilder(Builder):
self.handle_page(docname, ctx, event_arg=doctree)
def write_doc_serialized(self, docname, doctree):
# type: (unicode, nodes.document) -> None
# type: (str, nodes.document) -> None
self.imgpath = relative_uri(self.get_target_uri(docname), self.imagedir)
self.post_process_images(doctree)
title_node = self.env.longtitles.get(docname)
@ -809,7 +807,7 @@ class StandaloneHTMLBuilder(Builder):
def copy_download_files(self):
# type: () -> None
def to_relpath(f):
# type: (unicode) -> unicode
# type: (str) -> str
return relative_path(self.srcdir, f)
# copy downloadable files
if self.env.dlfiles:
@ -951,7 +949,7 @@ class StandaloneHTMLBuilder(Builder):
reference.append(node)
def load_indexer(self, docnames):
# type: (Iterable[unicode]) -> None
# type: (Iterable[str]) -> None
keep = set(self.env.all_docs) - set(docnames)
try:
searchindexfn = path.join(self.outdir, self.searchindex_filename)
@ -970,7 +968,7 @@ class StandaloneHTMLBuilder(Builder):
self.indexer.prune(keep)
def index_page(self, pagename, doctree, title):
# type: (unicode, nodes.document, unicode) -> None
# type: (str, nodes.document, str) -> None
# only index pages with title
if self.indexer is not None and title:
filename = self.env.doc2path(pagename, base=None)
@ -981,20 +979,20 @@ class StandaloneHTMLBuilder(Builder):
self.indexer.feed(pagename, title, doctree) # type: ignore
def _get_local_toctree(self, docname, collapse=True, **kwds):
# type: (unicode, bool, Any) -> unicode
# type: (str, bool, Any) -> str
if 'includehidden' not in kwds:
kwds['includehidden'] = False
return self.render_partial(TocTree(self.env).get_toctree_for(
docname, self, collapse, **kwds))['fragment']
def get_outfilename(self, pagename):
# type: (unicode) -> unicode
# type: (str) -> str
return path.join(self.outdir, os_path(pagename) + self.out_suffix)
def add_sidebars(self, pagename, ctx):
# type: (unicode, Dict) -> None
# type: (str, Dict) -> None
def has_wildcard(pattern):
# type: (unicode) -> bool
# type: (str) -> bool
return any(char in pattern for char in '*?[')
sidebars = None
matched = None
@ -1045,12 +1043,12 @@ class StandaloneHTMLBuilder(Builder):
# --------- these are overwritten by the serialization builder
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
return docname + self.link_suffix
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
# type: (unicode, Dict, unicode, unicode, Any) -> None
# type: (str, Dict, str, str, Any) -> None
ctx = self.globalcontext.copy()
# current_page_name is kept for backwards compatibility
ctx['pagename'] = ctx['current_page_name'] = pagename
@ -1067,7 +1065,7 @@ class StandaloneHTMLBuilder(Builder):
ctx['pageurl'] = None
def pathto(otheruri, resource=False, baseuri=default_baseuri):
# type: (unicode, bool, unicode) -> unicode
# type: (str, bool, str) -> str
if resource and '://' in otheruri:
# allow non-local resources given by scheme
return otheruri
@ -1080,7 +1078,7 @@ class StandaloneHTMLBuilder(Builder):
ctx['pathto'] = pathto
def css_tag(css):
# type: (Stylesheet) -> unicode
# type: (Stylesheet) -> str
attrs = []
for key in sorted(css.attributes):
value = css.attributes[key]
@ -1091,7 +1089,7 @@ class StandaloneHTMLBuilder(Builder):
ctx['css_tag'] = css_tag
def hasdoc(name):
# type: (unicode) -> bool
# type: (str) -> bool
if name in self.env.all_docs:
return True
elif name == 'search' and self.search:
@ -1102,7 +1100,7 @@ class StandaloneHTMLBuilder(Builder):
ctx['hasdoc'] = hasdoc
def warn(*args, **kwargs):
# type: (Any, Any) -> unicode
# type: (Any, Any) -> str
"""Simple warn() wrapper for themes."""
warnings.warn('The template function warn() was deprecated. '
'Use warning() instead.',
@ -1150,7 +1148,7 @@ class StandaloneHTMLBuilder(Builder):
copyfile(self.env.doc2path(pagename), source_name)
def update_page_context(self, pagename, templatename, ctx, event_arg):
# type: (unicode, unicode, Dict, Any) -> None
# type: (str, str, Dict, Any) -> None
pass
def handle_finish(self):
@ -1193,7 +1191,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
name = 'dirhtml'
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
if docname == 'index':
return ''
if docname.endswith(SEP + 'index'):
@ -1201,7 +1199,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
return docname + SEP
def get_outfilename(self, pagename):
# type: (unicode) -> unicode
# type: (str) -> str
if pagename == 'index' or pagename.endswith(SEP + 'index'):
outfilename = path.join(self.outdir, os_path(pagename) +
self.out_suffix)
@ -1212,7 +1210,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
return outfilename
def prepare_writing(self, docnames):
# type: (Set[unicode]) -> None
# type: (Set[str]) -> None
super(DirectoryHTMLBuilder, self).prepare_writing(docnames)
self.globalcontext['no_search_suffix'] = True
@ -1228,11 +1226,11 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
copysource = False
def get_outdated_docs(self): # type: ignore
# type: () -> Union[unicode, List[unicode]]
# type: () -> Union[str, List[str]]
return 'all documents'
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
if docname in self.env.all_docs:
# all references are on the same page...
return self.config.master_doc + self.out_suffix + \
@ -1242,7 +1240,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return docname + self.out_suffix
def get_relative_uri(self, from_, to, typ=None):
# type: (unicode, unicode, unicode) -> unicode
# type: (str, str, str) -> str
# ignore source
return self.get_target_uri(to, typ)
@ -1262,7 +1260,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
refnode['refuri'] = fname + refuri[hashindex:]
def _get_local_toctree(self, docname, collapse=True, **kwds):
# type: (unicode, bool, Any) -> unicode
# type: (str, bool, Any) -> str
if 'includehidden' not in kwds:
kwds['includehidden'] = False
toctree = TocTree(self.env).get_toctree_for(docname, self, collapse, **kwds)
@ -1281,7 +1279,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return tree
def assemble_toc_secnumbers(self):
# type: () -> Dict[unicode, Dict[unicode, Tuple[int, ...]]]
# type: () -> Dict[str, Dict[str, Tuple[int, ...]]]
# Assemble toc_secnumbers to resolve section numbers on SingleHTML.
# Merge all secnumbers to single secnumber.
#
@ -1291,7 +1289,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
#
# There is related code in inline_all_toctrees() and
# HTMLTranslator#add_secnumber().
new_secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
new_secnumbers = {} # type: Dict[str, Tuple[int, ...]]
for docname, secnums in self.env.toc_secnumbers.items():
for id, secnum in secnums.items():
alias = "%s/%s" % (docname, id)
@ -1300,7 +1298,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return {self.config.master_doc: new_secnumbers}
def assemble_toc_fignumbers(self):
# type: () -> Dict[unicode, Dict[unicode, Dict[unicode, Tuple[int, ...]]]] # NOQA
# type: () -> Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]
# Assemble toc_fignumbers to resolve figure numbers on SingleHTML.
# Merge all fignumbers to single fignumber.
#
@ -1310,7 +1308,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
#
# There is related code in inline_all_toctrees() and
# HTMLTranslator#add_fignumber().
new_fignumbers = {} # type: Dict[unicode, Dict[unicode, Tuple[int, ...]]]
new_fignumbers = {} # type: Dict[str, Dict[str, Tuple[int, ...]]]
# {u'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, u'bar': {'figure': {'id1': (3,)}}}
for docname, fignumlist in self.env.toc_fignumbers.items():
for figtype, fignums in fignumlist.items():
@ -1322,7 +1320,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return {self.config.master_doc: new_fignumbers}
def get_doc_context(self, docname, body, metatags):
# type: (unicode, unicode, unicode) -> Dict
# type: (str, str, str) -> Dict
# no relation links...
toctree = TocTree(self.env).get_toctree_for(self.config.master_doc, self, False)
# if there is no toctree, toc is None
@ -1404,7 +1402,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
additional_dump_args = () # type: Tuple
#: the filename for the global context file
globalcontext_filename = None # type: unicode
globalcontext_filename = None # type: str
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
@ -1423,7 +1421,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
self.use_index = self.get_builder_config('use_index', 'html')
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
if docname == 'index':
return ''
if docname.endswith(SEP + 'index'):
@ -1431,7 +1429,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
return docname + SEP
def dump_context(self, context, filename):
# type: (Dict, unicode) -> None
# type: (Dict, str) -> None
if self.implementation_dumps_unicode:
with open(filename, 'w', encoding='utf-8') as ft:
self.implementation.dump(context, ft, *self.additional_dump_args)
@ -1441,7 +1439,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
def handle_page(self, pagename, ctx, templatename='page.html',
outfilename=None, event_arg=None):
# type: (unicode, Dict, unicode, unicode, Any) -> None
# type: (str, Dict, str, str, Any) -> None
ctx['current_page_name'] = pagename
self.add_sidebars(pagename, ctx)
@ -1528,7 +1526,7 @@ class JSONHTMLBuilder(SerializingHTMLBuilder):
def convert_html_css_files(app, config):
# type: (Sphinx, Config) -> None
"""This converts string styled html_css_files to tuple styled one."""
html_css_files = [] # type: List[Tuple[unicode, Dict]]
html_css_files = [] # type: List[Tuple[str, Dict]]
for entry in config.html_css_files:
if isinstance(entry, str):
html_css_files.append((entry, {}))
@ -1546,7 +1544,7 @@ def convert_html_css_files(app, config):
def convert_html_js_files(app, config):
# type: (Sphinx, Config) -> None
"""This converts string styled html_js_files to tuple styled one."""
html_js_files = [] # type: List[Tuple[unicode, Dict]]
html_js_files = [] # type: List[Tuple[str, Dict]]
for entry in config.html_js_files:
if isinstance(entry, str):
html_js_files.append((entry, {}))
@ -1562,7 +1560,7 @@ def convert_html_js_files(app, config):
def setup_js_tag_helper(app, pagename, templatexname, context, doctree):
# type: (Sphinx, unicode, unicode, Dict, nodes.Node) -> None
# type: (Sphinx, str, str, Dict, nodes.Node) -> None
"""Set up js_tag() template helper.
.. note:: This setup function is added to keep compatibility with webhelper.
@ -1570,9 +1568,9 @@ def setup_js_tag_helper(app, pagename, templatexname, context, doctree):
pathto = context.get('pathto')
def js_tag(js):
# type: (JavaScript) -> unicode
# type: (JavaScript) -> str
attrs = []
body = '' # type: unicode
body = ''
if isinstance(js, JavaScript):
for key in sorted(js.attributes):
value = js.attributes[key]
@ -1606,7 +1604,7 @@ def validate_math_renderer(app):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
# builders
app.add_builder(StandaloneHTMLBuilder)
app.add_builder(DirectoryHTMLBuilder)
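
The hunks in this file all apply one mechanical pattern: comment-style annotations that referenced the Python 2 ``unicode`` alias now name the builtin ``str``, and the ``from sphinx.util.typing import unicode`` shims are dropped. A minimal sketch of the before/after shape, using a made-up helper ``doc2uri()`` rather than any function from the diff; since ``# type:`` comments are ignored at runtime and only read by type checkers, behaviour is unchanged:

# Before: Python 2 era comment annotation spelled with the ``unicode`` alias.
def doc2uri(docname, suffix='.html'):
    # type: (unicode, unicode) -> unicode
    return docname + suffix

# After: the identical comment syntax, now spelled with the builtin ``str``.
def doc2uri(docname, suffix='.html'):
    # type: (str, str) -> str
    return docname + suffix

print(doc2uri('index'))  # -> 'index.html'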

View File

@ -9,7 +9,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
from os import path
@ -31,7 +30,6 @@ if False:
from typing import Any, Dict, IO, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -209,13 +207,13 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
self.lcid, self.encoding = locale
def open_file(self, outdir, basename, mode='w'):
# type: (unicode, unicode, unicode) -> IO
# type: (str, str, str) -> IO
# open a file with the correct encoding for the selected language
return open(path.join(outdir, basename), mode, encoding=self.encoding,
errors='xmlcharrefreplace')
def update_page_context(self, pagename, templatename, ctx, event_arg):
# type: (unicode, unicode, Dict, unicode) -> None
# type: (str, str, Dict, str) -> None
ctx['encoding'] = self.encoding
def handle_finish(self):
@ -223,7 +221,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
self.build_hhx(self.outdir, self.config.htmlhelp_basename)
def write_doc(self, docname, doctree):
# type: (unicode, nodes.document) -> None
# type: (str, nodes.document) -> None
for node in doctree.traverse(nodes.reference):
# add ``target=_blank`` attributes to external links
if node.get('internal') is None and 'refuri' in node:
@ -232,7 +230,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
super(HTMLHelpBuilder, self).write_doc(docname, doctree)
def build_hhx(self, outdir, outname):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
logger.info(__('dumping stopword list...'))
with self.open_file(outdir, outname + '.stp') as f:
for word in sorted(stopwords):
@ -306,9 +304,9 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
f.write('<UL>\n')
def write_index(title, refs, subitems):
# type: (unicode, List[Tuple[unicode, unicode]], List[Tuple[unicode, List[Tuple[unicode, unicode]]]]) -> None # NOQA
# type: (str, List[Tuple[str, str]], List[Tuple[str, List[Tuple[str, str]]]]) -> None # NOQA
def write_param(name, value):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
item = ' <param name="%s" value="%s">\n' % \
(name, value)
f.write(item)
@ -337,13 +335,13 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
def default_htmlhelp_basename(config):
# type: (Config) -> unicode
# type: (Config) -> str
"""Better default htmlhelp_basename setting."""
return make_filename_from_project(config.project) + 'doc'
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(HTMLHelpBuilder)

View File

@ -45,7 +45,6 @@ if False:
from typing import Any, Dict, Iterable, List, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.util.typing import unicode # NOQA
XINDY_LANG_OPTIONS = {
@ -103,11 +102,11 @@ XINDY_LANG_OPTIONS = {
'el': '-L greek -C utf8 ',
# FIXME, not compatible with [:2] slice but does Sphinx support Greek ?
'el-polyton': '-L greek -C polytonic-utf8 ',
} # type: Dict[unicode, unicode]
}
XINDY_CYRILLIC_SCRIPTS = [
'be', 'bg', 'mk', 'mn', 'ru', 'sr', 'sh', 'uk',
] # type: List[unicode]
]
logger = logging.getLogger(__name__)
@ -130,27 +129,27 @@ class LaTeXBuilder(Builder):
def init(self):
# type: () -> None
self.context = {} # type: Dict[unicode, Any]
self.docnames = [] # type: Iterable[unicode]
self.document_data = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode, bool]] # NOQA
self.context = {} # type: Dict[str, Any]
self.docnames = [] # type: Iterable[str]
self.document_data = [] # type: List[Tuple[str, str, str, str, str, bool]]
self.usepackages = self.app.registry.latex_packages
texescape.init()
self.init_context()
def get_outdated_docs(self):
# type: () -> Union[unicode, List[unicode]]
# type: () -> Union[str, List[str]]
return 'all documents' # for now
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
if docname not in self.docnames:
raise NoUri
else:
return '%' + docname
def get_relative_uri(self, from_, to, typ=None):
# type: (unicode, unicode, unicode) -> unicode
# type: (str, str, str) -> str
# ignore source path
return self.get_target_uri(to, typ)
@ -162,7 +161,7 @@ class LaTeXBuilder(Builder):
'will be written'))
return
# assign subdirs to titles
self.titles = [] # type: List[Tuple[unicode, unicode]]
self.titles = [] # type: List[Tuple[str, str]]
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
@ -256,7 +255,7 @@ class LaTeXBuilder(Builder):
logger.info("done")
def get_contentsname(self, indexfile):
# type: (unicode) -> unicode
# type: (str) -> str
tree = self.env.get_doctree(indexfile)
contentsname = None
for toctree in tree.traverse(addnodes.toctree):
@ -267,12 +266,12 @@ class LaTeXBuilder(Builder):
return contentsname
def update_doc_context(self, title, author):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
self.context['title'] = title
self.context['author'] = author
def assemble_doctree(self, indexfile, toctree_only, appendices):
# type: (unicode, bool, List[unicode]) -> nodes.document
# type: (str, bool, List[str]) -> nodes.document
from docutils import nodes # NOQA
self.docnames = set([indexfile] + appendices)
logger.info(darkgreen(indexfile) + " ", nonl=1)
@ -427,7 +426,7 @@ def validate_config_values(app, config):
def default_latex_engine(config):
# type: (Config) -> unicode
# type: (Config) -> str
""" Better default latex_engine settings for specific languages. """
if config.language == 'ja':
return 'platex'
@ -436,7 +435,7 @@ def default_latex_engine(config):
def default_latex_docclass(config):
# type: (Config) -> Dict[unicode, unicode]
# type: (Config) -> Dict[str, str]
""" Better default latex_docclass settings for specific languages. """
if config.language == 'ja':
return {'manual': 'jsbook',
@ -452,7 +451,7 @@ def default_latex_use_xindy(config):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(LaTeXBuilder)
app.add_post_transform(CitationReferenceTransform)
app.add_post_transform(MathReferenceTransform)

View File

@ -23,7 +23,6 @@ from sphinx.util.nodes import NodeMatcher
if False:
# For type annotation
from typing import Any, Dict, List, Set, Tuple, Union # NOQA
from sphinx.util.typing import unicode # NOQA
URI_SCHEMES = ('mailto:', 'http:', 'https:', 'ftp:')
@ -92,7 +91,7 @@ class ShowUrlsTransform(SphinxTransform):
node.parent.insert(index + 1, textnode)
def get_docname_for_node(self, node):
# type: (nodes.Node) -> unicode
# type: (nodes.Node) -> str
while node:
if isinstance(node, nodes.document):
return self.env.path2doc(node['source'])
@ -104,7 +103,7 @@ class ShowUrlsTransform(SphinxTransform):
return None # never reached here. only for type hinting
def create_footnote(self, uri, docname):
# type: (unicode, unicode) -> Tuple[nodes.footnote, nodes.footnote_reference]
# type: (str, str) -> Tuple[nodes.footnote, nodes.footnote_reference]
reference = nodes.reference('', nodes.Text(uri), refuri=uri, nolinkurl=True)
footnote = nodes.footnote(uri, auto=1, docname=docname)
footnote['names'].append('#')
@ -154,7 +153,7 @@ class FootnoteCollector(nodes.NodeVisitor):
def __init__(self, document):
# type: (nodes.document) -> None
self.auto_footnotes = [] # type: List[nodes.footnote]
self.used_footnote_numbers = set() # type: Set[unicode]
self.used_footnote_numbers = set() # type: Set[str]
self.footnote_refs = [] # type: List[nodes.footnote_reference]
super(FootnoteCollector, self).__init__(document)
@ -361,7 +360,7 @@ class LaTeXFootnoteTransform(SphinxTransform):
class LaTeXFootnoteVisitor(nodes.NodeVisitor):
def __init__(self, document, footnotes):
# type: (nodes.document, List[nodes.footnote]) -> None
self.appeared = set() # type: Set[Tuple[unicode, unicode]]
self.appeared = set() # type: Set[Tuple[str, str]]
self.footnotes = footnotes # type: List[nodes.footnote]
self.pendings = [] # type: List[nodes.footnote]
self.table_footnotes = [] # type: List[nodes.footnote]

View File

@ -34,7 +34,6 @@ if False:
from typing import Any, Dict, List, Set, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.requests.requests import Response # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -44,7 +43,7 @@ class AnchorCheckParser(HTMLParser):
"""Specialized HTML parser that looks for a specific anchor."""
def __init__(self, search_anchor):
# type: (unicode) -> None
# type: (str) -> None
super(AnchorCheckParser, self).__init__()
self.search_anchor = search_anchor
@ -59,7 +58,7 @@ class AnchorCheckParser(HTMLParser):
def check_anchor(response, anchor):
# type: (Response, unicode) -> bool
# type: (Response, str) -> bool
"""Reads HTML data from a response object `response` searching for `anchor`.
Returns True if anchor was found, False otherwise.
"""
@ -87,9 +86,9 @@ class CheckExternalLinksBuilder(Builder):
self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.anchors_ignore = [re.compile(x)
for x in self.app.config.linkcheck_anchors_ignore]
self.good = set() # type: Set[unicode]
self.broken = {} # type: Dict[unicode, unicode]
self.redirected = {} # type: Dict[unicode, Tuple[unicode, int]]
self.good = set() # type: Set[str]
self.broken = {} # type: Dict[str, str]
self.redirected = {} # type: Dict[str, Tuple[str, int]]
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
@ -117,7 +116,7 @@ class CheckExternalLinksBuilder(Builder):
kwargs['timeout'] = self.app.config.linkcheck_timeout
def check_uri():
# type: () -> Tuple[unicode, unicode, int]
# type: () -> Tuple[str, str, int]
# split off anchor
if '#' in uri:
req_url, anchor = uri.split('#', 1)
@ -181,7 +180,7 @@ class CheckExternalLinksBuilder(Builder):
return 'redirected', new_url, 0
def check():
# type: () -> Tuple[unicode, unicode, int]
# type: () -> Tuple[str, str, int]
# check for various conditions without bothering the network
if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'ftp:')):
return 'unchecked', '', 0
@ -220,7 +219,7 @@ class CheckExternalLinksBuilder(Builder):
self.rqueue.put((uri, docname, lineno, status, info, code))
def process_result(self, result):
# type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
# type: (Tuple[str, str, int, str, str, int]) -> None
uri, docname, lineno, status, info, code = result
if status == 'unchecked':
return
@ -258,19 +257,19 @@ class CheckExternalLinksBuilder(Builder):
logger.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
return ''
def get_outdated_docs(self):
# type: () -> Set[unicode]
# type: () -> Set[str]
return self.env.found_docs
def prepare_writing(self, docnames):
# type: (Set[unicode]) -> None
# type: (Set[str]) -> None
return
def write_doc(self, docname, doctree):
# type: (unicode, nodes.Node) -> None
# type: (str, nodes.Node) -> None
logger.info('')
n = 0
for node in doctree.traverse(nodes.reference):
@ -293,7 +292,7 @@ class CheckExternalLinksBuilder(Builder):
self.app.statuscode = 1
def write_entry(self, what, docname, line, uri):
# type: (unicode, unicode, int, unicode) -> None
# type: (str, str, int, str) -> None
with open(path.join(self.outdir, 'output.txt'), 'a', encoding='utf-8') as output:
output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
line, what, uri))
@ -305,7 +304,7 @@ class CheckExternalLinksBuilder(Builder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(CheckExternalLinksBuilder)
app.add_config_value('linkcheck_ignore', [], None)
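
The anchor checking in this builder boils down to a small ``HTMLParser`` subclass that flips a flag once the requested fragment is seen. A self-contained sketch of that idea; ``AnchorProbe`` is an illustrative name, not the ``AnchorCheckParser`` class above, and it parses a literal string instead of a streamed response:

from html.parser import HTMLParser

class AnchorProbe(HTMLParser):
    """Report whether a document defines a given #anchor via id= or name=."""

    def __init__(self, search_anchor):
        super().__init__()
        self.search_anchor = search_anchor
        self.found = False

    def handle_starttag(self, tag, attrs):
        for key, value in attrs:
            if key in ('id', 'name') and value == self.search_anchor:
                self.found = True

probe = AnchorProbe('installation')
probe.feed('<h2 id="installation">Installation</h2>')
print(probe.found)  # -> True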

View File

@ -29,7 +29,6 @@ if False:
from typing import Any, Dict, List, Set, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -44,7 +43,7 @@ class ManualPageBuilder(Builder):
epilog = __('The manual pages are in %(outdir)s.')
default_translator_class = ManualPageTranslator
supported_image_types = [] # type: List[unicode]
supported_image_types = [] # type: List[str]
def init(self):
# type: () -> None
@ -53,11 +52,11 @@ class ManualPageBuilder(Builder):
'will be written'))
def get_outdated_docs(self):
# type: () -> Union[unicode, List[unicode]]
# type: () -> Union[str, List[str]]
return 'all manpages' # for now
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
if typ == 'token':
return ''
raise NoUri
@ -96,7 +95,7 @@ class ManualPageBuilder(Builder):
encoding='utf-8')
tree = self.env.get_doctree(docname)
docnames = set() # type: Set[unicode]
docnames = set() # type: Set[str]
largetree = inline_all_toctrees(self, docnames, docname, tree,
darkgreen, [docname])
largetree.settings = docsettings
@ -115,7 +114,7 @@ class ManualPageBuilder(Builder):
def default_man_pages(config):
# type: (Config) -> List[Tuple[unicode, unicode, unicode, List[unicode], int]]
# type: (Config) -> List[Tuple[str, str, str, List[str], int]]
""" Better default man_pages settings. """
filename = make_filename_from_project(config.project)
return [(config.master_doc, filename, '%s %s' % (config.project, config.release),
@ -123,7 +122,7 @@ def default_man_pages(config):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(ManualPageBuilder)
app.add_config_value('man_pages', default_man_pages, None)

View File

@ -33,7 +33,6 @@ if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -47,7 +46,7 @@ section_template = '<section title="%(title)s" ref="%(ref)s"/>'
def render_file(filename, **kwargs):
# type: (unicode, Any) -> unicode
# type: (str, Any) -> str
pathname = os.path.join(package_dir, 'templates', 'qthelp', filename)
return SphinxRenderer.render_from_file(pathname, kwargs)
@ -88,7 +87,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
# self.config.html_style = 'traditional.css'
def get_theme_config(self):
# type: () -> Tuple[unicode, Dict]
# type: () -> Tuple[str, Dict]
return self.config.qthelp_theme, self.config.qthelp_theme_options
def handle_finish(self):
@ -96,7 +95,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
self.build_qhp(self.outdir, self.config.qthelp_basename)
def build_qhp(self, outdir, outname):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
logger.info(__('writing project file...'))
# sections
@ -170,8 +169,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return True
def write_toc(self, node, indentlevel=4):
# type: (nodes.Node, int) -> List[unicode]
parts = [] # type: List[unicode]
# type: (nodes.Node, int) -> List[str]
parts = [] # type: List[str]
if isinstance(node, nodes.list_item) and self.isdocnode(node):
compact_paragraph = cast(addnodes.compact_paragraph, node[0])
reference = cast(nodes.reference, compact_paragraph[0])
@ -205,7 +204,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return parts
def keyword_item(self, name, ref):
# type: (unicode, Any) -> unicode
# type: (str, Any) -> str
matchobj = _idpattern.match(name)
if matchobj:
groupdict = matchobj.groupdict()
@ -228,8 +227,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return item
def build_keywords(self, title, refs, subitems):
# type: (unicode, List[Any], Any) -> List[unicode]
keywords = [] # type: List[unicode]
# type: (str, List[Any], Any) -> List[str]
keywords = [] # type: List[str]
# if len(refs) == 0: # XXX
# write_param('See Also', title)
@ -251,7 +250,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return keywords
def get_project_files(self, outdir):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
@ -269,7 +268,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(QtHelpBuilder)

View File

@ -36,7 +36,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from typing import Any, Dict, Iterable, List, Tuple, Union # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -61,22 +60,22 @@ class TexinfoBuilder(Builder):
def init(self):
# type: () -> None
self.docnames = [] # type: Iterable[unicode]
self.document_data = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode, unicode, unicode, bool]] # NOQA
self.docnames = [] # type: Iterable[str]
self.document_data = [] # type: List[Tuple[str, str, str, str, str, str, str, bool]]
def get_outdated_docs(self):
# type: () -> Union[unicode, List[unicode]]
# type: () -> Union[str, List[str]]
return 'all documents' # for now
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
if docname not in self.docnames:
raise NoUri
else:
return '%' + docname
def get_relative_uri(self, from_, to, typ=None):
# type: (unicode, unicode, unicode) -> unicode
# type: (str, str, str) -> str
# ignore source path
return self.get_target_uri(to, typ)
@ -88,7 +87,7 @@ class TexinfoBuilder(Builder):
'will be written'))
return
# assign subdirs to titles
self.titles = [] # type: List[Tuple[unicode, unicode]]
self.titles = [] # type: List[Tuple[str, str]]
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
@ -106,7 +105,7 @@ class TexinfoBuilder(Builder):
for entry in self.document_data:
docname, targetname, title, author = entry[:4]
targetname += '.texi'
direntry = description = category = '' # type: unicode
direntry = description = category = ''
if len(entry) > 6:
direntry, description, category = entry[4:7]
toctree_only = False
@ -139,7 +138,7 @@ class TexinfoBuilder(Builder):
logger.info(__("done"))
def assemble_doctree(self, indexfile, toctree_only, appendices):
# type: (unicode, bool, List[unicode]) -> nodes.document
# type: (str, bool, List[str]) -> nodes.document
self.docnames = set([indexfile] + appendices)
logger.info(darkgreen(indexfile) + " ", nonl=1)
tree = self.env.get_doctree(indexfile)
@ -212,7 +211,7 @@ class TexinfoBuilder(Builder):
def default_texinfo_documents(config):
# type: (Config) -> List[Tuple[unicode, unicode, unicode, unicode, unicode, unicode, unicode]] # NOQA
# type: (Config) -> List[Tuple[str, str, str, str, str, str, str]]
""" Better default texinfo_documents settings. """
filename = make_filename_from_project(config.project)
return [(config.master_doc, filename, config.project, config.author, filename,
@ -220,7 +219,7 @@ def default_texinfo_documents(config):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(TexinfoBuilder)
app.add_config_value('texinfo_documents', default_texinfo_documents, None)

View File

@ -24,7 +24,6 @@ if False:
from typing import Any, Dict, Iterator, Set, Tuple # NOQA
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -38,15 +37,15 @@ class TextBuilder(Builder):
allow_parallel = True
default_translator_class = TextTranslator
current_docname = None # type: unicode
current_docname = None # type: str
def init(self):
# type: () -> None
# section numbers for headings in the currently visited document
self.secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
self.secnumbers = {} # type: Dict[str, Tuple[int, ...]]
def get_outdated_docs(self):
# type: () -> Iterator[unicode]
# type: () -> Iterator[str]
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
@ -65,15 +64,15 @@ class TextBuilder(Builder):
pass
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
return ''
def prepare_writing(self, docnames):
# type: (Set[unicode]) -> None
# type: (Set[str]) -> None
self.writer = TextWriter(self)
def write_doc(self, docname, doctree):
# type: (unicode, nodes.Node) -> None
# type: (str, nodes.Node) -> None
self.current_docname = docname
self.secnumbers = self.env.toc_secnumbers.get(docname, {})
destination = StringOutput(encoding='utf-8')
@ -92,7 +91,7 @@ class TextBuilder(Builder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(TextBuilder)
app.add_config_value('text_sectionchars', '*=-~"+`', 'env')

View File

@ -13,11 +13,10 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
try:
from sphinxcontrib.websupport.builder import WebSupportBuilder
app.add_builder(WebSupportBuilder)

View File

@ -26,7 +26,6 @@ if False:
from typing import Any, Dict, Iterator, Set, Type # NOQA
from docutils.writers.xml import BaseXMLWriter # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -50,7 +49,7 @@ class XMLBuilder(Builder):
pass
def get_outdated_docs(self):
# type: () -> Iterator[unicode]
# type: () -> Iterator[str]
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
@ -69,15 +68,15 @@ class XMLBuilder(Builder):
pass
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
return docname
def prepare_writing(self, docnames):
# type: (Set[unicode]) -> None
# type: (Set[str]) -> None
self.writer = self._writer_class(self)
def write_doc(self, docname, doctree):
# type: (unicode, nodes.Node) -> None
# type: (str, nodes.Node) -> None
# work around multiple string % tuple issues in docutils;
# replace tuples in attribute values with lists
doctree = doctree.deepcopy()
@ -119,7 +118,7 @@ class PseudoXMLBuilder(XMLBuilder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(XMLBuilder)
app.add_builder(PseudoXMLBuilder)

View File

@ -8,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import argparse
import locale
@ -199,15 +198,15 @@ files can be built by specifying individual filenames.
return parser
def make_main(argv=sys.argv[1:]): # type: ignore
# type: (List[unicode]) -> int
def make_main(argv=sys.argv[1:]):
# type: (List[str]) -> int
"""Sphinx build "make mode" entry."""
from sphinx.cmd import make_mode
return make_mode.run_make_mode(argv[1:])
def build_main(argv=sys.argv[1:]): # type: ignore
# type: (List[unicode]) -> int
def build_main(argv=sys.argv[1:]):
# type: (List[str]) -> int
"""Sphinx build "main" command-line entry."""
parser = get_parser()
@ -292,8 +291,8 @@ def build_main(argv=sys.argv[1:]): # type: ignore
return 2
def main(argv=sys.argv[1:]): # type: ignore
# type: (List[unicode]) -> int
def main(argv=sys.argv[1:]):
# type: (List[str]) -> int
locale.setlocale(locale.LC_ALL, '')
sphinx.locale.init_console(os.path.join(package_dir, 'locale'), 'sphinx')

View File

@ -14,7 +14,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import subprocess
@ -29,7 +28,6 @@ from sphinx.util.osutil import cd, rmtree
if False:
# For type annotation
from typing import List # NOQA
from sphinx.util.typing import unicode # NOQA
BUILDERS = [
@ -63,14 +61,14 @@ BUILDERS = [
class Make:
def __init__(self, srcdir, builddir, opts):
# type: (unicode, unicode, List[unicode]) -> None
# type: (str, str, List[str]) -> None
self.srcdir = srcdir
self.builddir = builddir
self.opts = opts
self.makecmd = os.environ.get('MAKE', 'make') # refer to $MAKE to determine the make command
def builddir_join(self, *comps):
# type: (unicode) -> unicode
# type: (str) -> str
return path.join(self.builddir, *comps)
def build_clean(self):
@ -147,7 +145,7 @@ class Make:
return 0
def run_generic_build(self, builder, doctreedir=None):
# type: (unicode, unicode) -> int
# type: (str, str) -> int
# compatibility with old Makefile
papersize = os.getenv('PAPER', '')
opts = self.opts
@ -164,7 +162,7 @@ class Make:
def run_make_mode(args):
# type: (List[unicode]) -> int
# type: (List[str]) -> int
if len(args) < 3:
print('Error: at least 3 arguments (builder, source '
'dir, build dir) are required.', file=sys.stderr)

View File

@ -9,7 +9,6 @@
:license: BSD, see LICENSE for details.
"""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import locale
@ -51,7 +50,6 @@ from sphinx.util.template import SphinxRenderer
if False:
# For type annotation
from typing import Any, Callable, Dict, List, Pattern, Union # NOQA
from sphinx.util.typing import unicode # NOQA
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
@ -92,7 +90,7 @@ else:
# function to get input from terminal -- overridden by the test suite
def term_input(prompt):
# type: (unicode) -> unicode
# type: (str) -> str
if sys.platform == 'win32':
# Important: On Windows, readline is not enabled by default. In this
# environment, escape sequences are broken. To avoid the
@ -108,7 +106,7 @@ class ValidationError(Exception):
def is_path(x):
# type: (unicode) -> unicode
# type: (str) -> str
x = path.expanduser(x)
if not path.isdir(x):
raise ValidationError(__("Please enter a valid path name."))
@ -116,21 +114,21 @@ def is_path(x):
def allow_empty(x):
# type: (unicode) -> unicode
# type: (str) -> str
return x
def nonempty(x):
# type: (unicode) -> unicode
# type: (str) -> str
if not x:
raise ValidationError(__("Please enter some text."))
return x
def choice(*l):
# type: (unicode) -> Callable[[unicode], unicode]
# type: (str) -> Callable[[str], str]
def val(x):
# type: (unicode) -> unicode
# type: (str) -> str
if x not in l:
raise ValidationError(__('Please enter one of %s.') % ', '.join(l))
return x
@ -138,14 +136,14 @@ def choice(*l):
def boolean(x):
# type: (unicode) -> bool
# type: (str) -> bool
if x.upper() not in ('Y', 'YES', 'N', 'NO'):
raise ValidationError(__("Please enter either 'y' or 'n'."))
return x.upper() in ('Y', 'YES')
def suffix(x):
# type: (unicode) -> unicode
# type: (str) -> str
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError(__("Please enter a file suffix, "
"e.g. '.rst' or '.txt'."))
@ -153,12 +151,12 @@ def suffix(x):
def ok(x):
# type: (unicode) -> unicode
# type: (str) -> str
return x
def term_decode(text):
# type: (Union[bytes,unicode]) -> unicode
# type: (Union[bytes,str]) -> str
if isinstance(text, text_type):
return text
@ -180,10 +178,10 @@ def term_decode(text):
def do_prompt(text, default=None, validator=nonempty):
# type: (unicode, unicode, Callable[[unicode], Any]) -> Union[unicode, bool]
# type: (str, str, Callable[[str], Any]) -> Union[str, bool]
while True:
if default is not None:
prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default) # type: unicode
prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default)
else:
prompt = PROMPT_PREFIX + text + ': '
if USE_LIBEDIT:
@ -207,7 +205,7 @@ def do_prompt(text, default=None, validator=nonempty):
def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
# type: (unicode, Pattern) -> unicode
# type: (str, Pattern) -> str
# remove Unicode literal prefixes
warnings.warn('convert_python_source() is deprecated.',
RemovedInSphinx40Warning)
@ -216,12 +214,12 @@ def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
class QuickstartRenderer(SphinxRenderer):
def __init__(self, templatedir):
# type: (unicode) -> None
# type: (str) -> None
self.templatedir = templatedir or ''
super(QuickstartRenderer, self).__init__()
def render(self, template_name, context):
# type: (unicode, Dict) -> unicode
# type: (str, Dict) -> str
user_template = path.join(self.templatedir, path.basename(template_name))
if self.templatedir and path.exists(user_template):
return self.render_from_file(user_template, context)
@ -374,7 +372,7 @@ directly.'''))
def generate(d, overwrite=True, silent=False, templatedir=None):
# type: (Dict, bool, bool, unicode) -> None
# type: (Dict, bool, bool, str) -> None
"""Generate project based on values in *d*."""
template = QuickstartRenderer(templatedir=templatedir)
@ -426,7 +424,7 @@ def generate(d, overwrite=True, silent=False, templatedir=None):
ensuredir(path.join(srcdir, d['dot'] + 'static'))
def write_file(fpath, content, newline=None):
# type: (unicode, unicode, unicode) -> None
# type: (str, str, str) -> None
if overwrite or not path.isfile(fpath):
if 'quiet' not in d:
print(__('Creating file %s.') % fpath)

View File

@ -9,7 +9,6 @@
:license: BSD, see LICENSE for details.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import warnings
@ -45,8 +44,8 @@ def get_parser():
return build.get_parser()
def main(argv=sys.argv[1:]): # type: ignore
# type: (List[unicode]) -> int
def main(argv=sys.argv[1:]):
# type: (List[str]) -> int
warnings.warn('sphinx.cmdline module is deprecated. Use sphinx.cmd.build instead.',
RemovedInSphinx30Warning, stacklevel=2)
return build.main(argv)

View File

@ -17,7 +17,7 @@ from collections import OrderedDict
from os import path, getenv
from typing import Any, NamedTuple, Union
from six import text_type, integer_types
from six import text_type
from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.errors import ConfigError, ExtensionError
@ -33,7 +33,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.tags import Tags # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -68,11 +67,11 @@ class ENUM:
app.add_config_value('latex_show_urls', 'no', None, ENUM('no', 'footnote', 'inline'))
"""
def __init__(self, *candidates):
# type: (unicode) -> None
# type: (str) -> None
self.candidates = candidates
def match(self, value):
# type: (Union[unicode,List,Tuple]) -> bool
# type: (Union[str, List, Tuple]) -> bool
if isinstance(value, (list, tuple)):
return all(item in self.candidates for item in value)
else:
@ -155,7 +154,7 @@ class Config:
'smartquotes_excludes': ({'languages': ['ja'],
'builders': ['man', 'text']},
'env', []),
} # type: Dict[unicode, Tuple]
} # type: Dict[str, Tuple]
def __init__(self, *args):
# type: (Any) -> None
@ -166,7 +165,7 @@ class Config:
RemovedInSphinx30Warning, stacklevel=2)
dirname, filename, overrides, tags = args
if dirname is None:
config = {} # type: Dict[unicode, Any]
config = {} # type: Dict[str, Any]
else:
config = eval_config_file(path.join(dirname, filename), tags)
else:
@ -188,11 +187,11 @@ class Config:
config['extensions'] = overrides.pop('extensions').split(',')
else:
config['extensions'] = overrides.pop('extensions')
self.extensions = config.get('extensions', []) # type: List[unicode]
self.extensions = config.get('extensions', []) # type: List[str]
@classmethod
def read(cls, confdir, overrides=None, tags=None):
# type: (unicode, Dict, Tags) -> Config
# type: (str, Dict, Tags) -> Config
"""Create a Config object from configuration file."""
filename = path.join(confdir, CONFIG_FILENAME)
namespace = eval_config_file(filename, tags)
@ -211,7 +210,7 @@ class Config:
check_unicode(self)
def convert_overrides(self, name, value):
# type: (unicode, Any) -> Any
# type: (str, Any) -> Any
if not isinstance(value, str):
return value
else:
@ -224,7 +223,7 @@ class Config:
(name, name + '.key=value'))
elif isinstance(defvalue, list):
return value.split(',')
elif isinstance(defvalue, integer_types):
elif isinstance(defvalue, int):
try:
return int(value)
except ValueError:
@ -277,7 +276,7 @@ class Config:
self.__dict__[name] = config[name]
def __getattr__(self, name):
# type: (unicode) -> Any
# type: (str) -> Any
if name.startswith('_'):
raise AttributeError(name)
if name not in self.values:
@ -288,19 +287,19 @@ class Config:
return default
def __getitem__(self, name):
# type: (unicode) -> unicode
# type: (str) -> str
return getattr(self, name)
def __setitem__(self, name, value):
# type: (unicode, Any) -> None
# type: (str, Any) -> None
setattr(self, name, value)
def __delitem__(self, name):
# type: (unicode) -> None
# type: (str) -> None
delattr(self, name)
def __contains__(self, name):
# type: (unicode) -> bool
# type: (str) -> bool
return name in self.values
def __iter__(self):
@ -309,14 +308,14 @@ class Config:
yield ConfigValue(name, getattr(self, name), value[1])
def add(self, name, default, rebuild, types):
# type: (unicode, Any, Union[bool, unicode], Any) -> None
# type: (str, Any, Union[bool, str], Any) -> None
if name in self.values:
raise ExtensionError(__('Config value %r already present') % name)
else:
self.values[name] = (default, rebuild, types)
def filter(self, rebuild):
# type: (Union[unicode, List[unicode]]) -> Iterator[ConfigValue]
# type: (Union[str, List[str]]) -> Iterator[ConfigValue]
if isinstance(rebuild, str):
rebuild = [rebuild]
return (value for value in self if value.rebuild in rebuild)
@ -351,9 +350,9 @@ class Config:
def eval_config_file(filename, tags):
# type: (unicode, Tags) -> Dict[unicode, Any]
# type: (str, Tags) -> Dict[str, Any]
"""Evaluate a config file."""
namespace = {} # type: Dict[unicode, Any]
namespace = {} # type: Dict[str, Any]
namespace['__file__'] = filename
namespace['tags'] = tags
@ -510,7 +509,7 @@ def check_primary_domain(app, config):
def check_master_doc(app, env, added, changed, removed):
# type: (Sphinx, BuildEnvironment, Set[unicode], Set[unicode], Set[unicode]) -> Set[unicode] # NOQA
# type: (Sphinx, BuildEnvironment, Set[str], Set[str], Set[str]) -> Set[str]
"""Adjust master_doc to 'contents' to support an old project which does not have
no master_doc setting.
"""
@ -525,7 +524,7 @@ def check_master_doc(app, env, added, changed, removed):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.connect('config-inited', convert_source_suffix)
app.connect('config-inited', init_numfig_format)
app.connect('config-inited', correct_copyright_year)
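
Beyond the annotation change, this file also drops ``six``: ``isinstance(defvalue, integer_types)`` becomes ``isinstance(defvalue, int)``. A hedged sketch of why that is equivalent on Python 3, using an invented helper ``coerce_override()`` that only mirrors the integer branch of ``Config.convert_overrides()``:

def coerce_override(defvalue, value):
    # type: (object, str) -> object
    # On Python 2, six.integer_types was (int, long); on Python 3 it is just
    # (int,), so checking against the builtin ``int`` is sufficient.
    if isinstance(defvalue, int):
        try:
            return int(value)
        except ValueError:
            return value
    return value

print(coerce_override(3, '42'))     # -> 42
print(coerce_override('en', '42'))  # -> '42', left as a string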

View File

@ -13,9 +13,7 @@ import warnings
if False:
# For type annotation
# note: Don't use typing.TYPE_CHECK here (for py27 and py34).
from typing import Any, Dict, Type # NOQA
from sphinx.util.typing import unicode # NOQA
class RemovedInSphinx30Warning(PendingDeprecationWarning):
@ -39,22 +37,22 @@ class DeprecatedDict(dict):
super(DeprecatedDict, self).__init__(data)
def __setitem__(self, key, value):
# type: (unicode, Any) -> None
# type: (str, Any) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
super(DeprecatedDict, self).__setitem__(key, value)
def setdefault(self, key, default=None):
# type: (unicode, Any) -> None
# type: (str, Any) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
return super(DeprecatedDict, self).setdefault(key, default)
def __getitem__(self, key):
# type: (unicode) -> None
# type: (str) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
return super(DeprecatedDict, self).__getitem__(key)
def get(self, key, default=None):
# type: (unicode, Any) -> None
# type: (str, Any) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
return super(DeprecatedDict, self).get(key, default)
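
Every ``DeprecatedDict`` method touched here follows the same shape: call ``warnings.warn()`` first, then delegate to ``dict``. A minimal sketch of that pattern with an invented subclass name (``NoisyDict``), not the Sphinx class itself:

import warnings

class NoisyDict(dict):
    """dict subclass that warns on every access, mirroring the pattern above."""

    def __init__(self, data, message, warning=DeprecationWarning):
        super().__init__(data)
        self.message = message
        self.warning = warning

    def __getitem__(self, key):
        warnings.warn(self.message, self.warning, stacklevel=2)
        return super().__getitem__(key)

    def __setitem__(self, key, value):
        warnings.warn(self.message, self.warning, stacklevel=2)
        super().__setitem__(key, value)

warnings.simplefilter('always')
d = NoisyDict({'roles': []}, 'this mapping is deprecated')
d['roles']  # emits the DeprecationWarning, then returns []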

View File

@ -39,7 +39,7 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.docfields import Field # NOQA
from sphinx.util.typing import DirectiveOption, unicode # NOQA
from sphinx.util.typing import DirectiveOption # NOQA
# RE to strip backslash escapes
@ -64,12 +64,12 @@ class ObjectDescription(SphinxDirective):
# types of doc fields that this directive handles, see sphinx.util.docfields
doc_field_types = [] # type: List[Field]
domain = None # type: unicode
objtype = None # type: unicode
domain = None # type: str
objtype = None # type: str
indexnode = None # type: addnodes.index
def get_signatures(self):
# type: () -> List[unicode]
# type: () -> List[str]
"""
Retrieve the signatures to document from the directive arguments. By
default, signatures are given as arguments, one per line.
@ -81,7 +81,7 @@ class ObjectDescription(SphinxDirective):
return [strip_backslash_re.sub(r'\1', line.strip()) for line in lines]
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> Any
# type: (str, addnodes.desc_signature) -> Any
"""
Parse the signature *sig* into individual nodes and append them to
*signode*. If ValueError is raised, parsing is aborted and the whole
@ -94,7 +94,7 @@ class ObjectDescription(SphinxDirective):
raise ValueError
def add_target_and_index(self, name, sig, signode):
# type: (Any, unicode, addnodes.desc_signature) -> None
# type: (Any, str, addnodes.desc_signature) -> None
"""
Add cross-reference IDs and entries to self.indexnode, if applicable.
@ -150,7 +150,7 @@ class ObjectDescription(SphinxDirective):
node['objtype'] = node['desctype'] = self.objtype
node['noindex'] = noindex = ('noindex' in self.options)
self.names = [] # type: List[unicode]
self.names = [] # type: List[str]
signatures = self.get_signatures()
for i, sig in enumerate(signatures):
# add a signature node for each signature in the current unit
@ -246,7 +246,7 @@ class DefaultDomain(SphinxDirective):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
directives.register_directive('default-role', DefaultRole)
directives.register_directive('default-domain', DefaultDomain)
directives.register_directive('describe', ObjectDescription)

View File

@ -29,7 +29,6 @@ if False:
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -67,7 +66,7 @@ class HighlightLang(Highlight):
def dedent_lines(lines, dedent, location=None):
# type: (List[unicode], int, Any) -> List[unicode]
# type: (List[str], int, Any) -> List[str]
if not dedent:
return lines
@ -85,7 +84,7 @@ def dedent_lines(lines, dedent, location=None):
def container_wrapper(directive, literal_node, caption):
# type: (SphinxDirective, nodes.Node, unicode) -> nodes.container
# type: (SphinxDirective, nodes.Node, str) -> nodes.container
container_node = nodes.container('', literal_block=True,
classes=['literal-block-wrapper'])
parsed = nodes.Element()
@ -198,7 +197,7 @@ class LiteralIncludeReader:
]
def __init__(self, filename, options, config):
# type: (unicode, Dict, Config) -> None
# type: (str, Dict, Config) -> None
self.filename = filename
self.options = options
self.encoding = options.get('encoding', config.source_encoding)
@ -214,10 +213,10 @@ class LiteralIncludeReader:
(option1, option2))
def read_file(self, filename, location=None):
# type: (unicode, Any) -> List[unicode]
# type: (str, Any) -> List[str]
try:
with open(filename, encoding=self.encoding, errors='strict') as f:
text = f.read() # type: unicode
text = f.read()
if 'tab-width' in self.options:
text = text.expandtabs(self.options['tab-width'])
@ -230,7 +229,7 @@ class LiteralIncludeReader:
(self.encoding, filename))
def read(self, location=None):
# type: (Any) -> Tuple[unicode, int]
# type: (Any) -> Tuple[str, int]
if 'diff' in self.options:
lines = self.show_diff()
else:
@ -248,7 +247,7 @@ class LiteralIncludeReader:
return ''.join(lines), len(lines)
def show_diff(self, location=None):
# type: (Any) -> List[unicode]
# type: (Any) -> List[str]
new_lines = self.read_file(self.filename)
old_filename = self.options.get('diff')
old_lines = self.read_file(old_filename)
@ -256,7 +255,7 @@ class LiteralIncludeReader:
return list(diff)
def pyobject_filter(self, lines, location=None):
# type: (List[unicode], Any) -> List[unicode]
# type: (List[str], Any) -> List[str]
pyobject = self.options.get('pyobject')
if pyobject:
from sphinx.pycode import ModuleAnalyzer
@ -275,7 +274,7 @@ class LiteralIncludeReader:
return lines
def lines_filter(self, lines, location=None):
# type: (List[unicode], Any) -> List[unicode]
# type: (List[str], Any) -> List[str]
linespec = self.options.get('lines')
if linespec:
linelist = parselinenos(linespec, len(lines))
@ -300,7 +299,7 @@ class LiteralIncludeReader:
return lines
def start_filter(self, lines, location=None):
# type: (List[unicode], Any) -> List[unicode]
# type: (List[str], Any) -> List[str]
if 'start-at' in self.options:
start = self.options.get('start-at')
inclusive = False
@ -332,7 +331,7 @@ class LiteralIncludeReader:
return lines
def end_filter(self, lines, location=None):
# type: (List[unicode], Any) -> List[unicode]
# type: (List[str], Any) -> List[str]
if 'end-at' in self.options:
end = self.options.get('end-at')
inclusive = True
@ -360,7 +359,7 @@ class LiteralIncludeReader:
return lines
def prepend_filter(self, lines, location=None):
# type: (List[unicode], Any) -> List[unicode]
# type: (List[str], Any) -> List[str]
prepend = self.options.get('prepend')
if prepend:
lines.insert(0, prepend + '\n')
@ -368,7 +367,7 @@ class LiteralIncludeReader:
return lines
def append_filter(self, lines, location=None):
# type: (List[unicode], Any) -> List[unicode]
# type: (List[str], Any) -> List[str]
append = self.options.get('append')
if append:
lines.append(append + '\n')
@ -376,7 +375,7 @@ class LiteralIncludeReader:
return lines
def dedent_filter(self, lines, location=None):
# type: (List[unicode], Any) -> List[unicode]
# type: (List[str], Any) -> List[str]
if 'dedent' in self.options:
return dedent_lines(lines, self.options.get('dedent'), location=location)
else:
@ -470,7 +469,7 @@ class LiteralInclude(SphinxDirective):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
directives.register_directive('highlight', Highlight)
directives.register_directive('highlightlang', HighlightLang)
directives.register_directive('code-block', CodeBlock)

View File

@ -29,14 +29,13 @@ if False:
# For type annotation
from typing import Any, Dict, Generator, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
glob_re = re.compile(r'.*[*?\[].*')
def int_or_nothing(argument):
# type: (unicode) -> int
# type: (str) -> int
if not argument:
return 999
return int(argument)
@ -396,7 +395,7 @@ class Include(BaseInclude, SphinxDirective):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
directives.register_directive('toctree', TocTree)
directives.register_directive('sectionauthor', Author)
directives.register_directive('moduleauthor', Author)

View File

@ -22,7 +22,6 @@ if False:
# For type annotation
from typing import Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
class Figure(images.Figure):

View File

@ -25,7 +25,7 @@ if False:
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.roles import XRefRole # NOQA
from sphinx.util.typing import RoleFunction, unicode # NOQA
from sphinx.util.typing import RoleFunction # NOQA
class ObjType:
@ -48,8 +48,8 @@ class ObjType:
}
def __init__(self, lname, *roles, **attrs):
# type: (unicode, Any, Any) -> None
self.lname = lname # type: unicode
# type: (str, Any, Any) -> None
self.lname = lname
self.roles = roles # type: Tuple
self.attrs = self.known_attrs.copy() # type: Dict
self.attrs.update(attrs)
@ -79,9 +79,9 @@ class Index:
domains using :meth:`~sphinx.application.Sphinx.add_index_to_domain()`.
"""
name = None # type: unicode
localname = None # type: unicode
shortname = None # type: unicode
name = None # type: str
localname = None # type: str
shortname = None # type: str
def __init__(self, domain):
# type: (Domain) -> None
@ -91,7 +91,7 @@ class Index:
self.domain = domain
def generate(self, docnames=None):
# type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[IndexEntry]]], bool]
# type: (Iterable[str]) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]
"""Return entries for the index given by *name*. If *docnames* is
given, restrict to entries referring to these docnames.
@ -150,17 +150,17 @@ class Domain:
#: domain label: longer, more descriptive (used in messages)
label = ''
#: type (usually directive) name -> ObjType instance
object_types = {} # type: Dict[unicode, ObjType]
object_types = {} # type: Dict[str, ObjType]
#: directive name -> directive class
directives = {} # type: Dict[unicode, Any]
directives = {} # type: Dict[str, Any]
#: role name -> role callable
roles = {} # type: Dict[unicode, Union[RoleFunction, XRefRole]]
roles = {} # type: Dict[str, Union[RoleFunction, XRefRole]]
#: a list of Index subclasses
indices = [] # type: List[Type[Index]]
#: role name -> a warning message if reference is missing
dangling_warnings = {} # type: Dict[unicode, unicode]
dangling_warnings = {} # type: Dict[str, str]
#: node_class -> (enum_node_type, title_getter)
enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[unicode, Callable]]
enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[str, Callable]]
#: data value for a fresh environment
initial_data = {} # type: Dict
@ -172,10 +172,10 @@ class Domain:
def __init__(self, env):
# type: (BuildEnvironment) -> None
self.env = env # type: BuildEnvironment
self._role_cache = {} # type: Dict[unicode, Callable]
self._directive_cache = {} # type: Dict[unicode, Callable]
self._role2type = {} # type: Dict[unicode, List[unicode]]
self._type2role = {} # type: Dict[unicode, unicode]
self._role_cache = {} # type: Dict[str, Callable]
self._directive_cache = {} # type: Dict[str, Callable]
self._role2type = {} # type: Dict[str, List[str]]
self._type2role = {} # type: Dict[str, str]
# convert class variables to instance one (to enhance through API)
self.object_types = dict(self.object_types)
@ -196,11 +196,11 @@ class Domain:
for rolename in obj.roles:
self._role2type.setdefault(rolename, []).append(name)
self._type2role[name] = obj.roles[0] if obj.roles else ''
self.objtypes_for_role = self._role2type.get # type: Callable[[unicode], List[unicode]] # NOQA
self.role_for_objtype = self._type2role.get # type: Callable[[unicode], unicode]
self.objtypes_for_role = self._role2type.get # type: Callable[[str], List[str]]
self.role_for_objtype = self._type2role.get # type: Callable[[str], str]
def add_object_type(self, name, objtype):
# type: (unicode, ObjType) -> None
# type: (str, ObjType) -> None
"""Add an object type."""
self.object_types[name] = objtype
if objtype.roles:
@ -212,7 +212,7 @@ class Domain:
self._role2type.setdefault(role, []).append(name)
def role(self, name):
# type: (unicode) -> RoleFunction
# type: (str) -> RoleFunction
"""Return a role adapter function that always gives the registered
role its full name ('domain:name') as the first argument.
"""
@ -223,14 +223,14 @@ class Domain:
fullname = '%s:%s' % (self.name, name)
def role_adapter(typ, rawtext, text, lineno, inliner, options={}, content=[]):
# type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
return self.roles[name](fullname, rawtext, text, lineno,
inliner, options, content)
self._role_cache[name] = role_adapter
return role_adapter
def directive(self, name):
# type: (unicode) -> Callable
# type: (str) -> Callable
"""Return a directive adapter class that always gives the registered
directive its full name ('domain:name') as ``self.name``.
"""
@ -252,12 +252,12 @@ class Domain:
# methods that should be overwritten
def clear_doc(self, docname):
# type: (unicode) -> None
# type: (str) -> None
"""Remove traces of a document in the domain-specific inventories."""
pass
def merge_domaindata(self, docnames, otherdata):
# type: (List[unicode], Dict) -> None
# type: (List[str], Dict) -> None
"""Merge in data regarding *docnames* from a different domaindata
inventory (coming from a subprocess in parallel builds).
"""
@ -266,7 +266,7 @@ class Domain:
self.__class__)
def process_doc(self, env, docname, document):
# type: (BuildEnvironment, unicode, nodes.document) -> None
# type: (BuildEnvironment, str, nodes.document) -> None
"""Process a document after it is read by the environment."""
pass
@ -284,7 +284,7 @@ class Domain:
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
"""Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
@ -301,7 +301,7 @@ class Domain:
pass
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
# type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
"""Resolve the pending_xref *node* with the given *target*.
The reference comes from an "any" or similar role, which means that we
@ -318,7 +318,7 @@ class Domain:
raise NotImplementedError
def get_objects(self):
# type: () -> Iterable[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
# type: () -> Iterable[Tuple[str, str, str, str, str, int]]
"""Return an iterable of "object descriptions", which are tuples with
five items:
@ -338,19 +338,19 @@ class Domain:
return []
def get_type_name(self, type, primary=False):
# type: (ObjType, bool) -> unicode
# type: (ObjType, bool) -> str
"""Return full name for given ObjType."""
if primary:
return type.lname
return _('%s %s') % (self.label, type.lname)
def get_enumerable_node_type(self, node):
# type: (nodes.Node) -> unicode
# type: (nodes.Node) -> str
"""Get type of enumerable nodes (experimental)."""
enum_node_type, _ = self.enumerable_nodes.get(node.__class__, (None, None))
return enum_node_type
def get_full_qualified_name(self, node):
# type: (nodes.Element) -> unicode
# type: (nodes.Element) -> str
"""Return full qualified name for given node."""
return None
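
The change applied throughout this file, and mirrored across the rest of the commit, is mechanical: comment-style annotations drop the unicode alias in favour of plain str, which in turn removes the need to import unicode from sphinx.util.typing. A condensed before/after sketch, reusing the clear_doc signature shown above:

    # before
    from sphinx.util.typing import unicode  # NOQA

    def clear_doc(self, docname):
        # type: (unicode) -> None
        ...

    # after
    def clear_doc(self, docname):
        # type: (str) -> None
        ...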

View File

@ -28,7 +28,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
# RE to split at word boundaries
@ -82,7 +81,7 @@ class CObject(ObjectDescription):
))
def _parse_type(self, node, ctype):
# type: (nodes.Element, unicode) -> None
# type: (nodes.Element, str) -> None
# add cross-ref nodes for all words
for part in [_f for _f in wsplit_re.split(ctype) if _f]:
tnode = nodes.Text(part, part)
@ -97,7 +96,7 @@ class CObject(ObjectDescription):
node += tnode
def _parse_arglist(self, arglist):
# type: (unicode) -> Iterator[unicode]
# type: (str) -> Iterator[str]
while True:
m = c_funcptr_arg_sig_re.match(arglist)
if m:
@ -116,7 +115,7 @@ class CObject(ObjectDescription):
break
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> unicode
# type: (str, addnodes.desc_signature) -> str
"""Transform a C signature into RST nodes."""
# first try the function pointer signature regex, it's more specific
m = c_funcptr_sig_re.match(sig)
@ -186,7 +185,7 @@ class CObject(ObjectDescription):
return fullname
def get_index_text(self, name):
# type: (unicode) -> unicode
# type: (str) -> str
if self.objtype == 'function':
return _('%s (C function)') % name
elif self.objtype == 'member':
@ -201,7 +200,7 @@ class CObject(ObjectDescription):
return ''
def add_target_and_index(self, name, sig, signode):
# type: (unicode, unicode, addnodes.desc_signature) -> None
# type: (str, str, addnodes.desc_signature) -> None
# for C API items we add a prefix since names are usually not qualified
# by a module name and so easily clash with e.g. section titles
targetname = 'c.' + name
@ -239,7 +238,7 @@ class CObject(ObjectDescription):
class CXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
# type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
# type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
@ -280,16 +279,16 @@ class CDomain(Domain):
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
} # type: Dict[unicode, Dict[unicode, Tuple[unicode, Any]]]
} # type: Dict[str, Dict[str, Tuple[str, Any]]]
def clear_doc(self, docname):
# type: (unicode) -> None
# type: (str) -> None
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
def merge_domaindata(self, docnames, otherdata):
# type: (List[unicode], Dict) -> None
# type: (List[str], Dict) -> None
# XXX check duplicates
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@ -297,7 +296,7 @@ class CDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# strip pointer asterisk
target = target.rstrip(' *')
# because TypedField can generate xrefs
@ -311,7 +310,7 @@ class CDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
# type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
# strip pointer asterisk
target = target.rstrip(' *')
if target not in self.data['objects']:
@ -322,13 +321,13 @@ class CDomain(Domain):
contnode, target))]
def get_objects(self):
# type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
# type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for refname, (docname, type) in list(self.data['objects'].items()):
yield (refname, refname, type, docname, 'c.' + refname, 1)
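
The anchor yielded here reuses the same 'c.' prefix that add_target_and_index attaches above, so search entries and cross-references point at the same target. For a hypothetical C function PyObject_New documented in a page named c-api/allocation, the yielded object description would look roughly like:

    ('PyObject_New', 'PyObject_New', 'function', 'c-api/allocation', 'c.PyObject_New', 1)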
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_domain(CDomain)
return {

View File

@ -27,14 +27,13 @@ if False:
from typing import Any, Dict, List # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
versionlabels = {
'versionadded': _('New in version %s'),
'versionchanged': _('Changed in version %s'),
'deprecated': _('Deprecated since version %s'),
} # type: Dict[unicode, unicode]
}
locale.versionlabels = DeprecatedDict(
versionlabels,
@ -116,14 +115,14 @@ class ChangeSetDomain(Domain):
} # type: Dict
def clear_doc(self, docname):
# type: (unicode) -> None
# type: (str) -> None
for version, changes in self.data['changes'].items():
for changeset in changes[:]:
if changeset.docname == docname:
changes.remove(changeset)
def merge_domaindata(self, docnames, otherdata):
# type: (List[unicode], Dict) -> None
# type: (List[str], Dict) -> None
# XXX duplicates?
for version, otherchanges in otherdata['changes'].items():
changes = self.data['changes'].setdefault(version, [])
@ -132,7 +131,7 @@ class ChangeSetDomain(Domain):
changes.append(changeset)
def process_doc(self, env, docname, document):
# type: (BuildEnvironment, unicode, nodes.document) -> None
# type: (BuildEnvironment, str, nodes.document) -> None
pass # nothing to do here. All changesets are registered on calling directive.
def note_changeset(self, node):
@ -145,12 +144,12 @@ class ChangeSetDomain(Domain):
self.data['changes'].setdefault(version, []).append(changeset)
def get_changesets_for(self, version):
# type: (unicode) -> List[ChangeSet]
# type: (str) -> List[ChangeSet]
return self.data['changes'].get(version, [])
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_domain(ChangeSetDomain)
app.add_directive('deprecated', VersionChange)
app.add_directive('versionadded', VersionChange)

File diff suppressed because it is too large.

View File

@ -29,7 +29,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
class JSObject(ObjectDescription):
@ -41,14 +40,14 @@ class JSObject(ObjectDescription):
has_arguments = False
#: what is displayed right before the documentation entry
display_prefix = None # type: unicode
display_prefix = None # type: str
#: If ``allow_nesting`` is ``True``, the object prefixes will be accumulated
#: based on directive nesting
allow_nesting = False
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
# type: (str, addnodes.desc_signature) -> Tuple[str, str]
"""Breaks down construct signatures
Parses out prefix and argument list from construct definition. The
@ -102,7 +101,7 @@ class JSObject(ObjectDescription):
return fullname, prefix
def add_target_and_index(self, name_obj, sig, signode):
# type: (Tuple[unicode, unicode], unicode, addnodes.desc_signature) -> None
# type: (Tuple[str, str], str, addnodes.desc_signature) -> None
mod_name = self.env.ref_context.get('js:module')
fullname = (mod_name and mod_name + '.' or '') + name_obj[0]
if fullname not in self.state.document.ids:
@ -126,7 +125,7 @@ class JSObject(ObjectDescription):
'', None))
def get_index_text(self, objectname, name_obj):
# type: (unicode, Tuple[unicode, unicode]) -> unicode
# type: (str, Tuple[str, str]) -> str
name, obj = name_obj
if self.objtype == 'function':
if not obj:
@ -273,7 +272,7 @@ class JSModule(SphinxDirective):
class JSXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
# type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
# type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
# basically what sphinx.domains.python.PyXRefRole does
refnode['js:object'] = env.ref_context.get('js:object')
refnode['js:module'] = env.ref_context.get('js:module')
@ -323,10 +322,10 @@ class JavaScriptDomain(Domain):
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # mod_name -> docname
} # type: Dict[unicode, Dict[unicode, Tuple[unicode, unicode]]]
} # type: Dict[str, Dict[str, Tuple[str, str]]]
def clear_doc(self, docname):
# type: (unicode) -> None
# type: (str) -> None
for fullname, (pkg_docname, _l) in list(self.data['objects'].items()):
if pkg_docname == docname:
del self.data['objects'][fullname]
@ -335,7 +334,7 @@ class JavaScriptDomain(Domain):
del self.data['modules'][mod_name]
def merge_domaindata(self, docnames, otherdata):
# type: (List[unicode], Dict) -> None
# type: (List[str], Dict) -> None
# XXX check duplicates
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@ -345,7 +344,7 @@ class JavaScriptDomain(Domain):
self.data['modules'][mod_name] = pkg_docname
def find_obj(self, env, mod_name, prefix, name, typ, searchorder=0):
# type: (BuildEnvironment, unicode, unicode, unicode, unicode, int) -> Tuple[unicode, Tuple[unicode, unicode]] # NOQA
# type: (BuildEnvironment, str, str, str, str, int) -> Tuple[str, Tuple[str, str]]
if name[-2:] == '()':
name = name[:-2]
objects = self.data['objects']
@ -371,7 +370,7 @@ class JavaScriptDomain(Domain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
mod_name = node.get('js:module')
prefix = node.get('js:object')
searchorder = node.hasattr('refspecific') and 1 or 0
@ -383,7 +382,7 @@ class JavaScriptDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target, node,
contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
# type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
mod_name = node.get('js:module')
prefix = node.get('js:object')
name, obj = self.find_obj(env, mod_name, prefix, target, None, 1)
@ -394,13 +393,13 @@ class JavaScriptDomain(Domain):
name.replace('$', '_S_'), contnode, name))]
def get_objects(self):
# type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
# type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for refname, (docname, type) in list(self.data['objects'].items()):
yield refname, refname, type, docname, \
refname.replace('$', '_S_'), 1
def get_full_qualified_name(self, node):
# type: (nodes.Element) -> unicode
# type: (nodes.Element) -> str
modname = node.get('js:module')
prefix = node.get('js:object')
target = node.get('reftarget')
@ -411,7 +410,7 @@ class JavaScriptDomain(Domain):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_domain(JavaScriptDomain)
return {

View File

@ -26,7 +26,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -46,7 +45,7 @@ class MathDomain(Domain):
initial_data = {
'objects': {}, # labelid -> (docname, eqno)
'has_equations': {}, # docname -> bool
} # type: Dict[unicode, Dict[unicode, Tuple[unicode, int]]]
} # type: Dict[str, Dict[str, Tuple[str, int]]]
dangling_warnings = {
'eq': 'equation not found: %(target)s',
}
@ -59,7 +58,7 @@ class MathDomain(Domain):
}
def process_doc(self, env, docname, document):
# type: (BuildEnvironment, unicode, nodes.document) -> None
# type: (BuildEnvironment, str, nodes.document) -> None
def math_node(node):
# type: (nodes.Node) -> bool
return isinstance(node, (nodes.math, nodes.math_block))
@ -67,7 +66,7 @@ class MathDomain(Domain):
self.data['has_equations'][docname] = any(document.traverse(math_node))
def clear_doc(self, docname):
# type: (unicode) -> None
# type: (str) -> None
for equation_id, (doc, eqno) in list(self.data['objects'].items()):
if doc == docname:
del self.data['objects'][equation_id]
@ -75,7 +74,7 @@ class MathDomain(Domain):
self.data['has_equations'].pop(docname, None)
def merge_domaindata(self, docnames, otherdata):
# type: (Iterable[unicode], Dict) -> None
# type: (Iterable[str], Dict) -> None
for labelid, (doc, eqno) in otherdata['objects'].items():
if doc in docnames:
self.data['objects'][labelid] = (doc, eqno)
@ -84,7 +83,7 @@ class MathDomain(Domain):
self.data['has_equations'][docname] = otherdata['has_equations'][docname]
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
assert typ in ('eq', 'numref')
docname, number = self.data['objects'].get(target, (None, None))
if docname:
@ -109,7 +108,7 @@ class MathDomain(Domain):
return None
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
# type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
refnode = self.resolve_xref(env, fromdocname, builder, 'eq', target, node, contnode)
if refnode is None:
return []
@ -121,7 +120,7 @@ class MathDomain(Domain):
return []
def add_equation(self, env, docname, labelid):
# type: (BuildEnvironment, unicode, unicode) -> int
# type: (BuildEnvironment, str, str) -> int
equations = self.data['objects']
if labelid in equations:
path = env.doc2path(equations[labelid][0])
@ -133,7 +132,7 @@ class MathDomain(Domain):
return eqno
def get_next_equation_number(self, docname):
# type: (unicode) -> int
# type: (str) -> int
targets = [eq for eq in self.data['objects'].values() if eq[0] == docname]
return len(targets) + 1
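
A doctest-style sketch of the numbering logic above, using hypothetical domain data: two equations already registered for the docname 'index' mean the next one gets number 3, while a document with no equations starts at 1.

    >>> domain.data['objects']
    {'euler': ('index', 1), 'pythagoras': ('index', 2)}
    >>> domain.get_next_equation_number('index')
    3
    >>> domain.get_next_equation_number('other')
    1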
@ -143,7 +142,7 @@ class MathDomain(Domain):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_domain(MathDomain)
app.add_role('eq', MathReferenceRole(warn_dangling=True))

View File

@ -31,7 +31,7 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import TextlikeNode, unicode # NOQA
from sphinx.util.typing import TextlikeNode # NOQA
logger = logging.getLogger(__name__)
@ -54,7 +54,7 @@ pairindextypes = {
'exception': _('exception'),
'statement': _('statement'),
'builtin': _('built-in function'),
} # Dict[unicode, unicode]
}
locale.pairindextypes = DeprecatedDict(
pairindextypes,
@ -65,7 +65,7 @@ locale.pairindextypes = DeprecatedDict(
def _pseudo_parse_arglist(signode, arglist):
# type: (addnodes.desc_signature, unicode) -> None
# type: (addnodes.desc_signature, str) -> None
""""Parse" a list of arguments separated by commas.
Arguments can have "optional" annotations given by enclosing them in
@ -117,9 +117,9 @@ def _pseudo_parse_arglist(signode, arglist):
# when it comes to handling "." and "~" prefixes.
class PyXrefMixin:
def make_xref(self,
rolename, # type: unicode
domain, # type: unicode
target, # type: unicode
rolename, # type: str
domain, # type: str
target, # type: str
innernode=nodes.emphasis, # type: Type[TextlikeNode]
contnode=None, # type: nodes.Node
env=None, # type: BuildEnvironment
@ -140,9 +140,9 @@ class PyXrefMixin:
return result
def make_xrefs(self,
rolename, # type: unicode
domain, # type: unicode
target, # type: unicode
rolename, # type: str
domain, # type: str
target, # type: str
innernode=nodes.emphasis, # type: Type[TextlikeNode]
contnode=None, # type: nodes.Node
env=None, # type: BuildEnvironment
@ -171,7 +171,7 @@ class PyXrefMixin:
class PyField(PyXrefMixin, Field):
def make_xref(self, rolename, domain, target,
innernode=nodes.emphasis, contnode=None, env=None):
# type: (unicode, unicode, unicode, Type[TextlikeNode], nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
# type: (str, str, str, Type[TextlikeNode], nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
@ -187,7 +187,7 @@ class PyGroupedField(PyXrefMixin, GroupedField):
class PyTypedField(PyXrefMixin, TypedField):
def make_xref(self, rolename, domain, target,
innernode=nodes.emphasis, contnode=None, env=None):
# type: (unicode, unicode, unicode, Type[TextlikeNode], nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
# type: (str, str, str, Type[TextlikeNode], nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
@ -231,7 +231,7 @@ class PyObject(ObjectDescription):
allow_nesting = False
def get_signature_prefix(self, sig):
# type: (unicode) -> unicode
# type: (str) -> str
"""May return a prefix to put before the object name in the
signature.
"""
@ -245,7 +245,7 @@ class PyObject(ObjectDescription):
return False
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
# type: (str, addnodes.desc_signature) -> Tuple[str, str]
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
@ -325,12 +325,12 @@ class PyObject(ObjectDescription):
return fullname, name_prefix
def get_index_text(self, modname, name):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
# type: (unicode, unicode, addnodes.desc_signature) -> None
# type: (str, str, addnodes.desc_signature) -> None
modname = self.options.get(
'module', self.env.ref_context.get('py:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
@ -426,7 +426,7 @@ class PyModulelevel(PyObject):
return self.objtype == 'function'
def get_index_text(self, modname, name_cls):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
if self.objtype == 'function':
if not modname:
return _('%s() (built-in function)') % name_cls[0]
@ -447,11 +447,11 @@ class PyClasslike(PyObject):
allow_nesting = True
def get_signature_prefix(self, sig):
# type: (unicode) -> unicode
# type: (str) -> str
return self.objtype + ' '
def get_index_text(self, modname, name_cls):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
if self.objtype == 'class':
if not modname:
return _('%s (built-in class)') % name_cls[0]
@ -472,7 +472,7 @@ class PyClassmember(PyObject):
return self.objtype.endswith('method')
def get_signature_prefix(self, sig):
# type: (unicode) -> unicode
# type: (str) -> str
if self.objtype == 'staticmethod':
return 'static '
elif self.objtype == 'classmethod':
@ -480,7 +480,7 @@ class PyClassmember(PyObject):
return ''
def get_index_text(self, modname, name_cls):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
name, cls = name_cls
add_modules = self.env.config.add_module_names
if self.objtype == 'method':
@ -542,7 +542,7 @@ class PyDecoratorMixin:
Mixin for decorator directives.
"""
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
# type: (str, addnodes.desc_signature) -> Tuple[str, str]
ret = super(PyDecoratorMixin, self).handle_signature(sig, signode) # type: ignore
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
@ -640,7 +640,7 @@ class PyCurrentModule(SphinxDirective):
class PyXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
# type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
# type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
refnode['py:module'] = env.ref_context.get('py:module')
refnode['py:class'] = env.ref_context.get('py:class')
if not has_explicit_title:
@ -671,10 +671,10 @@ class PythonModuleIndex(Index):
shortname = _('modules')
def generate(self, docnames=None):
# type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[IndexEntry]]], bool]
content = {} # type: Dict[unicode, List[IndexEntry]]
# type: (Iterable[str]) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]
content = {} # type: Dict[str, List[IndexEntry]]
# list of prefixes to ignore
ignores = None # type: List[unicode]
ignores = None # type: List[str]
ignores = self.domain.env.config['modindex_common_prefix'] # type: ignore
ignores = sorted(ignores, key=len, reverse=True)
# list of all modules, sorted by module name
@ -750,7 +750,7 @@ class PythonDomain(Domain):
'staticmethod': ObjType(_('static method'), 'meth', 'obj'),
'attribute': ObjType(_('attribute'), 'attr', 'obj'),
'module': ObjType(_('module'), 'mod', 'obj'),
} # type: Dict[unicode, ObjType]
} # type: Dict[str, ObjType]
directives = {
'function': PyModulelevel,
@ -780,13 +780,13 @@ class PythonDomain(Domain):
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
} # type: Dict[unicode, Dict[unicode, Tuple[Any]]]
} # type: Dict[str, Dict[str, Tuple[Any]]]
indices = [
PythonModuleIndex,
]
def clear_doc(self, docname):
# type: (unicode) -> None
# type: (str) -> None
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
@ -795,7 +795,7 @@ class PythonDomain(Domain):
del self.data['modules'][modname]
def merge_domaindata(self, docnames, otherdata):
# type: (List[unicode], Dict) -> None
# type: (List[str], Dict) -> None
# XXX check duplicates?
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@ -805,7 +805,7 @@ class PythonDomain(Domain):
self.data['modules'][modname] = data
def find_obj(self, env, modname, classname, name, type, searchmode=0):
# type: (BuildEnvironment, unicode, unicode, unicode, unicode, int) -> List[Tuple[unicode, Any]] # NOQA
# type: (BuildEnvironment, str, str, str, str, int) -> List[Tuple[str, Any]]
"""Find a Python object for "name", perhaps using the given module
and/or classname. Returns a list of (name, object entry) tuples.
"""
@ -817,7 +817,7 @@ class PythonDomain(Domain):
return []
objects = self.data['objects']
matches = [] # type: List[Tuple[unicode, Any]]
matches = [] # type: List[Tuple[str, Any]]
newname = None
if searchmode == 1:
@ -870,7 +870,7 @@ class PythonDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
type, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
modname = node.get('py:module')
clsname = node.get('py:class')
searchmode = node.hasattr('refspecific') and 1 or 0
@ -891,10 +891,10 @@ class PythonDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
# type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
modname = node.get('py:module')
clsname = node.get('py:class')
results = [] # type: List[Tuple[unicode, nodes.Element]]
results = [] # type: List[Tuple[str, nodes.Element]]
# always search in "refspecific" mode with the :any: role
matches = self.find_obj(env, modname, clsname, target, None, 1)
@ -910,7 +910,7 @@ class PythonDomain(Domain):
return results
def _make_module_refnode(self, builder, fromdocname, name, contnode):
# type: (Builder, unicode, unicode, nodes.Node) -> nodes.Element
# type: (Builder, str, str, nodes.Node) -> nodes.Element
# get additional info for modules
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
@ -924,7 +924,7 @@ class PythonDomain(Domain):
'module-' + name, contnode, title)
def get_objects(self):
# type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
# type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type) in self.data['objects'].items():
@ -932,7 +932,7 @@ class PythonDomain(Domain):
yield (refname, refname, type, docname, refname, 1)
def get_full_qualified_name(self, node):
# type: (nodes.Element) -> unicode
# type: (nodes.Element) -> str
modname = node.get('py:module')
clsname = node.get('py:class')
target = node.get('reftarget')
@ -943,7 +943,7 @@ class PythonDomain(Domain):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_domain(PythonDomain)
return {

View File

@ -25,7 +25,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
dir_sig_re = re.compile(r'\.\. (.+?)::(.*)$')
@ -37,7 +36,7 @@ class ReSTMarkup(ObjectDescription):
"""
def add_target_and_index(self, name, sig, signode):
# type: (unicode, unicode, addnodes.desc_signature) -> None
# type: (str, str, addnodes.desc_signature) -> None
targetname = self.objtype + '-' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
@ -59,7 +58,7 @@ class ReSTMarkup(ObjectDescription):
targetname, '', None))
def get_index_text(self, objectname, name):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
if self.objtype == 'directive':
return _('%s (directive)') % name
elif self.objtype == 'role':
@ -68,7 +67,7 @@ class ReSTMarkup(ObjectDescription):
def parse_directive(d):
# type: (unicode) -> Tuple[unicode, unicode]
# type: (str) -> Tuple[str, str]
"""Parse a directive signature.
Returns (directive, arguments) string tuple. If no arguments are given,
@ -90,7 +89,7 @@ class ReSTDirective(ReSTMarkup):
Description of a reST directive.
"""
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> unicode
# type: (str, addnodes.desc_signature) -> str
name, args = parse_directive(sig)
desc_name = '.. %s::' % name
signode += addnodes.desc_name(desc_name, desc_name)
@ -104,7 +103,7 @@ class ReSTRole(ReSTMarkup):
Description of a reST role.
"""
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> unicode
# type: (str, addnodes.desc_signature) -> str
signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig)
return sig
@ -128,23 +127,23 @@ class ReSTDomain(Domain):
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
} # type: Dict[unicode, Dict[unicode, Tuple[unicode, ObjType]]]
} # type: Dict[str, Dict[str, Tuple[str, ObjType]]]
def clear_doc(self, docname):
# type: (unicode) -> None
# type: (str) -> None
for (typ, name), doc in list(self.data['objects'].items()):
if doc == docname:
del self.data['objects'][typ, name]
def merge_domaindata(self, docnames, otherdata):
# type: (List[unicode], Dict) -> None
# type: (List[str], Dict) -> None
# XXX check duplicates
for (typ, name), doc in otherdata['objects'].items():
if doc in docnames:
self.data['objects'][typ, name] = doc
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
objects = self.data['objects']
objtypes = self.objtypes_for_role(typ)
for objtype in objtypes:
@ -156,9 +155,9 @@ class ReSTDomain(Domain):
return None
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
# type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
objects = self.data['objects']
results = [] # type: List[Tuple[unicode, nodes.Element]]
results = [] # type: List[Tuple[str, nodes.Element]]
for objtype in self.object_types:
if (objtype, target) in self.data['objects']:
results.append(('rst:' + self.role_for_objtype(objtype),
@ -169,13 +168,13 @@ class ReSTDomain(Domain):
return results
def get_objects(self):
# type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
# type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for (typ, name), docname in self.data['objects'].items():
yield name, name, typ, docname, typ + '-' + name, 1
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_domain(ReSTDomain)
return {

View File

@ -36,7 +36,7 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import RoleFunction, unicode # NOQA
from sphinx.util.typing import RoleFunction # NOQA
logger = logging.getLogger(__name__)
@ -51,11 +51,11 @@ class GenericObject(ObjectDescription):
"""
A generic x-ref directive registered with Sphinx.add_object_type().
"""
indextemplate = '' # type: unicode
parse_node = None # type: Callable[[GenericObject, BuildEnvironment, unicode, addnodes.desc_signature], unicode] # NOQA
indextemplate = ''
parse_node = None # type: Callable[[GenericObject, BuildEnvironment, str, addnodes.desc_signature], str] # NOQA
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> unicode
# type: (str, addnodes.desc_signature) -> str
if self.parse_node:
name = self.parse_node(self.env, sig, signode)
else:
@ -66,7 +66,7 @@ class GenericObject(ObjectDescription):
return name
def add_target_and_index(self, name, sig, signode):
# type: (unicode, unicode, addnodes.desc_signature) -> None
# type: (str, str, addnodes.desc_signature) -> None
targetname = '%s-%s' % (self.objtype, name)
signode['ids'].append(targetname)
self.state.document.note_explicit_target(signode)
@ -156,10 +156,10 @@ class Cmdoption(ObjectDescription):
"""
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> unicode
# type: (str, addnodes.desc_signature) -> str
"""Transform an option description into RST nodes."""
count = 0
firstname = '' # type: unicode
firstname = ''
for potential_option in sig.split(', '):
potential_option = potential_option.strip()
m = option_desc_re.match(potential_option)
@ -185,7 +185,7 @@ class Cmdoption(ObjectDescription):
return firstname
def add_target_and_index(self, firstname, sig, signode):
# type: (unicode, unicode, addnodes.desc_signature) -> None
# type: (str, str, addnodes.desc_signature) -> None
currprogram = self.env.ref_context.get('std:program')
for optname in signode.get('allnames', []):
targetname = optname.replace('/', '-')
@ -233,20 +233,20 @@ class Program(SphinxDirective):
class OptionXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
# type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
# type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
refnode['std:program'] = env.ref_context.get('std:program')
return title, target
def split_term_classifiers(line):
# type: (unicode) -> List[Union[unicode, None]]
# type: (str) -> List[Union[str, None]]
# split line into a term and classifiers. If no classifier, None is used.
parts = re.split(' +: +', line) + [None]
return parts
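
Because the helper above always appends a trailing None, callers can rely on at least one classifier slot being present. A doctest-style sketch with hypothetical glossary lines:

    >>> split_term_classifiers('builder : sorting key')
    ['builder', 'sorting key', None]
    >>> split_term_classifiers('builder')
    ['builder', None]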
def make_glossary_term(env, textnodes, index_key, source, lineno, new_id=None):
# type: (BuildEnvironment, Iterable[nodes.Node], unicode, unicode, int, unicode) -> nodes.term # NOQA
# type: (BuildEnvironment, Iterable[nodes.Node], str, str, int, str) -> nodes.term
# get a text-only representation of the term and register it
# as a cross-reference target
term = nodes.term('', '', *textnodes)
@ -300,7 +300,7 @@ class Glossary(SphinxDirective):
# be* a definition list.
# first, collect single entries
entries = [] # type: List[Tuple[List[Tuple[unicode, unicode, int]], StringList]]
entries = [] # type: List[Tuple[List[Tuple[str, str, int]], StringList]]
in_definition = True
was_empty = True
messages = [] # type: List[nodes.Node]
@ -352,7 +352,7 @@ class Glossary(SphinxDirective):
# now, parse all the entries into a big definition list
items = []
for terms, definition in entries:
termtexts = [] # type: List[unicode]
termtexts = [] # type: List[str]
termnodes = [] # type: List[nodes.Node]
system_messages = [] # type: List[nodes.Node]
for line, source, lineno in terms:
@ -390,7 +390,7 @@ class Glossary(SphinxDirective):
def token_xrefs(text):
# type: (unicode) -> List[nodes.Node]
# type: (str) -> List[nodes.Node]
retnodes = [] # type: List[nodes.Node]
pos = 0
for m in token_re.finditer(text):
@ -463,7 +463,7 @@ class StandardDomain(Domain):
'envvar': ObjType(_('environment variable'), 'envvar'),
'cmdoption': ObjType(_('program option'), 'option'),
'doc': ObjType(_('document'), 'doc', searchprio=-1)
} # type: Dict[unicode, ObjType]
} # type: Dict[str, ObjType]
directives = {
'program': Program,
@ -472,7 +472,7 @@ class StandardDomain(Domain):
'envvar': EnvVar,
'glossary': Glossary,
'productionlist': ProductionList,
} # type: Dict[unicode, Type[Directive]]
} # type: Dict[str, Type[Directive]]
roles = {
'option': OptionXRefRole(warn_dangling=True),
'envvar': EnvVarXRefRole(),
@ -491,7 +491,7 @@ class StandardDomain(Domain):
'keyword': XRefRole(warn_dangling=True),
# links to documents
'doc': XRefRole(warn_dangling=True, innernodeclass=nodes.inline),
} # type: Dict[unicode, Union[RoleFunction, XRefRole]]
} # type: Dict[str, Union[RoleFunction, XRefRole]]
initial_data = {
'progoptions': {}, # (program, name) -> docname, labelid
@ -525,7 +525,7 @@ class StandardDomain(Domain):
nodes.figure: ('figure', None),
nodes.table: ('table', None),
nodes.container: ('code-block', None),
} # type: Dict[Type[nodes.Node], Tuple[unicode, Callable]]
} # type: Dict[Type[nodes.Node], Tuple[str, Callable]]
def __init__(self, env):
# type: (BuildEnvironment) -> None
@ -537,7 +537,7 @@ class StandardDomain(Domain):
self.enumerable_nodes[node] = settings
def clear_doc(self, docname):
# type: (unicode) -> None
# type: (str) -> None
for key, (fn, _l) in list(self.data['progoptions'].items()):
if fn == docname:
del self.data['progoptions'][key]
@ -560,7 +560,7 @@ class StandardDomain(Domain):
del self.data['anonlabels'][key]
def merge_domaindata(self, docnames, otherdata):
# type: (List[unicode], Dict) -> None
# type: (List[str], Dict) -> None
# XXX duplicates?
for key, data in otherdata['progoptions'].items():
if data[0] in docnames:
@ -584,13 +584,13 @@ class StandardDomain(Domain):
self.data['anonlabels'][key] = data
def process_doc(self, env, docname, document):
# type: (BuildEnvironment, unicode, nodes.document) -> None
# type: (BuildEnvironment, str, nodes.document) -> None
self.note_citations(env, docname, document)
self.note_citation_refs(env, docname, document)
self.note_labels(env, docname, document)
def note_citations(self, env, docname, document):
# type: (BuildEnvironment, unicode, nodes.document) -> None
# type: (BuildEnvironment, str, nodes.document) -> None
for node in document.traverse(nodes.citation):
node['docname'] = docname
label = cast(nodes.label, node[0]).astext()
@ -601,7 +601,7 @@ class StandardDomain(Domain):
self.data['citations'][label] = (docname, node['ids'][0], node.line)
def note_citation_refs(self, env, docname, document):
# type: (BuildEnvironment, unicode, nodes.document) -> None
# type: (BuildEnvironment, str, nodes.document) -> None
for node in document.traverse(addnodes.pending_xref):
if node['refdomain'] == 'std' and node['reftype'] == 'citation':
label = node['reftarget']
@ -609,7 +609,7 @@ class StandardDomain(Domain):
citation_refs.append(docname)
def note_labels(self, env, docname, document):
# type: (BuildEnvironment, unicode, nodes.document) -> None
# type: (BuildEnvironment, str, nodes.document) -> None
labels, anonlabels = self.data['labels'], self.data['anonlabels']
for name, explicit in document.nametypes.items():
if not explicit:
@ -652,11 +652,11 @@ class StandardDomain(Domain):
labels[name] = docname, labelid, sectname
def add_object(self, objtype, name, docname, labelid):
# type: (unicode, unicode, unicode, unicode) -> None
# type: (str, str, str, str) -> None
self.data['objects'][objtype, name] = (docname, labelid)
def add_program_option(self, program, name, docname, labelid):
# type: (unicode, unicode, unicode, unicode) -> None
# type: (str, str, str, str) -> None
self.data['progoptions'][program, name] = (docname, labelid)
def check_consistency(self):
@ -669,7 +669,7 @@ class StandardDomain(Domain):
def build_reference_node(self, fromdocname, builder, docname, labelid,
sectname, rolename, **options):
# type: (unicode, Builder, unicode, unicode, unicode, unicode, Any) -> nodes.Element
# type: (str, Builder, str, str, str, str, Any) -> nodes.Element
nodeclass = options.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **options)
innernode = nodes.inline(sectname, sectname)
@ -693,7 +693,7 @@ class StandardDomain(Domain):
return newnode
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if typ == 'ref':
resolver = self._resolve_ref_xref
elif typ == 'numref':
@ -712,7 +712,7 @@ class StandardDomain(Domain):
return resolver(env, fromdocname, builder, typ, target, node, contnode)
def _resolve_ref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if node['refexplicit']:
# reference to anonymous label; the reference uses
# the supplied link caption
@ -730,7 +730,7 @@ class StandardDomain(Domain):
docname, labelid, sectname, 'ref')
def _resolve_numref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if target in self.data['labels']:
docname, labelid, figname = self.data['labels'].get(target, ('', '', ''))
else:
@ -791,7 +791,7 @@ class StandardDomain(Domain):
title=title)
def _resolve_keyword_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# keywords are oddballs: they are referenced by named labels
docname, labelid, _ = self.data['labels'].get(target, ('', '', ''))
if not docname:
@ -800,7 +800,7 @@ class StandardDomain(Domain):
labelid, contnode)
def _resolve_doc_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# direct reference to a document by source name; can be absolute or relative
refdoc = node.get('refdoc', fromdocname)
docname = docname_join(refdoc, node['reftarget'])
@ -816,7 +816,7 @@ class StandardDomain(Domain):
return make_refnode(builder, fromdocname, docname, None, innernode)
def _resolve_option_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
progname = node.get('std:program')
target = target.strip()
docname, labelid = self.data['progoptions'].get((progname, target), ('', ''))
@ -838,7 +838,7 @@ class StandardDomain(Domain):
labelid, contnode)
def _resolve_citation_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
from sphinx.environment import NoUri
docname, labelid, lineno = self.data['citations'].get(target, ('', '', 0))
@ -861,7 +861,7 @@ class StandardDomain(Domain):
raise
def _resolve_obj_xref(self, env, fromdocname, builder, typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
objtypes = self.objtypes_for_role(typ) or []
for objtype in objtypes:
if (objtype, target) in self.data['objects']:
@ -875,8 +875,8 @@ class StandardDomain(Domain):
labelid, contnode)
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, addnodes.pending_xref, nodes.Element) -> List[Tuple[unicode, nodes.Element]] # NOQA
results = [] # type: List[Tuple[unicode, nodes.Element]]
# type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
results = [] # type: List[Tuple[str, nodes.Element]]
ltarget = target.lower() # :ref: lowercases its target automatically
for role in ('ref', 'option'): # do not try "keyword"
res = self.resolve_xref(env, fromdocname, builder, role,
@ -897,7 +897,7 @@ class StandardDomain(Domain):
return results
def get_objects(self):
# type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
# type: () -> Iterator[Tuple[str, str, str, str, str, int]]
# handle the special 'doc' reference here
for doc in self.env.all_docs:
yield (doc, clean_astext(self.env.titles[doc]), 'doc', doc, '', -1)
@ -919,7 +919,7 @@ class StandardDomain(Domain):
yield (name, name, 'label', info[0], info[1], -1)
def get_type_name(self, type, primary=False):
# type: (ObjType, bool) -> unicode
# type: (ObjType, bool) -> str
# never prepend "Default"
return type.lname
@ -928,7 +928,7 @@ class StandardDomain(Domain):
return node.__class__ in self.enumerable_nodes
def get_numfig_title(self, node):
# type: (nodes.Node) -> unicode
# type: (nodes.Node) -> str
"""Get the title of enumerable nodes to refer them using its title"""
if self.is_enumerable_node(node):
_, title_getter = self.enumerable_nodes.get(node.__class__, (None, None))
@ -942,7 +942,7 @@ class StandardDomain(Domain):
return None
def get_enumerable_node_type(self, node):
# type: (nodes.Node) -> unicode
# type: (nodes.Node) -> str
"""Get type of enumerable nodes."""
def has_child(node, cls):
# type: (nodes.Element, Type) -> bool
@ -960,7 +960,7 @@ class StandardDomain(Domain):
return figtype
def get_figtype(self, node):
# type: (nodes.Node) -> unicode
# type: (nodes.Node) -> str
"""Get figure type of nodes.
.. deprecated:: 1.8
@ -971,7 +971,7 @@ class StandardDomain(Domain):
return self.get_enumerable_node_type(node)
def get_fignumber(self, env, builder, figtype, docname, target_node):
# type: (BuildEnvironment, Builder, unicode, unicode, nodes.Element) -> Tuple[int, ...]
# type: (BuildEnvironment, Builder, str, str, nodes.Element) -> Tuple[int, ...]
if figtype == 'section':
if builder.name == 'latex':
return tuple()
@ -994,7 +994,7 @@ class StandardDomain(Domain):
raise ValueError
def get_full_qualified_name(self, node):
# type: (nodes.Element) -> unicode
# type: (nodes.Element) -> str
if node.get('reftype') == 'option':
progname = node.get('std:program')
command = ws_re.split(node.get('reftarget'))
@ -1010,7 +1010,7 @@ class StandardDomain(Domain):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_domain(StandardDomain)
return {

View File

@ -40,7 +40,6 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.domains import Domain # NOQA
from sphinx.project import Project # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -80,7 +79,7 @@ versioning_conditions = {
'none': False,
'text': is_translatable,
'commentable': is_commentable,
} # type: Dict[unicode, Union[bool, Callable]]
}
class NoUri(Exception):
@ -95,19 +94,19 @@ class BuildEnvironment:
transformations to resolve links to them.
"""
domains = None # type: Dict[unicode, Domain]
domains = None # type: Dict[str, Domain]
# --------- ENVIRONMENT INITIALIZATION -------------------------------------
def __init__(self, app=None):
# type: (Sphinx) -> None
self.app = None # type: Sphinx
self.doctreedir = None # type: unicode
self.srcdir = None # type: unicode
self.doctreedir = None # type: str
self.srcdir = None # type: str
self.config = None # type: Config
self.config_status = None # type: int
self.project = None # type: Project
self.version = None # type: Dict[unicode, unicode]
self.version = None # type: Dict[str, str]
# the method of doctree versioning; see set_versioning_method
self.versioning_condition = None # type: Union[bool, Callable]
@ -123,60 +122,60 @@ class BuildEnvironment:
# All "docnames" here are /-separated and relative and exclude
# the source suffix.
self.all_docs = {} # type: Dict[unicode, float]
self.all_docs = {} # type: Dict[str, float]
# docname -> mtime at the time of reading
# contains all read docnames
self.dependencies = defaultdict(set) # type: Dict[unicode, Set[unicode]]
self.dependencies = defaultdict(set) # type: Dict[str, Set[str]]
# docname -> set of dependent file
# names, relative to documentation root
self.included = defaultdict(set) # type: Dict[unicode, Set[unicode]]
self.included = defaultdict(set) # type: Dict[str, Set[str]]
# docname -> set of included file
# docnames included from other documents
self.reread_always = set() # type: Set[unicode]
self.reread_always = set() # type: Set[str]
# docnames to re-read unconditionally on
# next build
# File metadata
self.metadata = defaultdict(dict) # type: Dict[unicode, Dict[unicode, Any]]
self.metadata = defaultdict(dict) # type: Dict[str, Dict[str, Any]]
# docname -> dict of metadata items
# TOC inventory
self.titles = {} # type: Dict[unicode, nodes.title]
self.titles = {} # type: Dict[str, nodes.title]
# docname -> title node
self.longtitles = {} # type: Dict[unicode, nodes.title]
self.longtitles = {} # type: Dict[str, nodes.title]
# docname -> title node; only different if
# set differently with title directive
self.tocs = {} # type: Dict[unicode, nodes.bullet_list]
self.tocs = {} # type: Dict[str, nodes.bullet_list]
# docname -> table of contents nodetree
self.toc_num_entries = {} # type: Dict[unicode, int]
self.toc_num_entries = {} # type: Dict[str, int]
# docname -> number of real entries
# used to determine when to show the TOC
# in a sidebar (don't show if it's only one item)
self.toc_secnumbers = {} # type: Dict[unicode, Dict[unicode, Tuple[int, ...]]]
self.toc_secnumbers = {} # type: Dict[str, Dict[str, Tuple[int, ...]]]
# docname -> dict of sectionid -> number
self.toc_fignumbers = {} # type: Dict[unicode, Dict[unicode, Dict[unicode, Tuple[int, ...]]]] # NOQA
self.toc_fignumbers = {} # type: Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]
# docname -> dict of figtype ->
# dict of figureid -> number
self.toctree_includes = {} # type: Dict[unicode, List[unicode]]
self.toctree_includes = {} # type: Dict[str, List[str]]
# docname -> list of toctree includefiles
self.files_to_rebuild = {} # type: Dict[unicode, Set[unicode]]
self.files_to_rebuild = {} # type: Dict[str, Set[str]]
# docname -> set of files
# (containing its TOCs) to rebuild too
self.glob_toctrees = set() # type: Set[unicode]
self.glob_toctrees = set() # type: Set[str]
# docnames that have :glob: toctrees
self.numbered_toctrees = set() # type: Set[unicode]
self.numbered_toctrees = set() # type: Set[str]
# docnames that have :numbered: toctrees
# domain-specific inventories, here to be pickled
self.domaindata = {} # type: Dict[unicode, Dict]
self.domaindata = {} # type: Dict[str, Dict]
# domainname -> domain-specific dict
# Other inventories
self.indexentries = {} # type: Dict[unicode, List[Tuple[unicode, unicode, unicode, unicode, unicode]]] # NOQA
self.indexentries = {} # type: Dict[str, List[Tuple[str, str, str, str, str]]]
# docname -> list of
# (type, unicode, target, aliasname)
# (type, str, target, aliasname)
# these map absolute path -> (docnames, unique filename)
self.images = FilenameUniqDict() # type: FilenameUniqDict
@ -184,14 +183,14 @@ class BuildEnvironment:
# filename -> (set of docnames, destination)
# the original URI for images
self.original_image_uri = {} # type: Dict[unicode, unicode]
self.original_image_uri = {} # type: Dict[str, str]
# temporary data storage while reading a document
self.temp_data = {} # type: Dict[unicode, Any]
self.temp_data = {} # type: Dict[str, Any]
# context for cross-references (e.g. current module or class)
# this is similar to temp_data, but will for example be copied to
# attributes of "any" cross references
self.ref_context = {} # type: Dict[unicode, Any]
self.ref_context = {} # type: Dict[str, Any]
# set up environment
if app:
@ -268,7 +267,7 @@ class BuildEnvironment:
self.settings.setdefault('smart_quotes', True)
def set_versioning_method(self, method, compare):
# type: (unicode, bool) -> None
# type: (str, bool) -> None
"""This sets the doctree versioning method for this environment.
Versioning methods are a builder property; only builders with the same
@ -287,7 +286,7 @@ class BuildEnvironment:
self.versioning_compare = compare
def clear_doc(self, docname):
# type: (unicode) -> None
# type: (str) -> None
"""Remove all traces of a source file in the inventory."""
if docname in self.all_docs:
self.all_docs.pop(docname, None)
@ -298,7 +297,7 @@ class BuildEnvironment:
domain.clear_doc(docname)
def merge_info_from(self, docnames, other, app):
# type: (List[unicode], BuildEnvironment, Sphinx) -> None
# type: (List[str], BuildEnvironment, Sphinx) -> None
"""Merge global information gathered about *docnames* while reading them
from the *other* environment.
@ -320,7 +319,7 @@ class BuildEnvironment:
app.emit('env-merge-info', self, docnames, other)
def path2doc(self, filename):
# type: (unicode) -> Optional[unicode]
# type: (str) -> Optional[str]
"""Return the docname for the filename if the file is document.
*filename* should be absolute or relative to the source directory.
@ -328,7 +327,7 @@ class BuildEnvironment:
return self.project.path2doc(filename)
def doc2path(self, docname, base=True, suffix=None):
# type: (unicode, Union[bool, unicode], unicode) -> unicode
# type: (str, Union[bool, str], str) -> str
"""Return the filename for the document name.
If *base* is True, return absolute path under self.srcdir.
@ -352,7 +351,7 @@ class BuildEnvironment:
return pathname
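Taken together, ``path2doc()`` and ``doc2path()`` convert between source file names and docnames. A hedged round-trip sketch, assuming an existing ``BuildEnvironment`` instance and a docname ``'index'`` that is not taken from this commit::

    def roundtrip(env):
        # type: (BuildEnvironment) -> None
        filename = env.doc2path('index')          # absolute path under env.srcdir
        assert env.path2doc(filename) == 'index'  # and back to the docname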
def relfn2path(self, filename, docname=None):
# type: (unicode, unicode) -> Tuple[unicode, unicode]
# type: (str, str) -> Tuple[str, str]
"""Return paths to a file referenced from a document, relative to
documentation root and absolute.
@ -378,7 +377,7 @@ class BuildEnvironment:
@property
def found_docs(self):
# type: () -> Set[unicode]
# type: () -> Set[str]
"""contains all existing docnames."""
return self.project.docnames
@ -414,13 +413,13 @@ class BuildEnvironment:
raise DocumentError(__('Failed to scan documents in %s: %r') % (self.srcdir, exc))
def get_outdated_files(self, config_changed):
# type: (bool) -> Tuple[Set[unicode], Set[unicode], Set[unicode]]
# type: (bool) -> Tuple[Set[str], Set[str], Set[str]]
"""Return (added, changed, removed) sets."""
# clear all files no longer present
removed = set(self.all_docs) - self.found_docs
added = set() # type: Set[unicode]
changed = set() # type: Set[unicode]
added = set() # type: Set[str]
changed = set() # type: Set[str]
if config_changed:
# config values affect e.g. substitutions
@ -465,8 +464,8 @@ class BuildEnvironment:
return added, changed, removed
def check_dependents(self, app, already):
# type: (Sphinx, Set[unicode]) -> Iterator[unicode]
to_rewrite = [] # type: List[unicode]
# type: (Sphinx, Set[str]) -> Iterator[str]
to_rewrite = [] # type: List[str]
for docnames in app.emit('env-get-updated', self):
to_rewrite.extend(docnames)
for docname in set(to_rewrite):
@ -476,7 +475,7 @@ class BuildEnvironment:
# --------- SINGLE FILE READING --------------------------------------------
def prepare_settings(self, docname):
# type: (unicode) -> None
# type: (str) -> None
"""Prepare to set up environment for reading."""
self.temp_data['docname'] = docname
# defaults to the global default, but can be re-set in a document
@ -488,12 +487,12 @@ class BuildEnvironment:
@property
def docname(self):
# type: () -> unicode
# type: () -> str
"""Returns the docname of the document currently being parsed."""
return self.temp_data['docname']
def new_serialno(self, category=''):
# type: (unicode) -> int
# type: (str) -> int
"""Return a serial number, e.g. for index entry targets.
The number is guaranteed to be unique in the current document.
@ -504,7 +503,7 @@ class BuildEnvironment:
return cur
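``new_serialno()`` is typically used to build node ids that stay unique within the document currently being read. A small sketch; the ``'todo'`` category and the id format are illustrative, not taken from this commit::

    def make_target_id(env):
        # type: (BuildEnvironment) -> str
        # returns 'todo-0', 'todo-1', ... per document and per category,
        # so repeated directives in one file never collide
        return 'todo-%d' % env.new_serialno('todo')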
def note_dependency(self, filename):
# type: (unicode) -> None
# type: (str) -> None
"""Add *filename* as a dependency of the current document.
This means that the document will be rebuilt if this file changes.
@ -514,7 +513,7 @@ class BuildEnvironment:
self.dependencies[self.docname].add(filename)
def note_included(self, filename):
# type: (unicode) -> None
# type: (str) -> None
"""Add *filename* as a included from other document.
This means the document is not orphaned.
@ -531,7 +530,7 @@ class BuildEnvironment:
self.reread_always.add(self.docname)
def get_domain(self, domainname):
# type: (unicode) -> Domain
# type: (str) -> Domain
"""Return the domain instance with the specified name.
Raises an ExtensionError if the domain is not registered.
@ -544,7 +543,7 @@ class BuildEnvironment:
# --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
def get_doctree(self, docname):
# type: (unicode) -> nodes.document
# type: (str) -> nodes.document
"""Read the doctree for a file from the pickle and return it."""
filename = path.join(self.doctreedir, docname + '.doctree')
with open(filename, 'rb') as f:
@ -555,7 +554,7 @@ class BuildEnvironment:
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True, includehidden=False):
# type: (unicode, Builder, nodes.document, bool, bool) -> nodes.document
# type: (str, Builder, nodes.document, bool, bool) -> nodes.document
"""Read the doctree from the pickle, resolve cross-references and
toctrees and return it.
"""
@ -579,7 +578,7 @@ class BuildEnvironment:
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
# type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node
# type: (str, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
@ -596,11 +595,11 @@ class BuildEnvironment:
includehidden)
def resolve_references(self, doctree, fromdocname, builder):
# type: (nodes.document, unicode, Builder) -> None
# type: (nodes.document, str, Builder) -> None
self.apply_post_transforms(doctree, fromdocname)
def apply_post_transforms(self, doctree, docname):
# type: (nodes.document, unicode) -> None
# type: (nodes.document, str) -> None
"""Apply all post-transforms."""
try:
# set env.docname during applying post-transforms
@ -618,11 +617,11 @@ class BuildEnvironment:
self.app.emit('doctree-resolved', doctree, docname)
def collect_relations(self):
# type: () -> Dict[unicode, List[unicode]]
# type: () -> Dict[str, List[str]]
traversed = set()
def traverse_toctree(parent, docname):
# type: (unicode, unicode) -> Iterator[Tuple[unicode, unicode]]
# type: (str, str) -> Iterator[Tuple[str, str]]
if parent == docname:
logger.warning(__('self referenced toctree found. Ignored.'), location=docname)
return
@ -676,31 +675,31 @@ class BuildEnvironment:
# --------- METHODS FOR COMPATIBILITY --------------------------------------
def update(self, config, srcdir, doctreedir):
# type: (Config, unicode, unicode) -> List[unicode]
# type: (Config, str, str) -> List[str]
warnings.warn('env.update() is deprecated. Please use builder.read() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.app.builder.read()
def _read_serial(self, docnames, app):
# type: (List[unicode], Sphinx) -> None
# type: (List[str], Sphinx) -> None
warnings.warn('env._read_serial() is deprecated. Please use builder.read() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.app.builder._read_serial(docnames)
def _read_parallel(self, docnames, app, nproc):
# type: (List[unicode], Sphinx, int) -> None
# type: (List[str], Sphinx, int) -> None
warnings.warn('env._read_parallel() is deprecated. Please use builder.read() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.app.builder._read_parallel(docnames, nproc)
def read_doc(self, docname, app=None):
# type: (unicode, Sphinx) -> None
# type: (str, Sphinx) -> None
warnings.warn('env.read_doc() is deprecated. Please use builder.read_doc() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.app.builder.read_doc(docname)
def write_doctree(self, docname, doctree):
# type: (unicode, nodes.document) -> None
# type: (str, nodes.document) -> None
warnings.warn('env.write_doctree() is deprecated. '
'Please use builder.write_doctree() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@ -708,7 +707,7 @@ class BuildEnvironment:
@property
def _nitpick_ignore(self):
# type: () -> List[unicode]
# type: () -> List[str]
warnings.warn('env._nitpick_ignore is deprecated. '
'Please use config.nitpick_ignore instead.',
RemovedInSphinx30Warning, stacklevel=2)
@ -742,7 +741,7 @@ class BuildEnvironment:
@classmethod
def frompickle(cls, filename, app):
# type: (unicode, Sphinx) -> BuildEnvironment
# type: (str, Sphinx) -> BuildEnvironment
warnings.warn('BuildEnvironment.frompickle() is deprecated. '
'Please use pickle.load() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@ -768,7 +767,7 @@ class BuildEnvironment:
return io.getvalue()
def topickle(self, filename):
# type: (unicode) -> None
# type: (str) -> None
warnings.warn('env.topickle() is deprecated. '
'Please use pickle.dump() instead.',
RemovedInSphinx30Warning, stacklevel=2)
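As the warnings above say, the deprecated ``frompickle()`` and ``topickle()`` wrappers now just point at the standard library. A hedged sketch of the suggested replacement (the file name is arbitrary)::

    import pickle

    def save_env(env, filename):
        # type: (BuildEnvironment, str) -> None
        with open(filename, 'wb') as f:
            pickle.dump(env, f, pickle.HIGHEST_PROTOCOL)

    def load_env(filename):
        # type: (str) -> BuildEnvironment
        with open(filename, 'rb') as f:
            return pickle.load(f)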
@ -777,14 +776,14 @@ class BuildEnvironment:
@property
def versionchanges(self):
# type: () -> Dict[unicode, List[Tuple[unicode, unicode, int, unicode, unicode, unicode]]] # NOQA
# type: () -> Dict[str, List[Tuple[str, str, int, str, str, str]]]
warnings.warn('env.versionchanges() is deprecated. '
'Please use ChangeSetDomain instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.domaindata['changeset']['changes']
def note_versionchange(self, type, version, node, lineno):
# type: (unicode, unicode, addnodes.versionmodified, int) -> None
# type: (str, str, addnodes.versionmodified, int) -> None
warnings.warn('env.note_versionchange() is deprecated. '
'Please use ChangeSetDomain.note_changeset() instead.',
RemovedInSphinx30Warning, stacklevel=2)

View File

@ -12,7 +12,6 @@
if False:
# For type annotation
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
class ImageAdapter:
@ -21,7 +20,7 @@ class ImageAdapter:
self.env = env
def get_original_image_uri(self, name):
# type: (unicode) -> unicode
# type: (str) -> str
"""Get the original image URI."""
while name in self.env.original_image_uri:
name = self.env.original_image_uri[name]

View File

@ -23,7 +23,6 @@ if False:
from typing import Any, Dict, Pattern, List, Tuple # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -35,14 +34,14 @@ class IndexEntries:
def create_index(self, builder, group_entries=True,
_fixre=re.compile(r'(.*) ([(][^()]*[)])')):
# type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, Any]]]] # NOQA
# type: (Builder, bool, Pattern) -> List[Tuple[str, List[Tuple[str, Any]]]]
"""Create the real index from the collected index entries."""
from sphinx.environment import NoUri
new = {} # type: Dict[unicode, List]
new = {} # type: Dict[str, List]
def add_entry(word, subword, main, link=True, dic=new, key=None):
# type: (unicode, unicode, unicode, bool, Dict, unicode) -> None
# type: (str, str, str, bool, Dict, str) -> None
# Force the word to be unicode if it's an ASCII bytestring.
# This will solve problems with unicode normalization later.
# For instance the RFC role will add bytestrings at the moment
@ -97,7 +96,7 @@ class IndexEntries:
# sort the index entries; put all symbols at the front, even those
# following the letters in ASCII, this is where the chr(127) comes from
def keyfunc(entry):
# type: (Tuple[unicode, List]) -> Tuple[unicode, unicode]
# type: (Tuple[str, List]) -> Tuple[str, str]
key, (void, void, category_key) = entry
if category_key:
# using specified category key to sort
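The ``chr(127)`` trick mentioned above works because ``chr(127)`` (DEL) sorts after every printable ASCII character, so prefixing alphabetic keys with it pushes them behind all symbol entries. An isolated illustration, not the actual ``keyfunc``::

    def sort_symbols_first(words):
        # type: (List[str]) -> List[str]
        def keyfunc(word):
            key = word.lower()
            if key[:1].isalpha() or key.startswith('_'):
                key = chr(127) + key
            return key
        return sorted(words, key=keyfunc)

    # sort_symbols_first(['zebra', '{', 'alpha']) -> ['{', 'alpha', 'zebra']
    # (a plain sorted() would put '{' last, because '{' > 'z' in ASCII)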
@ -120,8 +119,8 @@ class IndexEntries:
# func()
# (in module foo)
# (in module bar)
oldkey = '' # type: unicode
oldsubitems = None # type: Dict[unicode, List]
oldkey = ''
oldsubitems = None # type: Dict[str, List]
i = 0
while i < len(newlist):
key, (targets, subitems, _key) = newlist[i]
@ -144,7 +143,7 @@ class IndexEntries:
# group the entries by letter
def keyfunc2(item):
# type: (Tuple[unicode, List]) -> unicode
# type: (Tuple[str, List]) -> str
# hack: mutating the subitems dicts to a list in the keyfunc
k, v = item
v[1] = sorted((si, se) for (si, (se, void, void)) in v[1].items())

View File

@ -24,7 +24,6 @@ if False:
from typing import Any, Dict, List # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -35,7 +34,7 @@ class TocTree:
self.env = env
def note(self, docname, toctreenode):
# type: (unicode, addnodes.toctree) -> None
# type: (str, addnodes.toctree) -> None
"""Note a TOC tree directive in a document and gather information about
file relations from it.
"""
@ -52,7 +51,7 @@ class TocTree:
def resolve(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
# type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Element # NOQA
# type: (str, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Element
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
@ -120,7 +119,7 @@ class TocTree:
subnode = subnode.parent
def _entries_from_toctree(toctreenode, parents, separate=False, subtree=False):
# type: (addnodes.toctree, List[unicode], bool, bool) -> List[nodes.Element]
# type: (addnodes.toctree, List[str], bool, bool) -> List[nodes.Element]
"""Return TOC entries for a toctree node."""
refs = [(e[0], e[1]) for e in toctreenode['entries']]
entries = [] # type: List[nodes.Element]
@ -268,12 +267,12 @@ class TocTree:
return newnode
def get_toctree_ancestors(self, docname):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
parent = {}
for p, children in self.env.toctree_includes.items():
for child in children:
parent[child] = p
ancestors = [] # type: List[unicode]
ancestors = [] # type: List[str]
d = docname
while d in parent and d not in ancestors:
ancestors.append(d)
@ -303,7 +302,7 @@ class TocTree:
self._toctree_prune(subnode, depth + 1, maxdepth, collapse)
def get_toc_for(self, docname, builder):
# type: (unicode, Builder) -> nodes.Node
# type: (str, Builder) -> nodes.Node
"""Return a TOC nodetree -- for use on the same page only!"""
tocdepth = self.env.metadata[docname].get('tocdepth', 0)
try:
@ -319,7 +318,7 @@ class TocTree:
return toc
def get_toctree_for(self, docname, builder, collapse, **kwds):
# type: (unicode, Builder, bool, Any) -> nodes.Element
# type: (str, Builder, bool, Any) -> nodes.Element
"""Return the global TOC nodetree."""
doctree = self.env.get_doctree(self.env.config.master_doc)
toctrees = [] # type: List[nodes.Element]

View File

@ -15,7 +15,6 @@ if False:
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
class EnvironmentCollector:
@ -27,7 +26,7 @@ class EnvironmentCollector:
entries and toctrees, etc.
"""
listener_ids = None # type: Dict[unicode, int]
listener_ids = None # type: Dict[str, int]
def enable(self, app):
# type: (Sphinx) -> None
@ -48,14 +47,14 @@ class EnvironmentCollector:
self.listener_ids = None
def clear_doc(self, app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
# type: (Sphinx, BuildEnvironment, str) -> None
"""Remove specified data of a document.
This method is called on the removal of the document."""
raise NotImplementedError
def merge_other(self, app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
# type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
"""Merge in specified data regarding docnames from a different `BuildEnvironment`
object coming from a subprocess in parallel builds."""
raise NotImplementedError
@ -68,7 +67,7 @@ class EnvironmentCollector:
raise NotImplementedError
def get_updated_docs(self, app, env):
# type: (Sphinx, BuildEnvironment) -> List[unicode]
# type: (Sphinx, BuildEnvironment) -> List[str]
"""Return a list of docnames to re-read.
This method is called after reading all documents (experimental).
@ -76,7 +75,7 @@ class EnvironmentCollector:
return []
def get_outdated_docs(self, app, env, added, changed, removed):
# type: (Sphinx, BuildEnvironment, unicode, Set[unicode], Set[unicode], Set[unicode]) -> List[unicode] # NOQA
# type: (Sphinx, BuildEnvironment, str, Set[str], Set[str], Set[str]) -> List[str]
"""Return a list of docnames to re-read.
This method is called before reading the documents.
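Putting the hooks above together, a minimal custom collector might look like the following sketch. It is illustrative only: the class name, the ``literal_docs`` attribute and the registration via ``app.add_env_collector()`` are assumptions, not part of this commit::

    from docutils import nodes
    from sphinx.environment.collectors import EnvironmentCollector

    class LiteralBlockCollector(EnvironmentCollector):
        """Record which documents contain literal blocks (illustrative)."""

        def clear_doc(self, app, env, docname):
            getattr(env, 'literal_docs', set()).discard(docname)

        def merge_other(self, app, env, docnames, other):
            collected = getattr(other, 'literal_docs', set())
            env.literal_docs = getattr(env, 'literal_docs', set()) | (docnames & collected)

        def process_doc(self, app, doctree):
            if doctree.traverse(nodes.literal_block):
                docs = getattr(app.env, 'literal_docs', set())
                docs.add(app.env.docname)
                app.env.literal_docs = docs

    def setup(app):
        app.add_env_collector(LiteralBlockCollector)
        return {'parallel_read_safe': True}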

View File

@ -29,7 +29,6 @@ if False:
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -38,11 +37,11 @@ class ImageCollector(EnvironmentCollector):
"""Image files collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
# type: (Sphinx, BuildEnvironment, str) -> None
env.images.purge_doc(docname)
def merge_other(self, app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
# type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
env.images.merge_other(docnames, other.images)
def process_doc(self, app, doctree):
@ -55,7 +54,7 @@ class ImageCollector(EnvironmentCollector):
# choose the best image from these candidates. The special key * is
# set if there is only a single candidate to be used by a writer.
# The special key ? is set for nonlocal URIs.
candidates = {} # type: Dict[unicode, unicode]
candidates = {} # type: Dict[str, str]
node['candidates'] = candidates
imguri = node['uri']
if imguri.startswith('data:'):
@ -96,8 +95,8 @@ class ImageCollector(EnvironmentCollector):
app.env.images.add_file(docname, imgpath)
def collect_candidates(self, env, imgpath, candidates, node):
# type: (BuildEnvironment, unicode, Dict[unicode, unicode], nodes.Node) -> None
globbed = {} # type: Dict[unicode, List[unicode]]
# type: (BuildEnvironment, str, Dict[str, str], nodes.Node) -> None
globbed = {} # type: Dict[str, List[str]]
for filename in glob(imgpath):
new_imgpath = relative_path(path.join(env.srcdir, 'dummy'),
filename)
@ -116,11 +115,11 @@ class DownloadFileCollector(EnvironmentCollector):
"""Download files collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
# type: (Sphinx, BuildEnvironment, str) -> None
env.dlfiles.purge_doc(docname)
def merge_other(self, app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
# type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
env.dlfiles.merge_other(docnames, other.dlfiles)
def process_doc(self, app, doctree):

View File

@ -23,18 +23,17 @@ if False:
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
class DependenciesCollector(EnvironmentCollector):
"""dependencies collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
# type: (Sphinx, BuildEnvironment, str) -> None
env.dependencies.pop(docname, None)
def merge_other(self, app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
# type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
if docname in other.dependencies:
env.dependencies[docname] = other.dependencies[docname]

View File

@ -21,7 +21,6 @@ if False:
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -30,11 +29,11 @@ class IndexEntriesCollector(EnvironmentCollector):
name = 'indices'
def clear_doc(self, app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
# type: (Sphinx, BuildEnvironment, str) -> None
env.indexentries.pop(docname, None)
def merge_other(self, app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
# type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.indexentries[docname] = other.indexentries[docname]

View File

@ -21,18 +21,17 @@ if False:
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
class MetadataCollector(EnvironmentCollector):
"""metadata collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
# type: (Sphinx, BuildEnvironment, str) -> None
env.metadata.pop(docname, None)
def merge_other(self, app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
# type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.metadata[docname] = other.metadata[docname]

View File

@ -20,19 +20,18 @@ if False:
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
class TitleCollector(EnvironmentCollector):
"""title collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
# type: (Sphinx, BuildEnvironment, str) -> None
env.titles.pop(docname, None)
env.longtitles.pop(docname, None)
def merge_other(self, app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
# type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.titles[docname] = other.titles[docname]
env.longtitles[docname] = other.longtitles[docname]

View File

@ -26,7 +26,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
N = TypeVar('N')
@ -35,7 +34,7 @@ logger = logging.getLogger(__name__)
class TocTreeCollector(EnvironmentCollector):
def clear_doc(self, app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
# type: (Sphinx, BuildEnvironment, str) -> None
env.tocs.pop(docname, None)
env.toc_secnumbers.pop(docname, None)
env.toc_fignumbers.pop(docname, None)
@ -50,7 +49,7 @@ class TocTreeCollector(EnvironmentCollector):
del env.files_to_rebuild[subfn]
def merge_other(self, app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
# type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.tocs[docname] = other.tocs[docname]
env.toc_num_entries[docname] = other.toc_num_entries[docname]
@ -139,16 +138,16 @@ class TocTreeCollector(EnvironmentCollector):
app.env.toc_num_entries[docname] = numentries[0]
def get_updated_docs(self, app, env):
# type: (Sphinx, BuildEnvironment) -> List[unicode]
# type: (Sphinx, BuildEnvironment) -> List[str]
return self.assign_section_numbers(env) + self.assign_figure_numbers(env)
def assign_section_numbers(self, env):
# type: (BuildEnvironment) -> List[unicode]
# type: (BuildEnvironment) -> List[str]
"""Assign a section number to each heading under a numbered toctree."""
# a list of all docnames whose section numbers changed
rewrite_needed = []
assigned = set() # type: Set[unicode]
assigned = set() # type: Set[str]
old_secnumbers = env.toc_secnumbers
env.toc_secnumbers = {}
@ -200,7 +199,7 @@ class TocTreeCollector(EnvironmentCollector):
'(nested numbered toctree?)'), ref,
location=toctreenode, type='toc', subtype='secnum')
elif ref in env.tocs:
secnums = {} # type: Dict[unicode, Tuple[int, ...]]
secnums = {} # type: Dict[str, Tuple[int, ...]]
env.toc_secnumbers[ref] = secnums
assigned.add(ref)
_walk_toc(env.tocs[ref], secnums, depth, env.titles.get(ref))
@ -220,18 +219,18 @@ class TocTreeCollector(EnvironmentCollector):
return rewrite_needed
def assign_figure_numbers(self, env):
# type: (BuildEnvironment) -> List[unicode]
# type: (BuildEnvironment) -> List[str]
"""Assign a figure number to each figure under a numbered toctree."""
rewrite_needed = []
assigned = set() # type: Set[unicode]
assigned = set() # type: Set[str]
old_fignumbers = env.toc_fignumbers
env.toc_fignumbers = {}
fignum_counter = {} # type: Dict[unicode, Dict[Tuple[int, ...], int]]
fignum_counter = {} # type: Dict[str, Dict[Tuple[int, ...], int]]
def get_figtype(node):
# type: (nodes.Node) -> unicode
# type: (nodes.Node) -> str
for domain in env.domains.values():
figtype = domain.get_enumerable_node_type(node)
if figtype:
@ -240,7 +239,7 @@ class TocTreeCollector(EnvironmentCollector):
return None
def get_section_number(docname, section):
# type: (unicode, nodes.section) -> Tuple[int, ...]
# type: (str, nodes.section) -> Tuple[int, ...]
anchorname = '#' + section['ids'][0]
secnumbers = env.toc_secnumbers.get(docname, {})
if anchorname in secnumbers:
@ -251,7 +250,7 @@ class TocTreeCollector(EnvironmentCollector):
return secnum or tuple()
def get_next_fignumber(figtype, secnum):
# type: (unicode, Tuple[int, ...]) -> Tuple[int, ...]
# type: (str, Tuple[int, ...]) -> Tuple[int, ...]
counter = fignum_counter.setdefault(figtype, {})
secnum = secnum[:env.config.numfig_secnum_depth]
@ -259,7 +258,7 @@ class TocTreeCollector(EnvironmentCollector):
return secnum + (counter[secnum],)
def register_fignumber(docname, secnum, figtype, fignode):
# type: (unicode, Tuple[int, ...], unicode, nodes.Element) -> None
# type: (str, Tuple[int, ...], str, nodes.Element) -> None
env.toc_fignumbers.setdefault(docname, {})
fignumbers = env.toc_fignumbers[docname].setdefault(figtype, {})
figure_id = fignode['ids'][0]
@ -267,7 +266,7 @@ class TocTreeCollector(EnvironmentCollector):
fignumbers[figure_id] = get_next_fignumber(figtype, secnum)
def _walk_doctree(docname, doctree, secnum):
# type: (unicode, nodes.Element, Tuple[int, ...]) -> None
# type: (str, nodes.Element, Tuple[int, ...]) -> None
for subnode in doctree.children:
if isinstance(subnode, nodes.section):
next_secnum = get_section_number(docname, subnode)
@ -290,7 +289,7 @@ class TocTreeCollector(EnvironmentCollector):
_walk_doctree(docname, subnode, secnum)
def _walk_doc(docname, secnum):
# type: (unicode, Tuple[int, ...]) -> None
# type: (str, Tuple[int, ...]) -> None
if docname not in assigned:
assigned.add(docname)
doctree = env.get_doctree(docname)
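The numbering scheme implemented by ``get_next_fignumber()`` above keeps one counter per figure type and per section number truncated to :confval:`numfig_secnum_depth`, then appends that counter as the last component. A standalone illustration of the counting, not the Sphinx implementation::

    from collections import defaultdict

    def demo_fignumbers(section_numbers, secnum_depth=1):
        # type: (List[Tuple[int, ...]], int) -> List[Tuple[int, ...]]
        counters = defaultdict(int)  # one counter per truncated section number
        numbers = []
        for secnum in section_numbers:
            key = secnum[:secnum_depth]
            counters[key] += 1
            numbers.append(key + (counters[key],))
        return numbers

    # demo_fignumbers([(1, 1), (1, 2), (2, 3)]) -> [(1, 1), (1, 2), (2, 1)]
    # i.e. at depth 1 the figure counter restarts in every top-level section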

View File

@ -13,7 +13,6 @@
if False:
# For type annotation
from typing import Any # NOQA
from sphinx.util.typing import unicode # NOQA
class SphinxError(Exception):
@ -54,7 +53,7 @@ class ExtensionError(SphinxError):
category = 'Extension error'
def __init__(self, message, orig_exc=None):
# type: (unicode, Exception) -> None
# type: (str, Exception) -> None
super(ExtensionError, self).__init__(message)
self.message = message
self.orig_exc = orig_exc

View File

@ -10,7 +10,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
from collections import OrderedDict, defaultdict
@ -20,7 +19,6 @@ from sphinx.locale import __
if False:
# For type annotation
from typing import Any, Callable, Dict, List # NOQA
from sphinx.util.typing import unicode # NOQA
# List of all known core events. Maps name to arguments description.
@ -41,24 +39,24 @@ core_events = {
'html-collect-pages': 'builder',
'html-page-context': 'pagename, context, doctree or None',
'build-finished': 'exception',
} # type: Dict[unicode, unicode]
}
class EventManager:
def __init__(self):
# type: () -> None
self.events = core_events.copy()
self.listeners = defaultdict(OrderedDict) # type: Dict[unicode, Dict[int, Callable]]
self.listeners = defaultdict(OrderedDict) # type: Dict[str, Dict[int, Callable]]
self.next_listener_id = 0
def add(self, name):
# type: (unicode) -> None
# type: (str) -> None
if name in self.events:
raise ExtensionError(__('Event %r already present') % name)
self.events[name] = ''
def connect(self, name, callback):
# type: (unicode, Callable) -> int
# type: (str, Callable) -> int
if name not in self.events:
raise ExtensionError(__('Unknown event name: %s') % name)
@ -73,14 +71,14 @@ class EventManager:
event.pop(listener_id, None)
def emit(self, name, *args):
# type: (unicode, Any) -> List
# type: (str, Any) -> List
results = []
for callback in self.listeners[name].values():
results.append(callback(*args))
return results
def emit_firstresult(self, name, *args):
# type: (unicode, Any) -> Any
# type: (str, Any) -> Any
for result in self.emit(name, *args):
if result is not None:
return result
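A hedged usage sketch of the ``EventManager`` API shown above; the event name and the callback are invented for illustration::

    from sphinx.events import EventManager

    events = EventManager()
    events.add('my-event')

    def shout(text):
        # type: (str) -> str
        return text.upper()

    listener_id = events.connect('my-event', shout)
    print(events.emit('my-event', 'hello'))           # ['HELLO']
    print(events.emit_firstresult('my-event', 'hi'))  # HI
    events.disconnect(listener_id)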

View File

@ -15,8 +15,6 @@
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import argparse
import glob
import locale
@ -35,7 +33,6 @@ from sphinx.util.osutil import FileAvoidWrite, ensuredir
if False:
# For type annotation
from typing import Any, List, Tuple # NOQA
from sphinx.util.typing import unicode # NOQA
# automodule options
if 'SPHINX_APIDOC_OPTIONS' in os.environ:
@ -53,7 +50,7 @@ PY_SUFFIXES = set(['.py', '.pyx'])
def makename(package, module):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
"""Join package and module with a dot."""
# Both package and module can be None/empty.
if package:
@ -66,7 +63,7 @@ def makename(package, module):
def write_file(name, text, opts):
# type: (unicode, unicode, Any) -> None
# type: (str, str, Any) -> None
"""Write the output file for module/package <name>."""
fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix))
if opts.dryrun:
@ -81,7 +78,7 @@ def write_file(name, text, opts):
def format_heading(level, text, escape=True):
# type: (int, unicode, bool) -> unicode
# type: (int, str, bool) -> str
"""Create a heading of <level> [1, 2 or 3 supported]."""
if escape:
text = rst.escape(text)
@ -90,7 +87,7 @@ def format_heading(level, text, escape=True):
def format_directive(module, package=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
"""Create the automodule directive and add the options."""
directive = '.. automodule:: %s\n' % makename(package, module)
for option in OPTIONS:
@ -99,7 +96,7 @@ def format_directive(module, package=None):
def create_module_file(package, module, opts):
# type: (unicode, unicode, Any) -> None
# type: (str, str, Any) -> None
"""Build the text of the file and write the file."""
if not opts.noheadings:
text = format_heading(1, '%s module' % module)
@ -111,7 +108,7 @@ def create_module_file(package, module, opts):
def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace, excludes=[]): # NOQA
# type: (unicode, unicode, unicode, List[unicode], Any, List[unicode], bool, List[unicode]) -> None # NOQA
# type: (str, str, str, List[str], Any, List[str], bool, List[str]) -> None
"""Build the text of the file and write the file."""
text = format_heading(1, ('%s package' if not is_namespace else "%s namespace")
% makename(master_package, subroot))
@ -171,14 +168,14 @@ def create_package_file(root, master_package, subroot, py_files, opts, subs, is_
def create_modules_toc_file(modules, opts, name='modules'):
# type: (List[unicode], Any, unicode) -> None
# type: (List[str], Any, str) -> None
"""Create the module's index."""
text = format_heading(1, '%s' % opts.header, escape=False)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
prev_module = '' # type: unicode
prev_module = ''
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
@ -190,7 +187,7 @@ def create_modules_toc_file(modules, opts, name='modules'):
def shall_skip(module, opts, excludes=[]):
# type: (unicode, Any, List[unicode]) -> bool
# type: (str, Any, List[str]) -> bool
"""Check if we want to skip this module."""
# skip if the file doesn't exist and not using implicit namespaces
if not opts.implicit_namespaces and not path.exists(module):
@ -217,7 +214,7 @@ def shall_skip(module, opts, excludes=[]):
def recurse_tree(rootpath, excludes, opts):
# type: (unicode, List[unicode], Any) -> List[unicode]
# type: (str, List[str], Any) -> List[str]
"""
Look for every file in the directory tree and create the corresponding
ReST files.
@ -252,7 +249,7 @@ def recurse_tree(rootpath, excludes, opts):
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if includeprivate:
exclude_prefixes = ('.',) # type: Tuple[unicode, ...]
exclude_prefixes = ('.',) # type: Tuple[str, ...]
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and
@ -282,7 +279,7 @@ def recurse_tree(rootpath, excludes, opts):
def is_excluded(root, excludes):
# type: (unicode, List[unicode]) -> bool
# type: (str, List[str]) -> bool
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
@ -411,7 +408,7 @@ def main(argv=sys.argv[1:]):
if args.full:
from sphinx.cmd import quickstart as qs
modules.sort()
prev_module = '' # type: unicode
prev_module = ''
text = ''
for module in modules:
if module.startswith(prev_module + '.'):
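Both the subpackage check just above and the trailing-slash note in ``is_excluded()`` earlier in this file rely on the same idea: only treat something as nested if the prefix is followed by a separator, so that ``foo`` does not accidentally match ``foobar``. A minimal illustration (hypothetical helper)::

    def is_inside(name, package, sep='.'):
        # type: (str, str, str) -> bool
        return name == package or name.startswith(package + sep)

    # is_inside('foo.bar', 'foo') -> True
    # is_inside('foobar', 'foo')  -> False  (a bare startswith('foo') would say True)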

View File

@ -43,7 +43,6 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc.directive import DocumenterBridge # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -74,7 +73,7 @@ INSTANCEATTR = object()
def members_option(arg):
# type: (Any) -> Union[object, List[unicode]]
# type: (Any) -> Union[object, List[str]]
"""Used to convert the :members: option to auto directives."""
if arg is None:
return ALL
@ -82,7 +81,7 @@ def members_option(arg):
def members_set_option(arg):
# type: (Any) -> Union[object, Set[unicode]]
# type: (Any) -> Union[object, Set[str]]
"""Used to convert the :members: option to auto directives."""
if arg is None:
return ALL
@ -126,7 +125,7 @@ def merge_special_members_option(options):
# Some useful event listener factories for autodoc-process-docstring.
def cut_lines(pre, post=0, what=None):
# type: (int, int, unicode) -> Callable
# type: (int, int, str) -> Callable
"""Return a listener that removes the first *pre* and last *post*
lines of every docstring. If *what* is a sequence of strings,
only docstrings of a type in *what* will be processed.
@ -139,7 +138,7 @@ def cut_lines(pre, post=0, what=None):
This can (and should) be used in place of :confval:`automodule_skip_lines`.
"""
def process(app, what_, name, obj, options, lines):
# type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
# type: (Sphinx, str, str, Any, Any, List[str]) -> None
if what and what_ not in what:
return
del lines[:pre]
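A typical way to wire ``cut_lines()`` up, e.g. from a ``setup()`` function in ``conf.py``; the numbers and the ``what`` filter are illustrative::

    from sphinx.ext.autodoc import cut_lines

    def setup(app):
        app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))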
@ -155,7 +154,7 @@ def cut_lines(pre, post=0, what=None):
def between(marker, what=None, keepempty=False, exclude=False):
# type: (unicode, Sequence[unicode], bool, bool) -> Callable
# type: (str, Sequence[str], bool, bool) -> Callable
"""Return a listener that either keeps, or if *exclude* is True excludes,
lines between lines that match the *marker* regular expression. If no line
matches, the resulting docstring would be empty, so no change will be made
@ -167,7 +166,7 @@ def between(marker, what=None, keepempty=False, exclude=False):
marker_re = re.compile(marker)
def process(app, what_, name, obj, options, lines):
# type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
# type: (Sphinx, str, str, Any, Any, List[str]) -> None
if what and what_ not in what:
return
deleted = 0
@ -195,7 +194,7 @@ def between(marker, what=None, keepempty=False, exclude=False):
class Options(dict):
"""A dict/attribute hybrid that returns None on nonexisting keys."""
def __getattr__(self, name):
# type: (unicode) -> Any
# type: (str) -> Any
try:
return self[name.replace('_', '-')]
except KeyError:
@ -229,21 +228,21 @@ class Documenter:
#: true if the generated content may contain titles
titles_allowed = False
option_spec = {'noindex': bool_option} # type: Dict[unicode, Callable]
option_spec = {'noindex': bool_option} # type: Dict[str, Callable]
def get_attr(self, obj, name, *defargs):
# type: (Any, unicode, Any) -> Any
# type: (Any, str, Any) -> Any
"""getattr() override for types such as Zope interfaces."""
return autodoc_attrgetter(self.env.app, obj, name, *defargs)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, unicode, bool, Any) -> bool
# type: (Any, str, bool, Any) -> bool
"""Called to see if a member can be documented by this documenter."""
raise NotImplementedError('must be implemented in subclasses')
def __init__(self, directive, name, indent=u''):
# type: (DocumenterBridge, unicode, unicode) -> None
# type: (DocumenterBridge, str, str) -> None
self.directive = directive
self.env = directive.env # type: BuildEnvironment
self.options = directive.genopt
@ -253,15 +252,15 @@ class Documenter:
# qualified name (all set after resolve_name succeeds)
self.modname = None # type: str
self.module = None # type: ModuleType
self.objpath = None # type: List[unicode]
self.fullname = None # type: unicode
self.objpath = None # type: List[str]
self.fullname = None # type: str
# extra signature items (arguments and return annotation,
# also set after resolve_name succeeds)
self.args = None # type: unicode
self.retann = None # type: unicode
self.args = None # type: str
self.retann = None # type: str
# the object to document (set after import_object succeeds)
self.object = None # type: Any
self.object_name = None # type: unicode
self.object_name = None # type: str
# the parent/owner of the object to document
self.parent = None # type: Any
# the module analyzer to get at attribute docs, or None
@ -269,17 +268,17 @@ class Documenter:
@property
def documenters(self):
# type: () -> Dict[unicode, Type[Documenter]]
# type: () -> Dict[str, Type[Documenter]]
"""Returns registered Documenter classes"""
return get_documenters(self.env.app)
def add_line(self, line, source, *lineno):
# type: (unicode, unicode, int) -> None
# type: (str, str, int) -> None
"""Append one line of generated reST to the output."""
self.directive.result.append(self.indent + line, source, *lineno)
def resolve_name(self, modname, parents, path, base):
# type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
# type: (str, Any, str, Any) -> Tuple[str, List[str]]
"""Resolve the module and name of the object to document given by the
arguments and the current module/class.
@ -370,7 +369,7 @@ class Documenter:
return True
def format_args(self):
# type: () -> unicode
# type: () -> str
"""Format the argument signature of *self.object*.
Should return None if the object does not have a signature.
@ -378,7 +377,7 @@ class Documenter:
return None
def format_name(self):
# type: () -> unicode
# type: () -> str
"""Format the name of *self.object*.
This normally should be something that can be parsed by the generated
@ -390,14 +389,14 @@ class Documenter:
return '.'.join(self.objpath) or self.modname
def format_signature(self):
# type: () -> unicode
# type: () -> str
"""Format the signature (arguments and return annotation) of the object.
Let the user process it via the ``autodoc-process-signature`` event.
"""
if self.args is not None:
# signature given explicitly
args = "(%s)" % self.args # type: unicode
args = "(%s)" % self.args
else:
# try to introspect the signature
try:
@ -421,7 +420,7 @@ class Documenter:
return ''
def add_directive_header(self, sig):
# type: (unicode) -> None
# type: (str) -> None
"""Add the directive header and options to the generated content."""
domain = getattr(self, 'domain', 'py')
directive = getattr(self, 'directivetype', self.objtype)
@ -437,7 +436,7 @@ class Documenter:
self.add_line(u' :module: %s' % self.modname, sourcename)
def get_doc(self, encoding=None, ignore=1):
# type: (unicode, int) -> List[List[unicode]]
# type: (str, int) -> List[List[str]]
"""Decode and return lines of the docstring(s) for the object."""
if encoding is not None:
warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
@ -450,7 +449,7 @@ class Documenter:
return []
def process_doc(self, docstrings):
# type: (List[List[unicode]]) -> Iterator[unicode]
# type: (List[List[str]]) -> Iterator[str]
"""Let the user process the docstrings before adding them."""
for docstringlines in docstrings:
if self.env.app:
@ -458,11 +457,10 @@ class Documenter:
self.env.app.emit('autodoc-process-docstring',
self.objtype, self.fullname, self.object,
self.options, docstringlines)
for line in docstringlines:
yield line
yield from docstringlines
def get_sourcename(self):
# type: () -> unicode
# type: () -> str
if self.analyzer:
# prevent encoding errors when the file name is non-ASCII
if not isinstance(self.analyzer.srcname, text_type):
@ -505,7 +503,7 @@ class Documenter:
self.add_line(line, src[0], src[1])
def get_object_members(self, want_all):
# type: (bool) -> Tuple[bool, List[Tuple[unicode, Any]]]
# type: (bool) -> Tuple[bool, List[Tuple[str, Any]]]
"""Return `(members_check_module, members)` where `members` is a
list of `(membername, member)` pairs of the members of *self.object*.
@ -532,7 +530,7 @@ class Documenter:
if m.directly_defined)
def filter_members(self, members, want_all):
# type: (List[Tuple[unicode, Any]], bool) -> List[Tuple[unicode, Any, bool]]
# type: (List[Tuple[str, Any]], bool) -> List[Tuple[str, Any, bool]]
"""Filter the given member list.
Members are skipped if
@ -780,7 +778,7 @@ class ModuleDocumenter(Documenter):
'member-order': identity, 'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
'imported-members': bool_option, 'ignore-module-all': bool_option
} # type: Dict[unicode, Callable]
} # type: Dict[str, Callable]
def __init__(self, *args):
# type: (Any) -> None
@ -789,12 +787,12 @@ class ModuleDocumenter(Documenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, unicode, bool, Any) -> bool
# type: (Any, str, bool, Any) -> bool
# don't document submodules automatically
return False
def resolve_name(self, modname, parents, path, base):
# type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
# type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is not None:
logger.warning(__('"::" in automodule name doesn\'t make sense'),
type='autodoc')
@ -810,7 +808,7 @@ class ModuleDocumenter(Documenter):
return ret
def add_directive_header(self, sig):
# type: (unicode) -> None
# type: (str) -> None
Documenter.add_directive_header(self, sig)
sourcename = self.get_sourcename()
@ -826,7 +824,7 @@ class ModuleDocumenter(Documenter):
self.add_line(u' :deprecated:', sourcename)
def get_object_members(self, want_all):
# type: (bool) -> Tuple[bool, List[Tuple[unicode, object]]]
# type: (bool) -> Tuple[bool, List[Tuple[str, object]]]
if want_all:
if (self.options.ignore_module_all or not
hasattr(self.object, '__all__')):
@ -868,7 +866,7 @@ class ModuleLevelDocumenter(Documenter):
classes, data/constants).
"""
def resolve_name(self, modname, parents, path, base):
# type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
# type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is None:
if path:
modname = path.rstrip('.')
@ -889,7 +887,7 @@ class ClassLevelDocumenter(Documenter):
attributes).
"""
def resolve_name(self, modname, parents, path, base):
# type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
# type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is None:
if path:
mod_cls = path.rstrip('.')
@ -923,7 +921,7 @@ class DocstringSignatureMixin:
"""
def _find_signature(self, encoding=None):
# type: (unicode) -> Tuple[str, str]
# type: (str) -> Tuple[str, str]
if encoding is not None:
warnings.warn("The 'encoding' argument to autodoc.%s._find_signature() is "
"deprecated." % self.__class__.__name__,
@ -956,7 +954,7 @@ class DocstringSignatureMixin:
return result
def get_doc(self, encoding=None, ignore=1):
# type: (unicode, int) -> List[List[unicode]]
# type: (str, int) -> List[List[str]]
if encoding is not None:
warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
% self.__class__.__name__,
@ -967,7 +965,7 @@ class DocstringSignatureMixin:
return super(DocstringSignatureMixin, self).get_doc(None, ignore) # type: ignore
def format_signature(self):
# type: () -> unicode
# type: () -> str
if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore
# only act if a signature is not explicitly given already, and if
# the feature is enabled
@ -983,7 +981,7 @@ class DocstringStripSignatureMixin(DocstringSignatureMixin):
feature of stripping any function signature from the docstring.
"""
def format_signature(self):
# type: () -> unicode
# type: () -> str
if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore
# only act if a signature is not explicitly given already, and if
# the feature is enabled
@ -1005,11 +1003,11 @@ class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # typ
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, unicode, bool, Any) -> bool
# type: (Any, str, bool, Any) -> bool
return isfunction(member) or isbuiltin(member)
def format_args(self):
# type: () -> unicode
# type: () -> str
if isbuiltin(self.object) or inspect.ismethoddescriptor(self.object):
# cannot introspect arguments of a C function or method
return None
@ -1057,7 +1055,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
'show-inheritance': bool_option, 'member-order': identity,
'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
} # type: Dict[unicode, Callable]
} # type: Dict[str, Callable]
def __init__(self, *args):
# type: (Any) -> None
@ -1066,7 +1064,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, unicode, bool, Any) -> bool
# type: (Any, str, bool, Any) -> bool
return isinstance(member, type)
def import_object(self):
@ -1082,7 +1080,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
return ret
def format_args(self):
# type: () -> unicode
# type: () -> str
# for classes, the relevant signature is the __init__ method's
initmeth = self.get_attr(self.object, '__init__', None)
# classes without __init__ method, default __init__ or
@ -1099,14 +1097,14 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
return None
def format_signature(self):
# type: () -> unicode
# type: () -> str
if self.doc_as_attr:
return ''
return super(ClassDocumenter, self).format_signature()
def add_directive_header(self, sig):
# type: (unicode) -> None
# type: (str) -> None
if self.doc_as_attr:
self.directivetype = 'attribute'
super(ClassDocumenter, self).add_directive_header(sig)
@ -1124,7 +1122,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
sourcename)
def get_doc(self, encoding=None, ignore=1):
# type: (unicode, int) -> List[List[unicode]]
# type: (str, int) -> List[List[str]]
if encoding is not None:
warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
% self.__class__.__name__,
@ -1213,7 +1211,7 @@ class ExceptionDocumenter(ClassDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, unicode, bool, Any) -> bool
# type: (Any, str, bool, Any) -> bool
return isinstance(member, type) and issubclass(member, BaseException)
@ -1229,11 +1227,11 @@ class DataDocumenter(ModuleLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, unicode, bool, Any) -> bool
# type: (Any, str, bool, Any) -> bool
return isinstance(parent, ModuleDocumenter) and isattr
def add_directive_header(self, sig):
# type: (unicode) -> None
# type: (str) -> None
super(DataDocumenter, self).add_directive_header(sig)
sourcename = self.get_sourcename()
if not self.options.annotation:
@ -1269,7 +1267,7 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type:
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, unicode, bool, Any) -> bool
# type: (Any, str, bool, Any) -> bool
return inspect.isroutine(member) and \
not isinstance(parent, ModuleDocumenter)
@ -1297,7 +1295,7 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type:
return ret
def format_args(self):
# type: () -> unicode
# type: () -> str
if isbuiltin(self.object) or inspect.ismethoddescriptor(self.object):
# can never get arguments of a C function or method
return None
@ -1334,7 +1332,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, unicode, bool, Any) -> bool
# type: (Any, str, bool, Any) -> bool
non_attr_types = (type, MethodDescriptorType)
isdatadesc = isdescriptor(member) and not \
cls.is_function_or_method(member) and not \
@ -1370,7 +1368,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
or self.modname
def add_directive_header(self, sig):
# type: (unicode) -> None
# type: (str) -> None
super(AttributeDocumenter, self).add_directive_header(sig)
sourcename = self.get_sourcename()
if not self.options.annotation:
@ -1410,7 +1408,7 @@ class InstanceAttributeDocumenter(AttributeDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, unicode, bool, Any) -> bool
# type: (Any, str, bool, Any) -> bool
"""This documents only INSTANCEATTR members."""
return isattr and (member is INSTANCEATTR)
@ -1429,13 +1427,13 @@ class InstanceAttributeDocumenter(AttributeDocumenter):
def get_documenters(app):
# type: (Sphinx) -> Dict[unicode, Type[Documenter]]
# type: (Sphinx) -> Dict[str, Type[Documenter]]
"""Returns registered Documenter classes"""
return app.registry.documenters
def autodoc_attrgetter(app, obj, name, *defargs):
# type: (Sphinx, Any, unicode, Any) -> Any
# type: (Sphinx, Any, str, Any) -> Any
"""Alternative getattr() for types"""
for typ, func in app.registry.autodoc_attrgettrs.items():
if isinstance(obj, typ):
@ -1469,7 +1467,7 @@ def merge_autodoc_default_flags(app, config):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_autodocumenter(ModuleDocumenter)
app.add_autodocumenter(ClassDocumenter)
app.add_autodocumenter(ExceptionDocumenter)

View File

@ -25,7 +25,6 @@ if False:
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -58,11 +57,11 @@ class DocumenterBridge:
self.reporter = reporter
self.genopt = options
self.lineno = lineno
self.filename_set = set() # type: Set[unicode]
self.filename_set = set() # type: Set[str]
self.result = StringList()
def warn(self, msg):
# type: (unicode) -> None
# type: (str) -> None
logger.warning(msg, location=(self.env.docname, self.lineno))

View File

@ -25,7 +25,6 @@ from sphinx.util.inspect import isenumclass, safe_getattr
if False:
# For type annotation
from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Sequence, Tuple, Union # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -220,7 +219,7 @@ def import_module(modname, warningiserror=False):
def import_object(modname, objpath, objtype='', attrgetter=safe_getattr, warningiserror=False):
# type: (str, List[unicode], str, Callable[[Any, unicode], Any], bool) -> Any
# type: (str, List[str], str, Callable[[Any, str], Any], bool) -> Any
if objpath:
logger.debug('[autodoc] from %s import %s', modname, '.'.join(objpath))
else:
@ -287,7 +286,7 @@ Attribute = namedtuple('Attribute', ['name', 'directly_defined', 'value'])
def get_object_members(subject, objpath, attrgetter, analyzer=None):
# type: (Any, List[unicode], Callable, Any) -> Dict[str, Attribute] # NOQA
# type: (Any, List[str], Callable, Any) -> Dict[str, Attribute] # NOQA
"""Get members and attributes of target object."""
# the members directly defined in the class
obj_dict = attrgetter(subject, '__dict__', {})

View File

@ -29,7 +29,6 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
def register_sections_as_label(app, document):
@ -57,7 +56,7 @@ def register_sections_as_label(app, document):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_config_value('autosectionlabel_prefix_document', False, 'env')
app.connect('doctree-read', register_sections_as_label)

View File

@ -90,7 +90,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
logger = logging.getLogger(__name__)
@ -275,13 +274,13 @@ class Autosummary(SphinxDirective):
return nodes
def get_items(self, names):
# type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode, unicode]]
# type: (List[str]) -> List[Tuple[str, str, str, str]]
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
prefixes = get_import_prefixes_from_env(self.env)
items = [] # type: List[Tuple[unicode, unicode, unicode, unicode]]
items = [] # type: List[Tuple[str, str, str, str]]
max_item_chars = 50
@ -350,7 +349,7 @@ class Autosummary(SphinxDirective):
return items
def get_table(self, items):
# type: (List[Tuple[unicode, unicode, unicode, unicode]]) -> List[nodes.Node]
# type: (List[Tuple[str, str, str, str]]) -> List[nodes.Node]
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
@ -369,7 +368,7 @@ class Autosummary(SphinxDirective):
group.append(body)
def append_row(*column_texts):
# type: (unicode) -> None
# type: (str) -> None
row = nodes.row('')
source, line = self.state_machine.get_source_and_line()
for text in column_texts:
@ -389,7 +388,7 @@ class Autosummary(SphinxDirective):
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
col1 = ':%s:`%s <%s>`\\ %s' % (qualifier, name, real_name, rst.escape(sig)) # type: unicode # NOQA
col1 = ':%s:`%s <%s>`\\ %s' % (qualifier, name, real_name, rst.escape(sig))
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
@ -398,7 +397,7 @@ class Autosummary(SphinxDirective):
return [table_spec, table]
def warn(self, msg):
# type: (unicode) -> None
# type: (str) -> None
warnings.warn('Autosummary.warn() is deprecated',
RemovedInSphinx40Warning, stacklevel=2)
logger.warning(msg)
@ -426,13 +425,13 @@ class Autosummary(SphinxDirective):
def strip_arg_typehint(s):
# type: (unicode) -> unicode
# type: (str) -> str
"""Strip a type hint from argument definition."""
return s.split(':')[0].strip()
def mangle_signature(sig, max_chars=30):
# type: (unicode, int) -> unicode
# type: (str, int) -> str
"""Reformat a function signature to a more compact form."""
# Strip return type annotation
s = re.sub(r"\)\s*->\s.*$", ")", sig)
@ -446,8 +445,8 @@ def mangle_signature(sig, max_chars=30):
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
args = [] # type: List[unicode]
opts = [] # type: List[unicode]
args = [] # type: List[str]
opts = [] # type: List[str]
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
@ -480,7 +479,7 @@ def mangle_signature(sig, max_chars=30):
def extract_summary(doc, document):
# type: (List[unicode], Any) -> unicode
# type: (List[str], Any) -> str
"""Extract summary from docstring."""
# Skip blank lines at the top
@ -529,7 +528,7 @@ def extract_summary(doc, document):
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
# type: (unicode, List[unicode], int, unicode) -> unicode
# type: (str, List[str], int, str) -> str
"""Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
@ -578,7 +577,7 @@ def get_import_prefixes_from_env(env):
def import_by_name(name, prefixes=[None]):
# type: (unicode, List) -> Tuple[unicode, Any, Any, unicode]
# type: (str, List) -> Tuple[str, Any, Any, str]
"""Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
@ -597,7 +596,7 @@ def import_by_name(name, prefixes=[None]):
def _import_by_name(name):
# type: (str) -> Tuple[Any, Any, unicode]
# type: (str) -> Tuple[Any, Any, str]
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
@ -641,7 +640,7 @@ def _import_by_name(name):
# -- :autolink: (smart default role) -------------------------------------------
def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]):
# type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
@ -666,9 +665,9 @@ def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]):
def get_rst_suffix(app):
# type: (Sphinx) -> unicode
# type: (Sphinx) -> str
def get_supported_format(suffix):
# type: (unicode) -> Tuple[unicode, ...]
# type: (str) -> Tuple[str, ...]
parser_class = app.registry.get_source_parsers().get(suffix)
if parser_class is None:
return ('restructuredtext',)
@ -676,7 +675,7 @@ def get_rst_suffix(app):
parser_class = import_object(parser_class, 'source parser')
return parser_class.supported
suffix = None # type: unicode
suffix = None # type: str
for suffix in app.config.source_suffix:
if 'restructuredtext' in get_supported_format(suffix):
return suffix
@ -715,7 +714,7 @@ def process_generate_options(app):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(autosummary_toc,

View File

@ -17,7 +17,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import argparse
import locale
@ -47,7 +46,6 @@ if False:
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
from sphinx.util.typing import unicode # NOQA
class DummyApplication:
@ -75,17 +73,17 @@ def setup_documenters(app):
def _simple_info(msg):
# type: (unicode) -> None
# type: (str) -> None
print(msg)
def _simple_warn(msg):
# type: (unicode) -> None
# type: (str) -> None
print('WARNING: ' + msg, file=sys.stderr)
def _underline(title, line='='):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
if '\n' in title:
raise ValueError('Can only underline single lines')
return title + '\n' + line * len(title)
@ -97,7 +95,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
warn=_simple_warn, info=_simple_info,
base_path=None, builder=None, template_dir=None,
imported_members=False, app=None):
# type: (List[unicode], unicode, unicode, Callable, Callable, unicode, Builder, unicode, bool, Any) -> None # NOQA
# type: (List[str], str, str, Callable, Callable, str, Builder, str, bool, Any) -> None
showed_sources = list(sorted(sources))
if len(showed_sources) > 20:
@ -112,7 +110,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
sources = [os.path.join(base_path, filename) for filename in sources]
# create our own templating environment
template_dirs = None # type: List[unicode]
template_dirs = None # type: List[str]
template_dirs = [os.path.join(package_dir, 'ext',
'autosummary', 'templates')]
@ -175,8 +173,8 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
template = template_env.get_template('autosummary/base.rst')
def get_members(obj, typ, include_public=[], imported=True):
# type: (Any, unicode, List[unicode], bool) -> Tuple[List[unicode], List[unicode]] # NOQA
items = [] # type: List[unicode]
# type: (Any, str, List[str], bool) -> Tuple[List[str], List[str]]
items = [] # type: List[str]
for name in dir(obj):
try:
value = safe_getattr(obj, name)
@ -191,7 +189,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
if x in include_public or not x.startswith('_')]
return public, items
ns = {} # type: Dict[unicode, Any]
ns = {} # type: Dict[str, Any]
if doc.objtype == 'module':
ns['members'] = dir(obj)
@ -241,12 +239,12 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
# type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode]]
# type: (List[str]) -> List[Tuple[str, str, str]]
"""Find out what items are documented in source/*.rst.
See `find_autosummary_in_lines`.
"""
documented = [] # type: List[Tuple[unicode, unicode, unicode]]
documented = [] # type: List[Tuple[str, str, str]]
for filename in filenames:
with open(filename, encoding='utf-8', errors='ignore') as f:
lines = f.read().splitlines()
@ -255,7 +253,7 @@ def find_autosummary_in_files(filenames):
def find_autosummary_in_docstring(name, module=None, filename=None):
# type: (unicode, Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
# type: (str, Any, str) -> List[Tuple[str, str, str]]
"""Find out what items are documented in the given object's docstring.
See `find_autosummary_in_lines`.
@ -275,7 +273,7 @@ def find_autosummary_in_docstring(name, module=None, filename=None):
def find_autosummary_in_lines(lines, module=None, filename=None):
# type: (List[unicode], Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
# type: (List[str], Any, str) -> List[Tuple[str, str, str]]
"""Find out what items appear in autosummary:: directives in the
given lines.
@ -295,13 +293,13 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
documented = [] # type: List[Tuple[unicode, unicode, unicode]]
documented = [] # type: List[Tuple[str, str, str]]
toctree = None # type: unicode
toctree = None # type: str
template = None
current_module = module
in_autosummary = False
base_indent = "" # type: unicode
base_indent = ""
for line in lines:
if in_autosummary:

View File

@ -26,20 +26,19 @@ if False:
# For type annotation
from typing import Any, Callable, Dict, IO, List, Pattern, Set, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
# utility
def write_header(f, text, char='-'):
# type:(IO, unicode, unicode) -> None
# type:(IO, str, str) -> None
f.write(text + '\n')
f.write(char * len(text) + '\n')
def compile_regex_list(name, exps):
# type: (unicode, unicode) -> List[Pattern]
# type: (str, str) -> List[Pattern]
lst = []
for exp in exps:
try:
@ -59,19 +58,19 @@ class CoverageBuilder(Builder):
def init(self):
# type: () -> None
self.c_sourcefiles = [] # type: List[unicode]
self.c_sourcefiles = [] # type: List[str]
for pattern in self.config.coverage_c_path:
pattern = path.join(self.srcdir, pattern)
self.c_sourcefiles.extend(glob.glob(pattern))
self.c_regexes = [] # type: List[Tuple[unicode, Pattern]]
self.c_regexes = [] # type: List[Tuple[str, Pattern]]
for (name, exp) in self.config.coverage_c_regexes.items():
try:
self.c_regexes.append((name, re.compile(exp)))
except Exception:
logger.warning(__('invalid regex %r in coverage_c_regexes'), exp)
self.c_ignorexps = {} # type: Dict[unicode, List[Pattern]]
self.c_ignorexps = {} # type: Dict[str, List[Pattern]]
for (name, exps) in self.config.coverage_ignore_c_items.items():
self.c_ignorexps[name] = compile_regex_list('coverage_ignore_c_items',
exps)
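# Hypothetical conf.py values showing the shapes the settings above expect:
# glob patterns for headers, one regex per C object type, and per-type ignore
# regexes (all names and patterns here are examples only).
coverage_c_path = ['../include/*.h']
coverage_c_regexes = {
    'function': r'^PyAPI_FUNC\(.*\)\s+([a-zA-Z_]\w*)',
}
coverage_ignore_c_items = {
    'function': [r'^_'],
}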
@ -83,16 +82,16 @@ class CoverageBuilder(Builder):
self.config.coverage_ignore_functions)
def get_outdated_docs(self):
# type: () -> unicode
# type: () -> str
return 'coverage overview'
def write(self, *ignored):
# type: (Any) -> None
self.py_undoc = {} # type: Dict[unicode, Dict[unicode, Any]]
self.py_undoc = {} # type: Dict[str, Dict[str, Any]]
self.build_py_coverage()
self.write_py_coverage()
self.c_undoc = {} # type: Dict[unicode, Set[Tuple[unicode, unicode]]]
self.c_undoc = {} # type: Dict[str, Set[Tuple[str, str]]]
self.build_c_coverage()
self.write_c_coverage()
@ -101,7 +100,7 @@ class CoverageBuilder(Builder):
# Fetch all the info from the header files
c_objects = self.env.domaindata['c']['objects']
for filename in self.c_sourcefiles:
undoc = set() # type: Set[Tuple[unicode, unicode]]
undoc = set() # type: Set[Tuple[str, str]]
with open(filename) as f:
for line in f:
for key, regex in self.c_regexes:
@ -156,7 +155,7 @@ class CoverageBuilder(Builder):
continue
funcs = []
classes = {} # type: Dict[unicode, List[unicode]]
classes = {} # type: Dict[str, List[str]]
for name, obj in inspect.getmembers(mod):
# diverse module attributes are ignored:
@ -193,7 +192,7 @@ class CoverageBuilder(Builder):
classes[name] = []
continue
attrs = [] # type: List[unicode]
attrs = [] # type: List[str]
for attr_name in dir(obj):
if attr_name not in obj.__dict__:
@ -267,7 +266,7 @@ class CoverageBuilder(Builder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_builder(CoverageBuilder)
app.add_config_value('coverage_ignore_modules', [], False)
app.add_config_value('coverage_ignore_functions', [], False)

View File

@ -16,13 +16,13 @@ import re
import sys
import time
import warnings
from io import StringIO
from os import path
from docutils import nodes
from docutils.parsers.rst import directives
from packaging.specifiers import SpecifierSet, InvalidSpecifier
from packaging.version import Version
from six import StringIO
import sphinx
from sphinx.builders import Builder
@ -38,7 +38,6 @@ if False:
# For type annotation
from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Sequence, Set, Tuple, Type # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -47,14 +46,14 @@ doctestopt_re = re.compile(r'#\s*doctest:.+$', re.MULTILINE)
def doctest_encode(text, encoding):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
warnings.warn('doctest_encode() is deprecated.',
RemovedInSphinx40Warning)
return text
def is_allowed_version(spec, version):
# type: (unicode, unicode) -> bool
# type: (str, str) -> bool
"""Check `spec` satisfies `version` or not.
This obeys PEP-440 specifiers:
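# A minimal sketch of the PEP 440 check behind is_allowed_version(), using the
# packaging imports shown earlier in this file; the helper name is made up.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

def allowed(spec, version):
    # True when `version` falls inside the specifier set, e.g. '>=3.5, <4'
    return Version(version) in SpecifierSet(spec)

# allowed('>=3.5', '3.7.1') -> True
# allowed('>=3.5', '2.7.16') -> False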
@ -207,7 +206,7 @@ parser = doctest.DocTestParser()
class TestGroup:
def __init__(self, name):
# type: (unicode) -> None
# type: (str) -> None
self.name = name
self.setup = [] # type: List[TestCode]
self.tests = [] # type: List[List[TestCode]]
@ -233,14 +232,14 @@ class TestGroup:
raise RuntimeError(__('invalid TestCode type'))
def __repr__(self):
# type: () -> unicode
# type: () -> str
return 'TestGroup(name=%r, setup=%r, cleanup=%r, tests=%r)' % (
self.name, self.setup, self.cleanup, self.tests)
class TestCode:
def __init__(self, code, type, filename, lineno, options=None):
# type: (unicode, unicode, Optional[str], int, Optional[Dict]) -> None
# type: (str, str, Optional[str], int, Optional[Dict]) -> None
self.code = code
self.type = type
self.filename = filename
@ -248,7 +247,7 @@ class TestCode:
self.options = options or {}
def __repr__(self):
# type: () -> unicode
# type: () -> str
return 'TestCode(%r, %r, filename=%r, lineno=%r, options=%r)' % (
self.code, self.type, self.filename, self.lineno, self.options)
@ -268,7 +267,7 @@ class SphinxDocTestRunner(doctest.DocTestRunner):
def _DocTestRunner__patched_linecache_getlines(self, filename,
module_globals=None):
# type: (unicode, Any) -> Any
# type: (str, Any) -> Any
# this is overridden from DocTestRunner adding the try-except below
m = self._DocTestRunner__LINECACHE_FILENAME_RE.match(filename) # type: ignore
if m and m.group('name') == self.test.name:
@ -325,12 +324,12 @@ class DocTestBuilder(Builder):
(date, '=' * len(date)))
def _out(self, text):
# type: (unicode) -> None
# type: (str) -> None
logger.info(text, nonl=True)
self.outfile.write(text)
def _warn_out(self, text):
# type: (unicode) -> None
# type: (str) -> None
if self.app.quiet or self.app.warningiserror:
logger.warning(text)
else:
@ -338,18 +337,18 @@ class DocTestBuilder(Builder):
self.outfile.write(text)
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
# type: (str, str) -> str
return ''
def get_outdated_docs(self):
# type: () -> Set[unicode]
# type: () -> Set[str]
return self.env.found_docs
def finish(self):
# type: () -> None
# write executive summary
def s(v):
# type: (int) -> unicode
# type: (int) -> str
return v != 1 and 's' or ''
repl = (self.total_tries, s(self.total_tries),
self.total_failures, s(self.total_failures),
@ -369,7 +368,7 @@ Doctest summary
self.app.statuscode = 1
def write(self, build_docnames, updated_docnames, method='update'):
# type: (Iterable[unicode], Sequence[unicode], unicode) -> None
# type: (Iterable[str], Sequence[str], str) -> None
if build_docnames is None:
build_docnames = sorted(self.env.all_docs)
@ -380,7 +379,7 @@ Doctest summary
self.test_doc(docname, doctree)
def get_filename_for_node(self, node, docname):
# type: (nodes.Node, unicode) -> unicode
# type: (nodes.Node, str) -> str
"""Try to get the file which actually contains the doctest, not the
filename of the document it's included in."""
try:
@ -408,8 +407,8 @@ Doctest summary
return None
def test_doc(self, docname, doctree):
# type: (unicode, nodes.Node) -> None
groups = {} # type: Dict[unicode, TestGroup]
# type: (str, nodes.Node) -> None
groups = {} # type: Dict[str, TestGroup]
add_to_all_groups = []
self.setup_runner = SphinxDocTestRunner(verbose=False,
optionflags=self.opt)
@ -487,7 +486,7 @@ Doctest summary
self.cleanup_tries += res_t
def compile(self, code, name, type, flags, dont_inherit):
# type: (unicode, unicode, unicode, Any, bool) -> Any
# type: (str, str, str, Any, bool) -> Any
return compile(code, name, self.type, flags, dont_inherit)
def test_group(self, group):
@ -565,7 +564,7 @@ Doctest summary
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_directive('testsetup', TestsetupDirective)
app.add_directive('testcleanup', TestcleanupDirective)
app.add_directive('doctest', DoctestDirective)

View File

@ -35,13 +35,12 @@ if False:
from docutils.parsers.rst.states import Inliner # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import RoleFunction # NOQA
from sphinx.util.typing import unicode # NOQA
def make_link_role(base_url, prefix):
# type: (unicode, unicode) -> RoleFunction
# type: (str, str) -> RoleFunction
def role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
# type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
text = utils.unescape(text)
has_explicit_title, title, part = split_explicit_title(text)
try:
@ -69,7 +68,7 @@ def setup_link_roles(app):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_config_value('extlinks', {}, 'env')
app.connect('builder-inited', setup_link_roles)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
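# Hypothetical conf.py entry for the 'extlinks' value registered above: the key
# becomes a role name, the role text is substituted into the URL template, and
# the second element is prepended to the default caption.
extlinks = {
    'issue': ('https://github.com/sphinx-doc/sphinx/issues/%s', 'issue '),
}
# In a document, :issue:`123` then links to .../issues/123 with caption "issue 123".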

View File

@ -18,7 +18,6 @@ if False:
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
def create_nojekyll(app, env):
@ -29,6 +28,6 @@ def create_nojekyll(app, env):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.connect('env-updated', create_nojekyll)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}

View File

@ -36,7 +36,6 @@ if False:
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.docutils import SphinxTranslator # NOQA
from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
from sphinx.writers.latex import LaTeXTranslator # NOQA
from sphinx.writers.manpage import ManualPageTranslator # NOQA
@ -56,16 +55,16 @@ class ClickableMapDefinition:
href_re = re.compile('href=".*?"')
def __init__(self, filename, content, dot=''):
# type: (unicode, unicode, unicode) -> None
self.id = None # type: unicode
# type: (str, str, str) -> None
self.id = None # type: str
self.filename = filename
self.content = content.splitlines()
self.clickable = [] # type: List[unicode]
self.clickable = [] # type: List[str]
self.parse(dot=dot)
def parse(self, dot=None):
# type: (unicode) -> None
# type: (str) -> None
matched = self.maptag_re.match(self.content[0])
if not matched:
raise GraphvizError('Invalid clickable map file found: %s' % self.filename)
@ -83,7 +82,7 @@ class ClickableMapDefinition:
self.clickable.append(line)
def generate_clickable_map(self):
# type: () -> unicode
# type: () -> str
"""Generate clickable map tags if clickable item exists.
If not exists, this only returns empty string.
@ -99,7 +98,7 @@ class graphviz(nodes.General, nodes.Inline, nodes.Element):
def figure_wrapper(directive, node, caption):
# type: (Directive, graphviz, unicode) -> nodes.figure
# type: (Directive, graphviz, str) -> nodes.figure
figure_node = nodes.figure('', node)
if 'align' in node:
figure_node['align'] = node.attributes.pop('align')
@ -218,7 +217,7 @@ class GraphvizSimple(SphinxDirective):
def render_dot(self, code, options, format, prefix='graphviz'):
# type: (SphinxTranslator, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
# type: (SphinxTranslator, str, Dict, str, str) -> Tuple[str, str]
"""Render graphviz code into a PNG or PDF output file."""
graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
hashkey = (code + str(options) + str(graphviz_dot) +
@ -279,7 +278,7 @@ def render_dot(self, code, options, format, prefix='graphviz'):
def render_dot_html(self, node, code, options, prefix='graphviz',
imgcls=None, alt=None):
# type: (HTMLTranslator, graphviz, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
# type: (HTMLTranslator, graphviz, str, Dict, str, str, str) -> Tuple[str, str]
format = self.builder.config.graphviz_output_format
try:
if format not in ('png', 'svg'):
@ -337,7 +336,7 @@ def html_visit_graphviz(self, node):
def render_dot_latex(self, node, code, options, prefix='graphviz'):
# type: (LaTeXTranslator, graphviz, unicode, Dict, unicode) -> None
# type: (LaTeXTranslator, graphviz, str, Dict, str) -> None
try:
fname, outfn = render_dot(self, code, options, 'pdf', prefix)
except GraphvizError as exc:
@ -375,7 +374,7 @@ def latex_visit_graphviz(self, node):
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
# type: (TexinfoTranslator, graphviz, unicode, Dict, unicode) -> None
# type: (TexinfoTranslator, graphviz, str, Dict, str) -> None
try:
fname, outfn = render_dot(self, code, options, 'png', prefix)
except GraphvizError as exc:
@ -418,7 +417,7 @@ def on_build_finished(app, exc):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_node(graphviz,
html=(html_visit_graphviz, None),
latex=(latex_visit_graphviz, None),

View File

@ -30,7 +30,6 @@ if False:
# For type annotation
from typing import Any, Dict, List # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
class ifconfig(nodes.Element):
@ -57,7 +56,7 @@ class IfConfig(SphinxDirective):
def process_ifconfig_nodes(app, doctree, docname):
# type: (Sphinx, nodes.document, unicode) -> None
# type: (Sphinx, nodes.document, str) -> None
ns = dict((confval.name, confval.value) for confval in app.config)
ns.update(app.config.__dict__.copy())
ns['builder'] = app.builder.name
@ -80,7 +79,7 @@ def process_ifconfig_nodes(app, doctree, docname):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_node(ifconfig)
app.add_directive('ifconfig', IfConfig)
app.connect('doctree-resolved', process_ifconfig_nodes)

View File

@ -21,7 +21,6 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -64,7 +63,7 @@ class ImagemagickConverter(ImageConverter):
return True
def convert(self, _from, _to):
# type: (unicode, unicode) -> bool
# type: (str, str) -> bool
"""Converts the image to expected one."""
try:
if _from.lower().endswith('.gif'):
@ -100,7 +99,7 @@ class ImagemagickConverter(ImageConverter):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(ImagemagickConverter)
app.add_config_value('image_converter', 'convert', 'env')
app.add_config_value('image_converter_args', [], 'env')

View File

@ -36,7 +36,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.config import Config # NOQA
from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
logger = logging.getLogger(__name__)
@ -46,7 +45,7 @@ class MathExtError(SphinxError):
category = 'Math extension error'
def __init__(self, msg, stderr=None, stdout=None):
# type: (unicode, bytes, bytes) -> None
# type: (str, bytes, bytes) -> None
if stderr:
msg += '\n[stderr]\n' + stderr.decode(sys_encoding, 'replace')
if stdout:
@ -91,7 +90,7 @@ depth_re = re.compile(br'\[\d+ depth=(-?\d+)\]')
def generate_latex_macro(math, config):
# type: (unicode, Config) -> unicode
# type: (str, Config) -> str
"""Generate LaTeX macro."""
fontsize = config.imgmath_font_size
baselineskip = int(round(fontsize * 1.2))
@ -106,7 +105,7 @@ def generate_latex_macro(math, config):
def ensure_tempdir(builder):
# type: (Builder) -> unicode
# type: (Builder) -> str
"""Create temporary directory.
use only one tempdir per build -- the use of a directory is cleaner
@ -120,7 +119,7 @@ def ensure_tempdir(builder):
def compile_math(latex, builder):
# type: (unicode, Builder) -> unicode
# type: (str, Builder) -> str
"""Compile LaTeX macros for math to DVI."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.tex')
@ -154,7 +153,7 @@ def compile_math(latex, builder):
def convert_dvi_to_image(command, name):
# type: (List[unicode], unicode) -> Tuple[bytes, bytes]
# type: (List[str], str) -> Tuple[bytes, bytes]
"""Convert DVI file to specific image format."""
try:
p = Popen(command, stdout=PIPE, stderr=PIPE)
@ -174,7 +173,7 @@ def convert_dvi_to_image(command, name):
def convert_dvi_to_png(dvipath, builder):
# type: (unicode, Builder) -> Tuple[unicode, int]
# type: (str, Builder) -> Tuple[str, int]
"""Convert DVI file to PNG image."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.png')
@ -201,7 +200,7 @@ def convert_dvi_to_png(dvipath, builder):
def convert_dvi_to_svg(dvipath, builder):
# type: (unicode, Builder) -> Tuple[unicode, int]
# type: (str, Builder) -> Tuple[str, int]
"""Convert DVI file to SVG image."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.svg')
@ -216,7 +215,7 @@ def convert_dvi_to_svg(dvipath, builder):
def render_math(self, math):
# type: (HTMLTranslator, unicode) -> Tuple[unicode, int]
# type: (HTMLTranslator, str) -> Tuple[str, int]
"""Render the LaTeX math expression *math* using latex and dvipng or
dvisvgm.
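# Hypothetical conf.py settings steering the pipeline above (LaTeX -> DVI ->
# image); 'svg' selects the dvisvgm path, 'png' the dvipng path.
extensions = ['sphinx.ext.imgmath']
imgmath_image_format = 'svg'
imgmath_font_size = 14        # consumed by generate_latex_macro() above
imgmath_add_tooltips = True   # checked by get_tooltip() below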
@ -285,7 +284,7 @@ def cleanup_tempdir(app, exc):
def get_tooltip(self, node):
# type: (HTMLTranslator, Union[nodes.math, nodes.math_block]) -> unicode
# type: (HTMLTranslator, Union[nodes.math, nodes.math_block]) -> str
if self.builder.config.imgmath_add_tooltips:
return ' alt="%s"' % self.encode(node.astext()).strip()
return ''
@ -347,7 +346,7 @@ def html_visit_displaymath(self, node):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('imgmath',
(html_visit_math, None),
(html_visit_displaymath, None))

View File

@ -59,7 +59,6 @@ if False:
from typing import Any, Dict, List, Tuple, Dict, Optional # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
from sphinx.writers.latex import LaTeXTranslator # NOQA
from sphinx.writers.texinfo import TexinfoTranslator # NOQA
@ -71,7 +70,7 @@ module_sig_re = re.compile(r'''^(?:([\w.]*)\.)? # module names
def try_import(objname):
# type: (unicode) -> Any
# type: (str) -> Any
"""Import a object or module using *name* and *currentmodule*.
*name* should be a relative name from *currentmodule* or
a fully-qualified name.
@ -99,7 +98,7 @@ def try_import(objname):
def import_classes(name, currmodule):
# type: (unicode, unicode) -> Any
# type: (str, str) -> Any
"""Import a class using its fully-qualified *name*."""
target = None
@ -142,7 +141,7 @@ class InheritanceGraph:
"""
def __init__(self, class_names, currmodule, show_builtins=False,
private_bases=False, parts=0, aliases=None, top_classes=[]):
# type: (List[unicode], str, bool, bool, int, Optional[Dict[unicode, unicode]], List[Any]) -> None # NOQA
# type: (List[str], str, bool, bool, int, Optional[Dict[str, str]], List[Any]) -> None
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
@ -157,7 +156,7 @@ class InheritanceGraph:
'inheritance diagram')
def _import_classes(self, class_names, currmodule):
# type: (List[unicode], str) -> List[Any]
# type: (List[str], str) -> List[Any]
"""Import a list of classes."""
classes = [] # type: List[Any]
for name in class_names:
@ -165,7 +164,7 @@ class InheritanceGraph:
return classes
def _class_info(self, classes, show_builtins, private_bases, parts, aliases, top_classes):
# type: (List[Any], bool, bool, int, Optional[Dict[unicode, unicode]], List[Any]) -> List[Tuple[unicode, unicode, List[unicode], unicode]] # NOQA
# type: (List[Any], bool, bool, int, Optional[Dict[str, str]], List[Any]) -> List[Tuple[str, str, List[str], str]] # NOQA
"""Return name and bases for all classes that are ancestors of
*classes*.
@ -198,7 +197,7 @@ class InheritanceGraph:
except Exception: # might raise AttributeError for strange classes
pass
baselist = [] # type: List[unicode]
baselist = [] # type: List[str]
all_classes[cls] = (nodename, fullname, baselist, tooltip)
if fullname in top_classes:
@ -219,7 +218,7 @@ class InheritanceGraph:
return list(all_classes.values())
def class_name(self, cls, parts=0, aliases=None):
# type: (Any, int, Optional[Dict[unicode, unicode]]) -> unicode
# type: (Any, int, Optional[Dict[str, str]]) -> str
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
@ -240,7 +239,7 @@ class InheritanceGraph:
return result
def get_all_class_names(self):
# type: () -> List[unicode]
# type: () -> List[str]
"""Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _, _) in self.class_info]
@ -263,16 +262,16 @@ class InheritanceGraph:
}
def _format_node_attrs(self, attrs):
# type: (Dict) -> unicode
# type: (Dict) -> str
return ','.join(['%s=%s' % x for x in sorted(attrs.items())])
def _format_graph_attrs(self, attrs):
# type: (Dict) -> unicode
# type: (Dict) -> str
return ''.join(['%s=%s;\n' % x for x in sorted(attrs.items())])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
# type: (unicode, Dict, BuildEnvironment, Dict, Dict, Dict) -> unicode
# type: (str, Dict, BuildEnvironment, Dict, Dict, Dict) -> str
"""Generate a graphviz dot graph from the classes that were passed in
to __init__.
@ -294,7 +293,7 @@ class InheritanceGraph:
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
res = [] # type: List[unicode]
res = [] # type: List[str]
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
@ -389,7 +388,7 @@ class InheritanceDiagram(SphinxDirective):
def get_graph_hash(node):
# type: (inheritance_diagram) -> unicode
# type: (inheritance_diagram) -> str
encoded = (node['content'] + str(node['parts'])).encode('utf-8')
return md5(encoded).hexdigest()[-10:]
@ -466,7 +465,7 @@ def skip(self, node):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,

View File

@ -24,8 +24,6 @@
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import functools
import posixpath
import sys
@ -49,7 +47,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
Inventory = Dict[text_type, Dict[text_type, Tuple[text_type, text_type, text_type, text_type]]] # NOQA
@ -70,7 +67,7 @@ class InventoryAdapter:
@property
def cache(self):
# type: () -> Dict[unicode, Tuple[unicode, int, Inventory]]
# type: () -> Dict[str, Tuple[str, int, Inventory]]
return self.env.intersphinx_cache # type: ignore
@property
@ -80,7 +77,7 @@ class InventoryAdapter:
@property
def named_inventory(self):
# type: () -> Dict[unicode, Inventory]
# type: () -> Dict[str, Inventory]
return self.env.intersphinx_named_inventory # type: ignore
def clear(self):
@ -90,7 +87,7 @@ class InventoryAdapter:
def _strip_basic_auth(url):
# type: (unicode) -> unicode
# type: (str) -> str
"""Returns *url* with basic auth credentials removed. Also returns the
basic auth username and password if they're present in *url*.
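# A minimal sketch, using only the standard library, of stripping basic-auth
# credentials from an inventory URL as described above (names are made up).
from urllib.parse import urlsplit, urlunsplit

def strip_basic_auth_sketch(url):
    parts = urlsplit(url)
    if parts.username is None:
        return url
    host = parts.hostname or ''
    if parts.port:
        host = '%s:%d' % (host, parts.port)
    # rebuild the URL without the "user:password@" part
    return urlunsplit((parts.scheme, host, parts.path, parts.query, parts.fragment))

# strip_basic_auth_sketch('https://user:12345@example.com/objects.inv')
# returns 'https://example.com/objects.inv'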
@ -112,7 +109,7 @@ def _strip_basic_auth(url):
def _read_from_url(url, config=None):
# type: (unicode, Config) -> IO
# type: (str, Config) -> IO
"""Reads data from *url* with an HTTP *GET*.
This function supports fetching from resources which use basic HTTP auth as
@ -138,7 +135,7 @@ def _read_from_url(url, config=None):
def _get_safe_url(url):
# type: (unicode) -> unicode
# type: (str) -> str
"""Gets version of *url* with basic auth passwords obscured. This function
returns results suitable for printing and logging.
@ -164,7 +161,7 @@ def _get_safe_url(url):
def fetch_inventory(app, uri, inv):
# type: (Sphinx, unicode, Any) -> Any
# type: (Sphinx, str, Any) -> Any
"""Fetch, parse and return an intersphinx inventory file."""
# both *uri* (base URI of the links to generate) and *inv* (actual
# location of the inventory file) can be local or remote URIs
@ -211,9 +208,9 @@ def load_mappings(app):
inventories = InventoryAdapter(app.builder.env)
update = False
for key, value in app.config.intersphinx_mapping.items():
name = None # type: unicode
uri = None # type: unicode
inv = None # type: Union[unicode, Tuple[unicode, ...]]
name = None # type: str
uri = None # type: str
inv = None # type: Union[str, Tuple[str, ...]]
if isinstance(value, (list, tuple)):
# new format
@ -291,7 +288,7 @@ def missing_reference(app, env, node, contnode):
"""Attempt to resolve a missing reference via intersphinx references."""
target = node['reftarget']
inventories = InventoryAdapter(env)
objtypes = None # type: List[unicode]
objtypes = None # type: List[str]
if node['reftype'] == 'any':
# we search anything!
objtypes = ['%s:%s' % (domain.name, objtype)
@ -365,7 +362,7 @@ def missing_reference(app, env, node, contnode):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_config_value('intersphinx_mapping', {}, True)
app.add_config_value('intersphinx_cache_limit', 5, False)
app.add_config_value('intersphinx_timeout', None, False)
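# Hypothetical conf.py value for the 'intersphinx_mapping' setting registered
# above, using the named ("new") format that load_mappings() handles:
# name -> (base URI, inventory); None means fetch <base URI>/objects.inv.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
}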
@ -379,7 +376,7 @@ def setup(app):
def inspect_main(argv):
# type: (List[unicode]) -> None
# type: (List[str]) -> None
"""Debug functionality to print out an inventory"""
if len(argv) < 1:
print("Print out an inventory file.\n"
@ -396,7 +393,7 @@ def inspect_main(argv):
config = MockConfig()
def warn(self, msg):
# type: (unicode) -> None
# type: (str) -> None
print(msg, file=sys.stderr)
try:

View File

@ -27,7 +27,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
from sphinx.util.typing import unicode # NOQA
def html_visit_math(self, node):
@ -81,7 +80,7 @@ def install_jsmath(app, env):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('jsmath',
(html_visit_math, None),
(html_visit_displaymath, None))

View File

@ -20,7 +20,6 @@ if False:
# For type annotation
from typing import Any, Dict, Set # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
class LinkcodeError(SphinxError):
@ -45,7 +44,7 @@ def doctree_read(app, doctree):
for objnode in doctree.traverse(addnodes.desc):
domain = objnode.get('domain')
uris = set() # type: Set[unicode]
uris = set() # type: Set[str]
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
@ -78,7 +77,7 @@ def doctree_read(app, doctree):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.connect('doctree-read', doctree_read)
app.add_config_value('linkcode_resolve', None, '')
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}

View File

@ -25,7 +25,6 @@ if False:
# For type annotation
from typing import Any, Callable, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
@ -45,7 +44,7 @@ def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
def get_node_equation_number(writer, node):
# type: (HTMLTranslator, nodes.math_block) -> unicode
# type: (HTMLTranslator, nodes.math_block) -> str
warnings.warn('sphinx.ext.mathbase.get_node_equation_number() is moved to '
'sphinx.util.math package.',
RemovedInSphinx30Warning, stacklevel=2)
@ -54,7 +53,7 @@ def get_node_equation_number(writer, node):
def wrap_displaymath(text, label, numbering):
# type: (unicode, unicode, bool) -> unicode
# type: (str, str, bool) -> str
warnings.warn('sphinx.ext.mathbase.wrap_displaymath() is moved to '
'sphinx.util.math package.',
RemovedInSphinx30Warning, stacklevel=2)

View File

@ -29,7 +29,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
from sphinx.util.typing import unicode # NOQA
def html_visit_math(self, node):
@ -97,7 +96,7 @@ def install_mathjax(app, env):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('mathjax',
(html_visit_math, None),
(html_visit_displaymath, None))

View File

@ -16,7 +16,6 @@ from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
if False:
# For type annotation
from typing import Any, Dict, List # NOQA
from sphinx.util.typing import unicode # NOQA
class Config:
@ -278,7 +277,7 @@ class Config:
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
"""Sphinx extension setup function.
When the extension is loaded, Sphinx imports this module and executes
@ -336,7 +335,7 @@ def _patch_python_domain():
def _process_docstring(app, what, name, obj, options, lines):
# type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
# type: (Sphinx, str, str, Any, Any, List[str]) -> None
"""Process the docstring for a given python object.
Called when autodoc has read and processed a docstring. `lines` is a list
@ -386,7 +385,7 @@ def _process_docstring(app, what, name, obj, options, lines):
def _skip_member(app, what, name, obj, skip, options):
# type: (Sphinx, unicode, unicode, Any, bool, Any) -> bool
# type: (Sphinx, str, str, Any, bool, Any) -> bool
"""Determine if private and special class members are included in docs.
The following settings in conf.py determine if private and special class

View File

@ -24,7 +24,6 @@ if False:
from typing import Any, Callable, Dict, List, Tuple, Type, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config as SphinxConfig # NOQA
from sphinx.util.typing import unicode # NOQA
_directive_regex = re.compile(r'\.\. \S+::')
@ -108,7 +107,7 @@ class GoogleDocstring(UnicodeMixin):
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
# type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
# type: (Union[str, List[str]], SphinxConfig, Sphinx, str, str, Any, Any) -> None
self._config = config
self._app = app
@ -135,11 +134,11 @@ class GoogleDocstring(UnicodeMixin):
else:
lines = docstring
self._line_iter = modify_iter(lines, modifier=lambda s: s.rstrip())
self._parsed_lines = [] # type: List[unicode]
self._parsed_lines = [] # type: List[str]
self._is_in_section = False
self._section_indent = 0
if not hasattr(self, '_directive_sections'):
self._directive_sections = [] # type: List[unicode]
self._directive_sections = [] # type: List[str]
if not hasattr(self, '_sections'):
self._sections = {
'args': self._parse_parameters_section,
@ -172,14 +171,14 @@ class GoogleDocstring(UnicodeMixin):
'warns': self._parse_warns_section,
'yield': self._parse_yields_section,
'yields': self._parse_yields_section,
} # type: Dict[unicode, Callable]
} # type: Dict[str, Callable]
self._load_custom_sections()
self._parse()
def __unicode__(self):
# type: () -> unicode
# type: () -> str
"""Return the parsed docstring in reStructuredText format.
Returns
@ -191,7 +190,7 @@ class GoogleDocstring(UnicodeMixin):
return '\n'.join(self.lines())
def lines(self):
# type: () -> List[unicode]
# type: () -> List[str]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
@ -203,7 +202,7 @@ class GoogleDocstring(UnicodeMixin):
return self._parsed_lines
def _consume_indented_block(self, indent=1):
# type: (int) -> List[unicode]
# type: (int) -> List[str]
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
@ -213,7 +212,7 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_contiguous(self):
# type: () -> List[unicode]
# type: () -> List[str]
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
@ -222,7 +221,7 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_empty(self):
# type: () -> List[unicode]
# type: () -> List[str]
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
@ -231,11 +230,11 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
# type: (bool, bool) -> Tuple[str, str, List[str]]
line = next(self._line_iter)
before, colon, after = self._partition_field_on_colon(line)
_name, _type, _desc = before, '', after # type: unicode, unicode, unicode
_name, _type, _desc = before, '', after
if parse_type:
match = _google_typed_arg_regex.match(before)
@ -253,7 +252,7 @@ class GoogleDocstring(UnicodeMixin):
return _name, _type, _descs
def _consume_fields(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> List[Tuple[unicode, unicode, List[unicode]]]
# type: (bool, bool) -> List[Tuple[str, str, List[str]]]
self._consume_empty()
fields = []
while not self._is_section_break():
@ -263,7 +262,7 @@ class GoogleDocstring(UnicodeMixin):
return fields
def _consume_inline_attribute(self):
# type: () -> Tuple[unicode, List[unicode]]
# type: () -> Tuple[str, List[str]]
line = next(self._line_iter)
_type, colon, _desc = self._partition_field_on_colon(line)
if not colon or not _desc:
@ -274,11 +273,11 @@ class GoogleDocstring(UnicodeMixin):
return _type, _descs
def _consume_returns_section(self):
# type: () -> List[Tuple[unicode, unicode, List[unicode]]]
# type: () -> List[Tuple[str, str, List[str]]]
lines = self._dedent(self._consume_to_next_section())
if lines:
before, colon, after = self._partition_field_on_colon(lines[0])
_name, _type, _desc = '', '', lines # type: unicode, unicode, List[unicode]
_name, _type, _desc = '', '', lines
if colon:
if after:
@ -294,12 +293,12 @@ class GoogleDocstring(UnicodeMixin):
return []
def _consume_usage_section(self):
# type: () -> List[unicode]
# type: () -> List[str]
lines = self._dedent(self._consume_to_next_section())
return lines
def _consume_section_header(self):
# type: () -> unicode
# type: () -> str
section = next(self._line_iter)
stripped_section = section.strip(':')
if stripped_section.lower() in self._sections:
@ -307,14 +306,14 @@ class GoogleDocstring(UnicodeMixin):
return section
def _consume_to_end(self):
# type: () -> List[unicode]
# type: () -> List[str]
lines = []
while self._line_iter.has_next():
lines.append(next(self._line_iter))
return lines
def _consume_to_next_section(self):
# type: () -> List[unicode]
# type: () -> List[str]
self._consume_empty()
lines = []
while not self._is_section_break():
@ -322,7 +321,7 @@ class GoogleDocstring(UnicodeMixin):
return lines + self._consume_empty()
def _dedent(self, lines, full=False):
# type: (List[unicode], bool) -> List[unicode]
# type: (List[str], bool) -> List[str]
if full:
return [line.lstrip() for line in lines]
else:
@ -330,7 +329,7 @@ class GoogleDocstring(UnicodeMixin):
return [line[min_indent:] for line in lines]
def _escape_args_and_kwargs(self, name):
# type: (unicode) -> unicode
# type: (str) -> str
if name[:2] == '**':
return r'\*\*' + name[2:]
elif name[:1] == '*':
@ -339,7 +338,7 @@ class GoogleDocstring(UnicodeMixin):
return name
def _fix_field_desc(self, desc):
# type: (List[unicode]) -> List[unicode]
# type: (List[str]) -> List[str]
if self._is_list(desc):
desc = [u''] + desc
elif desc[0].endswith('::'):
@ -353,7 +352,7 @@ class GoogleDocstring(UnicodeMixin):
return desc
def _format_admonition(self, admonition, lines):
# type: (unicode, List[unicode]) -> List[unicode]
# type: (str, List[str]) -> List[str]
lines = self._strip_empty(lines)
if len(lines) == 1:
return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
@ -364,7 +363,7 @@ class GoogleDocstring(UnicodeMixin):
return [u'.. %s::' % admonition, u'']
def _format_block(self, prefix, lines, padding=None):
# type: (unicode, List[unicode], unicode) -> List[unicode]
# type: (str, List[str], str) -> List[str]
if lines:
if padding is None:
padding = ' ' * len(prefix)
@ -382,7 +381,7 @@ class GoogleDocstring(UnicodeMixin):
def _format_docutils_params(self, fields, field_role='param',
type_role='type'):
# type: (List[Tuple[unicode, unicode, List[unicode]]], unicode, unicode) -> List[unicode] # NOQA
# type: (List[Tuple[str, str, List[str]]], str, str) -> List[str]
lines = []
for _name, _type, _desc in fields:
_desc = self._strip_empty(_desc)
@ -398,14 +397,14 @@ class GoogleDocstring(UnicodeMixin):
return lines + ['']
def _format_field(self, _name, _type, _desc):
# type: (unicode, unicode, List[unicode]) -> List[unicode]
# type: (str, str, List[str]) -> List[str]
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _name:
if _type:
if '`' in _type:
field = '**%s** (%s)%s' % (_name, _type, separator) # type: unicode
field = '**%s** (%s)%s' % (_name, _type, separator)
else:
field = '**%s** (*%s*)%s' % (_name, _type, separator)
else:
@ -428,11 +427,11 @@ class GoogleDocstring(UnicodeMixin):
return [field]
def _format_fields(self, field_type, fields):
# type: (unicode, List[Tuple[unicode, unicode, List[unicode]]]) -> List[unicode]
# type: (str, List[Tuple[str, str, List[str]]]) -> List[str]
field_type = ':%s:' % field_type.strip()
padding = ' ' * len(field_type)
multi = len(fields) > 1
lines = [] # type: List[unicode]
lines = [] # type: List[str]
for _name, _type, _desc in fields:
field = self._format_field(_name, _type, _desc)
if multi:
@ -457,21 +456,21 @@ class GoogleDocstring(UnicodeMixin):
return 0
def _get_indent(self, line):
# type: (unicode) -> int
# type: (str) -> int
for i, s in enumerate(line):
if not s.isspace():
return i
return len(line)
def _get_initial_indent(self, lines):
# type: (List[unicode]) -> int
# type: (List[str]) -> int
for line in lines:
if line:
return self._get_indent(line)
return 0
def _get_min_indent(self, lines):
# type: (List[unicode]) -> int
# type: (List[str]) -> int
min_indent = None
for line in lines:
if line:
@ -483,11 +482,11 @@ class GoogleDocstring(UnicodeMixin):
return min_indent or 0
def _indent(self, lines, n=4):
# type: (List[unicode], int) -> List[unicode]
# type: (List[str], int) -> List[str]
return [(' ' * n) + line for line in lines]
def _is_indented(self, line, indent=1):
# type: (unicode, int) -> bool
# type: (str, int) -> bool
for i, s in enumerate(line):
if i >= indent:
return True
@ -496,7 +495,7 @@ class GoogleDocstring(UnicodeMixin):
return False
def _is_list(self, lines):
# type: (List[unicode]) -> bool
# type: (List[str]) -> bool
if not lines:
return False
if _bullet_list_regex.match(lines[0]):
@ -561,7 +560,7 @@ class GoogleDocstring(UnicodeMixin):
if self._name and (self._what == 'attribute' or self._what == 'data'):
# Implicit stop using StopIteration no longer allowed in
# Python 3.7; see PEP 479
res = [] # type: List[unicode]
res = [] # type: List[str]
try:
res = self._parse_attribute_docstring()
except StopIteration:
@ -590,12 +589,12 @@ class GoogleDocstring(UnicodeMixin):
self._parsed_lines.extend(lines)
def _parse_admonition(self, admonition, section):
# type (unicode, unicode) -> List[unicode]
# type (str, str) -> List[str]
lines = self._consume_to_next_section()
return self._format_admonition(admonition, lines)
def _parse_attribute_docstring(self):
# type: () -> List[unicode]
# type: () -> List[str]
_type, _desc = self._consume_inline_attribute()
lines = self._format_field('', '', _desc)
if _type:
@ -603,12 +602,12 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_attributes_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
lines = []
for _name, _type, _desc in self._consume_fields():
if self._config.napoleon_use_ivar:
_name = self._qualify_name(_name, self._obj)
field = ':ivar %s: ' % _name # type: unicode
field = ':ivar %s: ' % _name
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':vartype %s: %s' % (_name, _type))
@ -625,11 +624,11 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_examples_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
labels = {
'example': _('Example'),
'examples': _('Examples'),
} # type: Dict[unicode, unicode]
}
use_admonition = self._config.napoleon_use_admonition_for_examples
label = labels.get(section.lower(), section)
return self._parse_generic_section(label, use_admonition)
@ -639,19 +638,19 @@ class GoogleDocstring(UnicodeMixin):
return self._parse_generic_section(section, False)
def _parse_usage_section(self, section):
# type: (unicode) -> List[unicode]
header = ['.. rubric:: Usage:', ''] # type: List[unicode]
block = ['.. code-block:: python', ''] # type: List[unicode]
# type: (str) -> List[str]
header = ['.. rubric:: Usage:', '']
block = ['.. code-block:: python', '']
lines = self._consume_usage_section()
lines = self._indent(lines, 3)
return header + block + lines + ['']
def _parse_generic_section(self, section, use_admonition):
# type: (unicode, bool) -> List[unicode]
# type: (str, bool) -> List[str]
lines = self._strip_empty(self._consume_to_next_section())
lines = self._dedent(lines)
if use_admonition:
header = '.. admonition:: %s' % section # type: unicode
header = '.. admonition:: %s' % section
lines = self._indent(lines, 3)
else:
header = '.. rubric:: %s' % section
@ -661,7 +660,7 @@ class GoogleDocstring(UnicodeMixin):
return [header, '']
def _parse_keyword_arguments_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
fields = self._consume_fields()
if self._config.napoleon_use_keyword:
return self._format_docutils_params(
@ -672,8 +671,8 @@ class GoogleDocstring(UnicodeMixin):
return self._format_fields(_('Keyword Arguments'), fields)
def _parse_methods_section(self, section):
# type: (unicode) -> List[unicode]
lines = [] # type: List[unicode]
# type: (str) -> List[str]
lines = [] # type: List[str]
for _name, _type, _desc in self._consume_fields(parse_type=False):
lines.append('.. method:: %s' % _name)
if _desc:
@ -682,16 +681,16 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_notes_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
use_admonition = self._config.napoleon_use_admonition_for_notes
return self._parse_generic_section(_('Notes'), use_admonition)
def _parse_other_parameters_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
return self._format_fields(_('Other Parameters'), self._consume_fields())
def _parse_parameters_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
fields = self._consume_fields()
if self._config.napoleon_use_param:
return self._format_docutils_params(fields)
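# Illustrative only: with napoleon_use_param=True a Google-style "Args:" block
# is emitted as :param:/:type: fields; the docstring and config are made up.
from sphinx.ext.napoleon import Config
from sphinx.ext.napoleon.docstring import GoogleDocstring

docstring = """Add two numbers.

Args:
    x (int): first operand.
"""
print(str(GoogleDocstring(docstring, Config(napoleon_use_param=True))))
# Roughly:
#   Add two numbers.
#
#   :param x: first operand.
#   :type x: int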
@ -699,9 +698,9 @@ class GoogleDocstring(UnicodeMixin):
return self._format_fields(_('Parameters'), fields)
def _parse_raises_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
fields = self._consume_fields(parse_type=False, prefer_type=True)
lines = [] # type: List[unicode]
lines = [] # type: List[str]
for _name, _type, _desc in fields:
m = self._name_rgx.match(_type).groupdict()
if m['role']:
@ -715,12 +714,12 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_references_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
use_admonition = self._config.napoleon_use_admonition_for_references
return self._parse_generic_section(_('References'), use_admonition)
def _parse_returns_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
fields = self._consume_returns_section()
multi = len(fields) > 1
if multi:
@ -728,7 +727,7 @@ class GoogleDocstring(UnicodeMixin):
else:
use_rtype = self._config.napoleon_use_rtype
lines = [] # type: List[unicode]
lines = [] # type: List[str]
for _name, _type, _desc in fields:
if use_rtype:
field = self._format_field(_name, '', _desc)
@ -749,23 +748,23 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_see_also_section(self, section):
# type (unicode) -> List[unicode]
# type (str) -> List[str]
return self._parse_admonition('seealso', section)
def _parse_warns_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
return self._format_fields(_('Warns'), self._consume_fields())
def _parse_yields_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
fields = self._consume_returns_section()
return self._format_fields(_('Yields'), fields)
def _partition_field_on_colon(self, line):
# type: (unicode) -> Tuple[unicode, unicode, unicode]
# type: (str) -> Tuple[str, str, str]
before_colon = []
after_colon = []
colon = '' # type: unicode
colon = ''
found_colon = False
for i, source in enumerate(_xref_regex.split(line)):
if found_colon:
@ -785,7 +784,7 @@ class GoogleDocstring(UnicodeMixin):
"".join(after_colon).strip())
def _qualify_name(self, attr_name, klass):
# type: (unicode, Type) -> unicode
# type: (str, Type) -> str
if klass and '.' not in attr_name:
if attr_name.startswith('~'):
attr_name = attr_name[1:]
@ -797,7 +796,7 @@ class GoogleDocstring(UnicodeMixin):
return attr_name
def _strip_empty(self, lines):
# type: (List[unicode]) -> List[unicode]
# type: (List[str]) -> List[str]
if lines:
start = -1
for i, line in enumerate(lines):
@ -912,13 +911,13 @@ class NumpyDocstring(GoogleDocstring):
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
# type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
# type: (Union[str, List[str]], SphinxConfig, Sphinx, str, str, Any, Any) -> None
self._directive_sections = ['.. index::']
super(NumpyDocstring, self).__init__(docstring, config, app, what,
name, obj, options)
def _consume_field(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
# type: (bool, bool) -> Tuple[str, str, List[str]]
line = next(self._line_iter)
if parse_type:
_name, _, _type = self._partition_field_on_colon(line)
@ -935,11 +934,11 @@ class NumpyDocstring(GoogleDocstring):
return _name, _type, _desc
def _consume_returns_section(self):
# type: () -> List[Tuple[unicode, unicode, List[unicode]]]
# type: () -> List[Tuple[str, str, List[str]]]
return self._consume_fields(prefer_type=True)
def _consume_section_header(self):
# type: () -> unicode
# type: () -> str
section = next(self._line_iter)
if not _directive_regex.match(section):
# Consume the header underline
@ -970,7 +969,7 @@ class NumpyDocstring(GoogleDocstring):
return False
def _parse_see_also_section(self, section):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
lines = self._consume_to_next_section()
try:
return self._parse_numpydoc_see_also_section(lines)
@ -978,7 +977,7 @@ class NumpyDocstring(GoogleDocstring):
return self._format_admonition('seealso', lines)
def _parse_numpydoc_see_also_section(self, content):
# type: (List[unicode]) -> List[unicode]
# type: (List[str]) -> List[str]
"""
Derived from the NumpyDoc implementation of _parse_see_also.
@ -993,7 +992,7 @@ class NumpyDocstring(GoogleDocstring):
items = []
def parse_item_name(text):
# type: (unicode) -> Tuple[unicode, unicode]
# type: (str) -> Tuple[str, str]
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
@ -1005,7 +1004,7 @@ class NumpyDocstring(GoogleDocstring):
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
# type: (unicode, List[unicode]) -> None
# type: (str, List[str]) -> None
if not name:
return
name, role = parse_item_name(name)
@ -1013,7 +1012,7 @@ class NumpyDocstring(GoogleDocstring):
del rest[:]
current_func = None
rest = [] # type: List[unicode]
rest = [] # type: List[str]
for line in content:
if not line.strip():
@ -1059,12 +1058,12 @@ class NumpyDocstring(GoogleDocstring):
'const': 'const',
'attribute': 'attr',
'attr': 'attr'
} # type: Dict[unicode, unicode]
}
if self._what is None:
func_role = 'obj' # type: unicode
func_role = 'obj'
else:
func_role = roles.get(self._what, '')
lines = [] # type: List[unicode]
lines = [] # type: List[str]
last_had_desc = True
for func, desc, role in items:
if role:

View File

@ -31,7 +31,6 @@ if False:
from typing import Any, Dict, Iterable, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
from sphinx.writers.html import HTMLTranslator # NOQA
from sphinx.writers.latex import LaTeXTranslator # NOQA
@ -133,7 +132,7 @@ class TodoList(SphinxDirective):
def process_todo_nodes(app, doctree, fromdocname):
# type: (Sphinx, nodes.document, unicode) -> None
# type: (Sphinx, nodes.document, str) -> None
node = None # type: nodes.Element
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
@ -201,7 +200,7 @@ def process_todo_nodes(app, doctree, fromdocname):
def purge_todos(app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
# type: (Sphinx, BuildEnvironment, str) -> None
if not hasattr(env, 'todo_all_todos'):
return
env.todo_all_todos = [todo for todo in env.todo_all_todos # type: ignore
@ -209,7 +208,7 @@ def purge_todos(app, env, docname):
def merge_info(app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
# type: (Sphinx, BuildEnvironment, Iterable[str], BuildEnvironment) -> None
if not hasattr(other, 'todo_all_todos'):
return
if not hasattr(env, 'todo_all_todos'):
@ -247,7 +246,7 @@ def latex_depart_todo_node(self, node):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_event('todo-defined')
app.add_config_value('todo_include_todos', False, 'html')
app.add_config_value('todo_link_only', False, 'html')

View File

@ -29,13 +29,12 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
def _get_full_modname(app, modname, attribute):
# type: (Sphinx, str, unicode) -> unicode
# type: (Sphinx, str, str) -> str
try:
return get_full_modname(modname, attribute)
except AttributeError:
@ -97,7 +96,7 @@ def doctree_read(app, doctree):
for objnode in doctree.traverse(addnodes.desc):
if objnode.get('domain') != 'py':
continue
names = set() # type: Set[unicode]
names = set() # type: Set[str]
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
@ -130,7 +129,7 @@ def doctree_read(app, doctree):
def env_merge_info(app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
# type: (Sphinx, BuildEnvironment, Iterable[str], BuildEnvironment) -> None
if not hasattr(other, '_viewcode_modules'):
return
# create a _viewcode_modules dict on the main environment
@ -151,7 +150,7 @@ def missing_reference(app, env, node, contnode):
def collect_pages(app):
# type: (Sphinx) -> Iterator[Tuple[unicode, Dict[unicode, Any], unicode]]
# type: (Sphinx) -> Iterator[Tuple[str, Dict[str, Any], str]]
env = app.builder.env
if not hasattr(env, '_viewcode_modules'):
return
@ -216,7 +215,7 @@ def collect_pages(app):
'title': modname,
'body': (_('<h1>Source code for %s</h1>') % modname +
'\n'.join(lines)),
} # type: Dict[unicode, Any]
}
yield (pagename, context, 'page.html')
if not modnames:
@ -257,7 +256,7 @@ def migrate_viewcode_import(app, config):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_config_value('viewcode_import', None, False)
app.add_config_value('viewcode_enable_epub', False, False)
app.add_config_value('viewcode_follow_imported_members', True, False)

View File

@ -18,14 +18,13 @@ if False:
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
class Extension:
def __init__(self, name, module, **kwargs):
# type: (unicode, Any, Any) -> None
# type: (str, Any, Any) -> None
self.name = name
self.module = module
self.metadata = kwargs
@ -63,7 +62,7 @@ def verify_needs_extensions(app, config):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.connect('config-inited', verify_needs_extensions)
return {

View File

@ -34,7 +34,6 @@ if False:
# For type annotation
from typing import Any, Dict # NOQA
from pygments.formatter import Formatter # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -47,7 +46,7 @@ lexers = {
'pycon3': PythonConsoleLexer(python3=True, stripnl=False),
'rest': RstLexer(stripnl=False),
'c': CLexer(stripnl=False),
} # type: Dict[unicode, Lexer]
} # type: Dict[str, Lexer]
for _lexer in lexers.values():
_lexer.add_filter('raiseonerror')
@ -70,7 +69,7 @@ class PygmentsBridge:
latex_formatter = LatexFormatter
def __init__(self, dest='html', stylename='sphinx', trim_doctest_flags=None):
# type: (unicode, unicode, bool) -> None
# type: (str, str, bool) -> None
self.dest = dest
if stylename is None or stylename == 'sphinx':
style = SphinxStyle
@ -82,7 +81,7 @@ class PygmentsBridge:
stylename)
else:
style = get_style_by_name(stylename)
self.formatter_args = {'style': style} # type: Dict[unicode, Any]
self.formatter_args = {'style': style} # type: Dict[str, Any]
if dest == 'html':
self.formatter = self.html_formatter
else:
@ -100,7 +99,7 @@ class PygmentsBridge:
return self.formatter(**kwargs)
def unhighlighted(self, source):
# type: (unicode) -> unicode
# type: (str) -> str
warnings.warn('PygmentsBridge.unhighlighted() is now deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
if self.dest == 'html':
@ -114,7 +113,7 @@ class PygmentsBridge:
source + '\\end{Verbatim}\n'
def highlight_block(self, source, lang, opts=None, location=None, force=False, **kwargs):
# type: (unicode, unicode, Any, Any, bool, Any) -> unicode
# type: (str, str, Any, Any, bool, Any) -> str
if not isinstance(source, text_type):
source = source.decode()
@ -174,7 +173,7 @@ class PygmentsBridge:
return hlsource.translate(tex_hl_escape_map_new)
def get_stylesheet(self):
# type: () -> unicode
# type: () -> str
formatter = self.get_formatter()
if self.dest == 'html':
return formatter.get_style_defs('.highlight')
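
``PygmentsBridge`` is a thin wrapper around Pygments itself; roughly what ``highlight_block()`` delegates to can be sketched with plain Pygments calls (an illustration of the underlying library, not Sphinx's own code path):

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name

source = "print('hello')\n"
lexer = get_lexer_by_name('python3', stripnl=False)  # cf. the ``lexers`` table above
html = highlight(source, lexer, HtmlFormatter(style='default'))
print(html)  # <div class="highlight"><pre>...</pre></div>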

View File

@ -48,7 +48,6 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
@ -170,7 +169,7 @@ class SphinxBaseFileInput(FileInput):
super(SphinxBaseFileInput, self).__init__(*args, **kwds)
def read(self):
# type: () -> unicode
# type: () -> str
"""Reads the contents from file.
After reading, it emits Sphinx event ``source-read``.
@ -213,7 +212,7 @@ class SphinxRSTFileInput(SphinxBaseFileInput):
supported = ('restructuredtext',)
def prepend_prolog(self, text, prolog):
# type: (StringList, unicode) -> None
# type: (StringList, str) -> None
docinfo = self.count_docinfo_lines(text)
if docinfo:
# insert a blank line after docinfo
@ -227,7 +226,7 @@ class SphinxRSTFileInput(SphinxBaseFileInput):
text.insert(docinfo + lineno + 1, '', '<generated>', 0)
def append_epilog(self, text, epilog):
# type: (StringList, unicode) -> None
# type: (StringList, str) -> None
# append a blank line and rst_epilog
text.append('', '<generated>', 0)
for lineno, line in enumerate(epilog.splitlines()):
@ -265,7 +264,7 @@ class FiletypeNotFoundError(Exception):
def get_filetype(source_suffix, filename):
# type: (Dict[unicode, unicode], unicode) -> unicode
# type: (Dict[str, str], str) -> str
for suffix, filetype in source_suffix.items():
if filename.endswith(suffix):
# If default filetype (None), considered as restructuredtext.
@ -275,7 +274,7 @@ def get_filetype(source_suffix, filename):
def read_doc(app, env, filename):
# type: (Sphinx, BuildEnvironment, unicode) -> nodes.document
# type: (Sphinx, BuildEnvironment, str) -> nodes.document
"""Parse a document and convert to doctree."""
# set up error_handler for the target document
error_handler = UnicodeDecodeErrorHandler(env.docname)
@ -307,7 +306,7 @@ def read_doc(app, env, filename):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.registry.add_source_input(SphinxFileInput)
return {
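
A standalone re-statement of the suffix lookup that ``get_filetype()`` above performs; the mapping below is hypothetical and the fallback behaviour is simplified:

def guess_filetype(source_suffix, filename):
    # type: (dict, str) -> str
    """Return the file type registered for the first matching suffix."""
    for suffix, filetype in source_suffix.items():
        if filename.endswith(suffix):
            # A registered suffix with no explicit type is treated as reST.
            return filetype or 'restructuredtext'
    raise ValueError('no source suffix matches %r' % filename)


print(guess_filetype({'.rst': 'restructuredtext', '.md': 'markdown'}, 'index.rst'))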

View File

@ -28,18 +28,17 @@ if False:
from jinja2.environment import Environment # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.theming import Theme # NOQA
from sphinx.util.typing import unicode # NOQA
def _tobool(val):
# type: (unicode) -> bool
# type: (str) -> bool
if isinstance(val, str):
return val.lower() in ('true', '1', 'yes', 'on')
return bool(val)
def _toint(val):
# type: (unicode) -> int
# type: (str) -> int
try:
return int(val)
except ValueError:
@ -47,7 +46,7 @@ def _toint(val):
def _todim(val):
# type: (Union[int, unicode]) -> unicode
# type: (Union[int, str]) -> str
"""
Make val a css dimension. In particular the following transformations
are performed:
@ -88,7 +87,7 @@ def _slice_index(values, slices):
def accesskey(context, key):
# type: (Any, unicode) -> unicode
# type: (Any, str) -> str
"""Helper to output each access key only once."""
if '_accesskeys' not in context:
context.vars['_accesskeys'] = {}
@ -116,7 +115,7 @@ class idgen:
@contextfunction
def warning(context, message, *args, **kwargs):
# type: (Dict, unicode, Any, Any) -> unicode
# type: (Dict, str, Any, Any) -> str
if 'pagename' in context:
filename = context.get('pagename') + context.get('file_suffix', '')
message = 'in rendering %s: %s' % (filename, message)
@ -132,7 +131,7 @@ class SphinxFileSystemLoader(FileSystemLoader):
"""
def get_source(self, environment, template):
# type: (Environment, unicode) -> Tuple[unicode, unicode, Callable]
# type: (Environment, str) -> Tuple[str, str, Callable]
for searchpath in self.searchpath:
filename = path.join(searchpath, template)
f = open_if_exists(filename)
@ -161,7 +160,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
# TemplateBridge interface
def init(self, builder, theme=None, dirs=None):
# type: (Builder, Theme, List[unicode]) -> None
# type: (Builder, Theme, List[str]) -> None
# create a chain of paths to search
if theme:
# the theme's own dir and its bases' dirs
@ -205,11 +204,11 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
self.environment.install_gettext_translations(builder.app.translator) # type: ignore # NOQA
def render(self, template, context): # type: ignore
# type: (unicode, Dict) -> unicode
# type: (str, Dict) -> str
return self.environment.get_template(template).render(context)
def render_string(self, source, context):
# type: (unicode, Dict) -> unicode
# type: (str, Dict) -> str
return self.environment.from_string(source).render(context)
def newest_template_mtime(self):
@ -219,7 +218,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
# Loader interface
def get_source(self, environment, template):
# type: (Environment, unicode) -> Tuple[unicode, unicode, Callable]
# type: (Environment, str) -> Tuple[str, str, Callable]
loaders = self.loaders
# exclamation mark starts search from theme
if template.startswith('!'):
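
The small template helpers at the top of this file are easy to exercise in isolation; ``_tobool()`` in particular accepts the loose truthy strings shown above. A quick standalone check (the function is renamed to avoid implying it is importable):

def tobool(val):
    """Mirror of the ``_tobool`` helper: loose truthiness for theme options."""
    if isinstance(val, str):
        return val.lower() in ('true', '1', 'yes', 'on')
    return bool(val)


assert tobool('Yes') is True
assert tobool('off') is False
assert tobool(1) is True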

View File

@ -12,18 +12,16 @@
import gettext
import locale
import warnings
from collections import defaultdict
from collections import UserString, defaultdict
from gettext import NullTranslations
from six import text_type
from six.moves import UserString
from sphinx.deprecation import RemovedInSphinx30Warning
if False:
# For type annotation
from typing import Any, Callable, Dict, Iterator, List, Tuple # NOQA
from sphinx.util.typing import unicode # NOQA
class _TranslationProxy(UserString):
@ -41,7 +39,7 @@ class _TranslationProxy(UserString):
__slots__ = ('_func', '_args')
def __new__(cls, func, *args):
# type: (Callable, unicode) -> object
# type: (Callable, str) -> object
if not args:
# not called with "function" and "arguments", but a plain string
return text_type(func)
@ -52,20 +50,20 @@ class _TranslationProxy(UserString):
return (self._func,) + self._args # type: ignore
def __init__(self, func, *args):
# type: (Callable, unicode) -> None
# type: (Callable, str) -> None
self._func = func
self._args = args
@property
def data(self): # type: ignore
# type: () -> unicode
# type: () -> str
return self._func(*self._args)
# replace function from UserString; it instantiates a self.__class__
# for the encoding result
def encode(self, encoding=None, errors=None): # type: ignore
# type: (unicode, unicode) -> bytes
# type: (str, str) -> bytes
if encoding:
if errors:
return self.data.encode(encoding, errors)
@ -83,45 +81,45 @@ class _TranslationProxy(UserString):
return str(self.data)
def __unicode__(self):
# type: () -> unicode
# type: () -> str
return text_type(self.data)
def __add__(self, other): # type: ignore
# type: (unicode) -> unicode
# type: (str) -> str
return self.data + other
def __radd__(self, other):
# type: (unicode) -> unicode
# type: (str) -> str
return other + self.data
def __mod__(self, other): # type: ignore
# type: (unicode) -> unicode
# type: (str) -> str
return self.data % other
def __rmod__(self, other):
# type: (unicode) -> unicode
# type: (str) -> str
return other % self.data
def __mul__(self, other): # type: ignore
# type: (Any) -> unicode
# type: (Any) -> str
return self.data * other
def __rmul__(self, other):
# type: (Any) -> unicode
# type: (Any) -> str
return other * self.data
def __getattr__(self, name):
# type: (unicode) -> Any
# type: (str) -> Any
if name == '__members__':
return self.__dir__()
return getattr(self.data, name)
def __getstate__(self):
# type: () -> Tuple[Callable, Tuple[unicode, ...]]
# type: () -> Tuple[Callable, Tuple[str, ...]]
return self._func, self._args
def __setstate__(self, tup):
# type: (Tuple[Callable, Tuple[unicode]]) -> None
# type: (Tuple[Callable, Tuple[str]]) -> None
self._func, self._args = tup
def __copy__(self):
@ -137,7 +135,7 @@ class _TranslationProxy(UserString):
def mygettext(string):
# type: (unicode) -> unicode
# type: (str) -> str
"""Used instead of _ when creating TranslationProxies, because _ is
not bound yet at that time.
"""
@ -147,7 +145,7 @@ def mygettext(string):
def lazy_gettext(string):
# type: (unicode) -> unicode
# type: (str) -> str
"""A lazy version of `gettext`."""
# if isinstance(string, _TranslationProxy):
# return string
@ -156,11 +154,11 @@ def lazy_gettext(string):
return _TranslationProxy(mygettext, string) # type: ignore
translators = defaultdict(NullTranslations) # type: Dict[Tuple[unicode, unicode], NullTranslations] # NOQA
translators = defaultdict(NullTranslations) # type: Dict[Tuple[str, str], NullTranslations]
def init(locale_dirs, language, catalog='sphinx', namespace='general'):
# type: (List[unicode], unicode, unicode, unicode) -> Tuple[NullTranslations, bool]
# type: (List[str], str, str, str) -> Tuple[NullTranslations, bool]
"""Look for message catalogs in `locale_dirs` and *ensure* that there is at
least a NullTranslations catalog set in `translators`. If called multiple
times or if several ``.mo`` files are found, their contents are merged
@ -202,7 +200,7 @@ def init(locale_dirs, language, catalog='sphinx', namespace='general'):
def init_console(locale_dir, catalog):
# type: (unicode, unicode) -> Tuple[NullTranslations, bool]
# type: (str, str) -> Tuple[NullTranslations, bool]
"""Initialize locale for console.
.. versionadded:: 1.8
@ -218,17 +216,17 @@ def init_console(locale_dir, catalog):
def get_translator(catalog='sphinx', namespace='general'):
# type: (unicode, unicode) -> NullTranslations
# type: (str, str) -> NullTranslations
return translators[(namespace, catalog)]
def is_translator_registered(catalog='sphinx', namespace='general'):
# type: (unicode, unicode) -> bool
# type: (str, str) -> bool
return (namespace, catalog) in translators
def _lazy_translate(catalog, namespace, message):
# type: (unicode, unicode, unicode) -> unicode
# type: (str, str, str) -> str
"""Used instead of _ when creating TranslationProxy, because _ is
not bound yet at that time.
"""
@ -261,7 +259,7 @@ def get_translation(catalog, namespace='general'):
.. versionadded:: 1.8
"""
def gettext(message, *args):
# type: (unicode, *Any) -> unicode
# type: (str, *Any) -> str
if not is_translator_registered(catalog, namespace):
# not initialized yet
return _TranslationProxy(_lazy_translate, catalog, namespace, message) # type: ignore # NOQA
@ -302,10 +300,10 @@ admonitionlabels = {
'seealso': _('See also'),
'tip': _('Tip'),
'warning': _('Warning'),
} # type: Dict[unicode, unicode]
}
# Moved to sphinx.directives.other (will be overridden later)
versionlabels = {} # type: Dict[unicode, unicode]
versionlabels = {} # type: Dict[str, str]
# Moved to sphinx.domains.python (will be overridden later)
pairindextypes = {} # type: Dict[unicode, unicode]
pairindextypes = {} # type: Dict[str, str]
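
The public entry point for extension authors in this module is ``get_translation()`` (see its docstring above, added in 1.8). A hedged usage sketch with a hypothetical catalog name; until ``init()`` has loaded a catalog, the call resolves to a lazy ``_TranslationProxy``:

from sphinx.locale import get_translation

MESSAGE_CATALOG_NAME = 'myextension'   # hypothetical catalog
_ = get_translation(MESSAGE_CATALOG_NAME)

title = _('Example Title')  # resolved against the catalog once it is initialized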

View File

@ -23,7 +23,6 @@ if False:
from docutils import nodes # NOQA
from docutils.transforms import Transform # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.typing import unicode # NOQA
class Parser(docutils.parsers.Parser):
@ -104,7 +103,7 @@ class RSTParser(docutils.parsers.rst.Parser, Parser):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.add_source_parser(RSTParser)
return {

View File

@ -20,17 +20,16 @@ from sphinx.util.osutil import SEP, relpath
if TYPE_CHECKING:
from typing import Dict, List, Set # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
EXCLUDE_PATHS = ['**/_sources', '.#*', '**/.#*', '*.lproj/**'] # type: List[unicode]
EXCLUDE_PATHS = ['**/_sources', '.#*', '**/.#*', '*.lproj/**']
class Project(object):
"""A project is source code set of Sphinx document."""
def __init__(self, srcdir, source_suffix):
# type: (unicode, Dict[unicode, unicode]) -> None
# type: (str, Dict[str, str]) -> None
#: Source directory.
self.srcdir = srcdir
@ -38,7 +37,7 @@ class Project(object):
self.source_suffix = source_suffix
#: The names of the documents that belong to this project.
self.docnames = set() # type: Set[unicode]
self.docnames = set() # type: Set[str]
def restore(self, other):
# type: (Project) -> None
@ -46,7 +45,7 @@ class Project(object):
self.docnames = other.docnames
def discover(self, exclude_paths=[]):
# type: (List[unicode]) -> Set[unicode]
# type: (List[str]) -> Set[str]
"""Find all document files in the source directory and put them in
:attr:`docnames`.
"""
@ -63,7 +62,7 @@ class Project(object):
return self.docnames
def path2doc(self, filename):
# type: (unicode) -> unicode
# type: (str) -> str
"""Return the docname for the filename if the file is document.
*filename* should be absolute or relative to the source directory.
@ -78,7 +77,7 @@ class Project(object):
return None
def doc2path(self, docname, basedir=True):
# type: (unicode, bool) -> unicode
# type: (str, bool) -> str
"""Return the filename for the document name.
If *basedir* is True, return as an absolute path.
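
Taken together, the Project API above amounts to a small docname/path mapping; a hedged usage sketch with hypothetical paths (assuming the class is importable as ``sphinx.project.Project``, as this file suggests):

from sphinx.project import Project

project = Project('/path/to/docs', {'.rst': 'restructuredtext'})
project.discover()                                   # populates project.docnames
print(project.path2doc('/path/to/docs/usage.rst'))   # e.g. 'usage'
print(project.doc2path('usage', basedir=False))      # e.g. 'usage.rst'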

View File

@ -8,14 +8,11 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import re
from io import BytesIO
from io import BytesIO, StringIO
from zipfile import ZipFile
from six import StringIO
from sphinx.errors import PycodeError
from sphinx.pycode.parser import Parser
from sphinx.util import get_module_source, detect_encoding
@ -23,23 +20,22 @@ from sphinx.util import get_module_source, detect_encoding
if False:
# For type annotation
from typing import Any, Dict, IO, List, Tuple # NOQA
from sphinx.util.typing import unicode # NOQA
class ModuleAnalyzer:
# cache for analyzer objects -- caches both by module and file name
cache = {} # type: Dict[Tuple[unicode, unicode], Any]
cache = {} # type: Dict[Tuple[str, str], Any]
@classmethod
def for_string(cls, string, modname, srcname='<string>'):
# type: (unicode, unicode, unicode) -> ModuleAnalyzer
# type: (str, str, str) -> ModuleAnalyzer
if isinstance(string, bytes):
return cls(BytesIO(string), modname, srcname)
return cls(StringIO(string), modname, srcname, decoded=True)
@classmethod
def for_file(cls, filename, modname):
# type: (unicode, unicode) -> ModuleAnalyzer
# type: (str, str) -> ModuleAnalyzer
if ('file', filename) in cls.cache:
return cls.cache['file', filename]
try:
@ -55,7 +51,7 @@ class ModuleAnalyzer:
@classmethod
def for_egg(cls, filename, modname):
# type: (unicode, unicode) -> ModuleAnalyzer
# type: (str, str) -> ModuleAnalyzer
eggpath, relpath = re.split('(?<=\\.egg)/', filename)
try:
with ZipFile(eggpath) as egg:
@ -86,7 +82,7 @@ class ModuleAnalyzer:
return obj
def __init__(self, source, modname, srcname, decoded=False):
# type: (IO, unicode, unicode, bool) -> None
# type: (IO, str, str, bool) -> None
self.modname = modname # name of the module
self.srcname = srcname # name of the source file
@ -101,9 +97,9 @@ class ModuleAnalyzer:
self.code = source.read()
# will be filled by parse()
self.attr_docs = None # type: Dict[Tuple[unicode, unicode], List[unicode]]
self.tagorder = None # type: Dict[unicode, int]
self.tags = None # type: Dict[unicode, Tuple[unicode, int, int]]
self.attr_docs = None # type: Dict[Tuple[str, str], List[str]]
self.tagorder = None # type: Dict[str, int]
self.tags = None # type: Dict[str, Tuple[str, int, int]]
def parse(self):
# type: () -> None
@ -125,7 +121,7 @@ class ModuleAnalyzer:
raise PycodeError('parsing %r failed: %r' % (self.srcname, exc))
def find_attr_docs(self):
# type: () -> Dict[Tuple[unicode, unicode], List[unicode]]
# type: () -> Dict[Tuple[str, str], List[str]]
"""Find class and module-level attributes and their documentation."""
if self.attr_docs is None:
self.parse()
@ -133,7 +129,7 @@ class ModuleAnalyzer:
return self.attr_docs
def find_tags(self):
# type: () -> Dict[unicode, Tuple[unicode, int, int]]
# type: () -> Dict[str, Tuple[str, int, int]]
"""Find class, function and method definitions and their location."""
if self.tags is None:
self.parse()
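
A hedged sketch of driving ``ModuleAnalyzer`` from a string, as ``for_string()`` above allows; the module source and name are made up:

from sphinx.pycode import ModuleAnalyzer

source = "#: number of retries before giving up\nRETRIES = 3\n"
analyzer = ModuleAnalyzer.for_string(source, 'example_module')

print(analyzer.find_attr_docs())  # maps (class, attribute) -> list of comment lines
print(analyzer.find_tags())       # maps definition name -> (kind, start line, end line)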

View File

@ -22,7 +22,6 @@ from six import text_type
if False:
# For type annotation
from typing import Any, Dict, IO, List, Tuple # NOQA
from sphinx.util.typing import unicode # NOQA
comment_re = re.compile(u'^\\s*#: ?(.*)\r?\n?$')
indent_re = re.compile(u'^\\s*$')
@ -36,7 +35,7 @@ else:
def filter_whitespace(code):
# type: (unicode) -> unicode
# type: (str) -> str
return code.replace('\f', ' ') # replace FF (form feed) with whitespace
@ -50,7 +49,7 @@ def get_assign_targets(node):
def get_lvar_names(node, self=None):
# type: (ast.AST, ast.arg) -> List[unicode]
# type: (ast.AST, ast.arg) -> List[str]
"""Convert assignment-AST to variable names.
This raises `TypeError` if the assignment does not create a new variable::
@ -93,7 +92,7 @@ def get_lvar_names(node, self=None):
def dedent_docstring(s):
# type: (unicode) -> unicode
# type: (str) -> str
"""Remove common leading indentation from docstring."""
def dummy():
# type: () -> None
@ -109,7 +108,7 @@ class Token:
"""Better token wrapper for tokenize module."""
def __init__(self, kind, value, start, end, source):
# type: (int, Any, Tuple[int, int], Tuple[int, int], unicode) -> None # NOQA
# type: (int, Any, Tuple[int, int], Tuple[int, int], str) -> None
self.kind = kind
self.value = value
self.start = start
@ -141,7 +140,7 @@ class Token:
class TokenProcessor:
def __init__(self, buffers):
# type: (List[unicode]) -> None
# type: (List[str]) -> None
lines = iter(buffers)
self.buffers = buffers
self.tokens = tokenize.generate_tokens(lambda: next(lines))
@ -149,7 +148,7 @@ class TokenProcessor:
self.previous = None # type: Token
def get_line(self, lineno):
# type: (int) -> unicode
# type: (int) -> str
"""Returns specified line."""
return self.buffers[lineno - 1]
@ -196,9 +195,9 @@ class AfterCommentParser(TokenProcessor):
"""
def __init__(self, lines):
# type: (List[unicode]) -> None
# type: (List[str]) -> None
super(AfterCommentParser, self).__init__(lines)
self.comment = None # type: unicode
self.comment = None # type: str
def fetch_rvalue(self):
# type: () -> List[Token]
@ -240,20 +239,20 @@ class VariableCommentPicker(ast.NodeVisitor):
"""Python source code parser to pick up variable comments."""
def __init__(self, buffers, encoding):
# type: (List[unicode], unicode) -> None
# type: (List[str], str) -> None
self.counter = itertools.count()
self.buffers = buffers
self.encoding = encoding
self.context = [] # type: List[unicode]
self.current_classes = [] # type: List[unicode]
self.context = [] # type: List[str]
self.current_classes = [] # type: List[str]
self.current_function = None # type: ast.FunctionDef
self.comments = {} # type: Dict[Tuple[unicode, unicode], unicode]
self.comments = {} # type: Dict[Tuple[str, str], str]
self.previous = None # type: ast.AST
self.deforders = {} # type: Dict[unicode, int]
self.deforders = {} # type: Dict[str, int]
super(VariableCommentPicker, self).__init__()
def add_entry(self, name):
# type: (unicode) -> None
# type: (str) -> None
if self.current_function:
if self.current_classes and self.context[-1] == "__init__":
# store variable comments inside __init__ method of classes
@ -266,7 +265,7 @@ class VariableCommentPicker(ast.NodeVisitor):
self.deforders[".".join(definition)] = next(self.counter)
def add_variable_comment(self, name, comment):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
if self.current_function:
if self.current_classes and self.context[-1] == "__init__":
# store variable comments inside __init__ method of classes
@ -287,7 +286,7 @@ class VariableCommentPicker(ast.NodeVisitor):
return None
def get_line(self, lineno):
# type: (int) -> unicode
# type: (int) -> str
"""Returns specified line."""
return self.buffers[lineno - 1]
@ -388,15 +387,15 @@ class VariableCommentPicker(ast.NodeVisitor):
class DefinitionFinder(TokenProcessor):
def __init__(self, lines):
# type: (List[unicode]) -> None
# type: (List[str]) -> None
super(DefinitionFinder, self).__init__(lines)
self.decorator = None # type: Token
self.context = [] # type: List[unicode]
self.context = [] # type: List[str]
self.indents = [] # type: List
self.definitions = {} # type: Dict[unicode, Tuple[unicode, int, int]]
self.definitions = {} # type: Dict[str, Tuple[str, int, int]]
def add_definition(self, name, entry):
# type: (unicode, Tuple[unicode, int, int]) -> None
# type: (str, Tuple[str, int, int]) -> None
if self.indents and self.indents[-1][0] == 'def' and entry[0] == 'def':
# ignore definition of inner function
pass
@ -425,7 +424,7 @@ class DefinitionFinder(TokenProcessor):
self.finalize_block()
def parse_definition(self, typ):
# type: (unicode) -> None
# type: (str) -> None
name = self.fetch_token()
self.context.append(name.value)
funcname = '.'.join(self.context)
@ -465,12 +464,12 @@ class Parser:
"""
def __init__(self, code, encoding='utf-8'):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
self.code = filter_whitespace(code)
self.encoding = encoding
self.comments = {} # type: Dict[Tuple[unicode, unicode], unicode]
self.deforders = {} # type: Dict[unicode, int]
self.definitions = {} # type: Dict[unicode, Tuple[unicode, int, int]]
self.comments = {} # type: Dict[Tuple[str, str], str]
self.deforders = {} # type: Dict[str, int]
self.definitions = {} # type: Dict[str, Tuple[str, int, int]]
def parse(self):
# type: () -> None
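
``TokenProcessor`` above feeds its buffered lines to the standard ``tokenize`` module one at a time via ``lambda: next(lines)``; the same trick works standalone (here with an explicit ``''`` sentinel at EOF for robustness, and an illustrative sample line):

import tokenize

lines = iter(["answer = 42  #: the answer\n"])
for tok in tokenize.generate_tokens(lambda: next(lines, '')):
    # Each token carries its kind, text and (row, column) span, as wrapped by Token above.
    print(tokenize.tok_name[tok.type], repr(tok.string))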

View File

@ -8,7 +8,6 @@
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import traceback
import warnings
@ -44,7 +43,7 @@ if False:
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
from sphinx.io import SphinxFileInput # NOQA
from sphinx.util.typing import RoleFunction, TitleGetter, unicode # NOQA
from sphinx.util.typing import RoleFunction, TitleGetter # NOQA
logger = logging.getLogger(__name__)
@ -52,76 +51,76 @@ logger = logging.getLogger(__name__)
# Values are Sphinx version that merge the extension.
EXTENSION_BLACKLIST = {
"sphinxjp.themecore": "1.2"
} # type: Dict[unicode, unicode]
}
class SphinxComponentRegistry:
def __init__(self):
# type: () -> None
#: special attrgetter for autodoc; class object -> attrgetter
self.autodoc_attrgettrs = {} # type: Dict[Type, Callable[[Any, unicode, Any], Any]]
self.autodoc_attrgettrs = {} # type: Dict[Type, Callable[[Any, str, Any], Any]]
#: builders; a dict of builder name -> builder class
self.builders = {} # type: Dict[unicode, Type[Builder]]
self.builders = {} # type: Dict[str, Type[Builder]]
#: autodoc documenters; a dict of documenter name -> documenter class
self.documenters = {} # type: Dict[unicode, Type[Documenter]]
self.documenters = {} # type: Dict[str, Type[Documenter]]
#: css_files; a list of tuple of filename and attributes
self.css_files = [] # type: List[Tuple[unicode, Dict[unicode, unicode]]]
self.css_files = [] # type: List[Tuple[str, Dict[str, str]]]
#: domains; a dict of domain name -> domain class
self.domains = {} # type: Dict[unicode, Type[Domain]]
self.domains = {} # type: Dict[str, Type[Domain]]
#: additional directives for domains
#: a dict of domain name -> dict of directive name -> directive
self.domain_directives = {} # type: Dict[unicode, Dict[unicode, Any]]
self.domain_directives = {} # type: Dict[str, Dict[str, Any]]
#: additional indices for domains
#: a dict of domain name -> list of index class
self.domain_indices = {} # type: Dict[unicode, List[Type[Index]]]
self.domain_indices = {} # type: Dict[str, List[Type[Index]]]
#: additional object types for domains
#: a dict of domain name -> dict of objtype name -> objtype
self.domain_object_types = {} # type: Dict[unicode, Dict[unicode, ObjType]]
self.domain_object_types = {} # type: Dict[str, Dict[str, ObjType]]
#: additional roles for domains
#: a dict of domain name -> dict of role name -> role impl.
self.domain_roles = {} # type: Dict[unicode, Dict[unicode, Union[RoleFunction, XRefRole]]] # NOQA
self.domain_roles = {} # type: Dict[str, Dict[str, Union[RoleFunction, XRefRole]]] # NOQA
#: additional enumerable nodes
#: a dict of node class -> tuple of figtype and title_getter function
self.enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[unicode, TitleGetter]] # NOQA
self.enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[str, TitleGetter]]
#: HTML inline and block math renderers
#: a dict of name -> tuple of visit function and depart function
self.html_inline_math_renderers = {} # type: Dict[unicode, Tuple[Callable, Callable]] # NOQA
self.html_block_math_renderers = {} # type: Dict[unicode, Tuple[Callable, Callable]] # NOQA
self.html_inline_math_renderers = {} # type: Dict[str, Tuple[Callable, Callable]]
self.html_block_math_renderers = {} # type: Dict[str, Tuple[Callable, Callable]]
#: js_files; list of JS paths or URLs
self.js_files = [] # type: List[Tuple[unicode, Dict[unicode, unicode]]]
self.js_files = [] # type: List[Tuple[str, Dict[str, str]]]
#: LaTeX packages; list of package names and its options
self.latex_packages = [] # type: List[Tuple[unicode, unicode]]
self.latex_packages = [] # type: List[Tuple[str, str]]
#: post transforms; list of transforms
self.post_transforms = [] # type: List[Type[Transform]]
#: source parsers; file type -> parser class
self.source_parsers = {} # type: Dict[unicode, Type[Parser]]
self.source_parsers = {} # type: Dict[str, Type[Parser]]
#: source inputs; file type -> input class
self.source_inputs = {} # type: Dict[unicode, Type[Input]]
self.source_inputs = {} # type: Dict[str, Type[Input]]
#: source suffix: suffix -> file type
self.source_suffix = {} # type: Dict[unicode, unicode]
self.source_suffix = {} # type: Dict[str, str]
#: custom translators; builder name -> translator class
self.translators = {} # type: Dict[unicode, Type[nodes.NodeVisitor]]
self.translators = {} # type: Dict[str, Type[nodes.NodeVisitor]]
#: custom handlers for translators
#: a dict of builder name -> dict of node name -> visitor and departure functions
self.translation_handlers = {} # type: Dict[unicode, Dict[unicode, Tuple[Callable, Callable]]] # NOQA
self.translation_handlers = {} # type: Dict[str, Dict[str, Tuple[Callable, Callable]]]
#: additional transforms; list of transforms
self.transforms = [] # type: List[Type[Transform]]
@ -137,7 +136,7 @@ class SphinxComponentRegistry:
self.builders[builder.name] = builder
def preload_builder(self, app, name):
# type: (Sphinx, unicode) -> None
# type: (Sphinx, str) -> None
if name is None:
return
@ -152,7 +151,7 @@ class SphinxComponentRegistry:
self.load_extension(app, entry_point.module_name)
def create_builder(self, app, name):
# type: (Sphinx, unicode) -> Builder
# type: (Sphinx, str) -> Builder
if name not in self.builders:
raise SphinxError(__('Builder name %s not registered') % name)
@ -166,7 +165,7 @@ class SphinxComponentRegistry:
self.domains[domain.name] = domain
def has_domain(self, domain):
# type: (unicode) -> bool
# type: (str) -> bool
return domain in self.domains
def create_domains(self, env):
@ -192,7 +191,7 @@ class SphinxComponentRegistry:
def add_directive_to_domain(self, domain, name, obj, has_content=None, argument_spec=None,
override=False, **option_spec):
# type: (unicode, unicode, Any, bool, Any, bool, Any) -> None
# type: (str, str, Any, bool, Any, bool, Any) -> None
logger.debug('[app] adding directive to domain: %r',
(domain, name, obj, has_content, argument_spec, option_spec))
if domain not in self.domains:
@ -208,7 +207,7 @@ class SphinxComponentRegistry:
directives[name] = obj
def add_role_to_domain(self, domain, name, role, override=False):
# type: (unicode, unicode, Union[RoleFunction, XRefRole], bool) -> None
# type: (str, str, Union[RoleFunction, XRefRole], bool) -> None
logger.debug('[app] adding role to domain: %r', (domain, name, role))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
@ -219,7 +218,7 @@ class SphinxComponentRegistry:
roles[name] = role
def add_index_to_domain(self, domain, index, override=False):
# type: (unicode, Type[Index], bool) -> None
# type: (str, Type[Index], bool) -> None
logger.debug('[app] adding index to domain: %r', (domain, index))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
@ -232,7 +231,7 @@ class SphinxComponentRegistry:
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[], override=False):
# type: (unicode, unicode, unicode, Callable, Type[nodes.TextElement], unicode, List, bool) -> None # NOQA
# type: (str, str, str, Callable, Type[nodes.TextElement], str, List, bool) -> None
logger.debug('[app] adding object type: %r',
(directivename, rolename, indextemplate, parse_node,
ref_nodeclass, objname, doc_field_types))
@ -255,7 +254,7 @@ class SphinxComponentRegistry:
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname='', override=False):
# type: (unicode, unicode, unicode, Type[nodes.TextElement], unicode, bool) -> None
# type: (str, str, str, Type[nodes.TextElement], str, bool) -> None
logger.debug('[app] adding crossref type: %r',
(directivename, rolename, indextemplate, ref_nodeclass, objname))
@ -274,7 +273,7 @@ class SphinxComponentRegistry:
object_types[directivename] = ObjType(objname or directivename, rolename)
def add_source_suffix(self, suffix, filetype, override=False):
# type: (unicode, unicode, bool) -> None
# type: (str, str, bool) -> None
logger.debug('[app] adding source_suffix: %r, %r', suffix, filetype)
if suffix in self.source_suffix and not override:
raise ExtensionError(__('source_suffix %r is already registered') % suffix)
@ -286,7 +285,7 @@ class SphinxComponentRegistry:
logger.debug('[app] adding search source_parser: %r', args)
if len(args) == 1:
# new style arguments: (source_parser)
suffix = None # type: unicode
suffix = None # type: str
parser = args[0] # type: Type[Parser]
else:
# old style arguments: (suffix, source_parser)
@ -319,18 +318,18 @@ class SphinxComponentRegistry:
self.source_parsers[suffix] = parser
def get_source_parser(self, filetype):
# type: (unicode) -> Type[Parser]
# type: (str) -> Type[Parser]
try:
return self.source_parsers[filetype]
except KeyError:
raise SphinxError(__('Source parser for %s not registered') % filetype)
def get_source_parsers(self):
# type: () -> Dict[unicode, Type[Parser]]
# type: () -> Dict[str, Type[Parser]]
return self.source_parsers
def create_source_parser(self, app, filename):
# type: (Sphinx, unicode) -> Parser
# type: (Sphinx, str) -> Parser
parser_class = self.get_source_parser(filename)
parser = parser_class()
if isinstance(parser, SphinxParser):
@ -346,7 +345,7 @@ class SphinxComponentRegistry:
self.source_inputs[filetype] = input_class
def get_source_input(self, filetype):
# type: (unicode) -> Type[Input]
# type: (str) -> Type[Input]
try:
return self.source_inputs[filetype]
except KeyError:
@ -357,7 +356,7 @@ class SphinxComponentRegistry:
raise SphinxError(__('source_input for %s not registered') % filetype)
def add_translator(self, name, translator, override=False):
# type: (unicode, Type[nodes.NodeVisitor], bool) -> None
# type: (str, Type[nodes.NodeVisitor], bool) -> None
logger.debug('[app] Change of translator for the %s builder.' % name)
if name in self.translators and not override:
raise ExtensionError(__('Translator for %r already exists') % name)
@ -418,35 +417,35 @@ class SphinxComponentRegistry:
return self.post_transforms
def add_documenter(self, objtype, documenter):
# type: (unicode, Type[Documenter]) -> None
# type: (str, Type[Documenter]) -> None
self.documenters[objtype] = documenter
def add_autodoc_attrgetter(self, typ, attrgetter):
# type: (Type, Callable[[Any, unicode, Any], Any]) -> None
# type: (Type, Callable[[Any, str, Any], Any]) -> None
self.autodoc_attrgettrs[typ] = attrgetter
def add_css_files(self, filename, **attributes):
self.css_files.append((filename, attributes))
def add_js_file(self, filename, **attributes):
# type: (unicode, **unicode) -> None
# type: (str, **str) -> None
logger.debug('[app] adding js_file: %r, %r', filename, attributes)
self.js_files.append((filename, attributes))
def add_latex_package(self, name, options):
# type: (unicode, unicode) -> None
# type: (str, str) -> None
logger.debug('[app] adding latex package: %r', name)
self.latex_packages.append((name, options))
def add_enumerable_node(self, node, figtype, title_getter=None, override=False):
# type: (Type[nodes.Node], unicode, TitleGetter, bool) -> None
# type: (Type[nodes.Node], str, TitleGetter, bool) -> None
logger.debug('[app] adding enumerable node: (%r, %r, %r)', node, figtype, title_getter)
if node in self.enumerable_nodes and not override:
raise ExtensionError(__('enumerable_node %r already registered') % node)
self.enumerable_nodes[node] = (figtype, title_getter)
def add_html_math_renderer(self, name, inline_renderers, block_renderers):
# type: (unicode, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
# type: (str, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
logger.debug('[app] adding html_math_renderer: %s, %r, %r',
name, inline_renderers, block_renderers)
if name in self.html_inline_math_renderers:
@ -456,7 +455,7 @@ class SphinxComponentRegistry:
self.html_block_math_renderers[name] = block_renderers
def load_extension(self, app, extname):
# type: (Sphinx, unicode) -> None
# type: (Sphinx, str) -> None
"""Load a Sphinx extension."""
if extname in app.extensions: # already loaded
return
@ -478,7 +477,7 @@ class SphinxComponentRegistry:
if not hasattr(mod, 'setup'):
logger.warning(__('extension %r has no setup() function; is it really '
'a Sphinx extension module?'), extname)
metadata = {} # type: Dict[unicode, Any]
metadata = {} # type: Dict[str, Any]
else:
try:
metadata = mod.setup(app)
@ -501,7 +500,7 @@ class SphinxComponentRegistry:
app.extensions[extname] = Extension(extname, mod, **metadata)
def get_envversion(self, app):
# type: (Sphinx) -> Dict[unicode, unicode]
# type: (Sphinx) -> Dict[str, str]
from sphinx.environment import ENV_VERSION
envversion = {ext.name: ext.metadata['env_version'] for ext in app.extensions.values()
if ext.metadata.get('env_version')}
@ -525,7 +524,7 @@ def merge_source_suffix(app, config):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
app.connect('config-inited', merge_source_suffix)
return {
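
The registry above is typically filled through the public ``Sphinx`` application methods rather than directly; a hedged sketch of the calls an extension would make to land entries in ``source_suffix``, ``js_files`` and ``css_files`` (the file names are hypothetical):

if False:
    # For type annotation
    from typing import Any, Dict  # NOQA
    from sphinx.application import Sphinx  # NOQA


def setup(app):
    # type: (Sphinx) -> Dict[str, Any]
    app.add_source_suffix('.rest', 'restructuredtext')  # ends up in registry.source_suffix
    app.add_js_file('custom.js')                        # stored as (filename, attributes)
    app.add_css_file('custom.css')
    return {'version': '0.1', 'parallel_read_safe': True}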

View File

@ -26,7 +26,7 @@ if False:
from docutils.parsers.rst.states import Inliner # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import RoleFunction, unicode # NOQA
from sphinx.util.typing import RoleFunction # NOQA
generic_docroles = {
@ -84,7 +84,7 @@ class XRefRole:
self.innernodeclass = innernodeclass
def _fix_parens(self, env, has_explicit_title, title, target):
# type: (BuildEnvironment, bool, unicode, unicode) -> Tuple[unicode, unicode]
# type: (BuildEnvironment, bool, str, str) -> Tuple[str, str]
if not has_explicit_title:
if title.endswith('()'):
# remove parentheses
@ -99,7 +99,7 @@ class XRefRole:
def __call__(self, typ, rawtext, text, lineno, inliner,
options={}, content=[]):
# type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
env = inliner.document.settings.env
if not typ:
typ = env.temp_data.get('default_role')
@ -110,7 +110,7 @@ class XRefRole:
else:
typ = typ.lower()
if ':' not in typ:
domain, role = '', typ # type: unicode, unicode
domain, role = '', typ
classes = ['xref', role]
else:
domain, role = typ.split(':', 1)
@ -150,7 +150,7 @@ class XRefRole:
# methods that can be overwritten
def process_link(self, env, refnode, has_explicit_title, title, target):
# type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
# type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
"""Called after parsing title and target text, and creating the
reference node (given in *refnode*). This method can alter the
reference node and must return a new (or the same) ``(title, target)``
@ -170,7 +170,7 @@ class XRefRole:
class AnyXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
# type: (BuildEnvironment, nodes.Element, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
# type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
result = super(AnyXRefRole, self).process_link(env, refnode, has_explicit_title,
title, target)
# add all possible context info (i.e. std:program, py:module etc.)
@ -179,7 +179,7 @@ class AnyXRefRole(XRefRole):
def indexmarkup_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
# type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
"""Role for PEP/RFC references that generate an index entry."""
env = inliner.document.settings.env
if not typ:
@ -199,7 +199,7 @@ def indexmarkup_role(typ, rawtext, text, lineno, inliner, options={}, content=[]
indexnode['entries'] = [
('single', _('Python Enhancement Proposals; PEP %s') % target,
targetid, '', None)]
anchor = '' # type: unicode
anchor = ''
anchorindex = target.find('#')
if anchorindex > 0:
target, anchor = target[:anchorindex], target[anchorindex:]
@ -248,7 +248,7 @@ _amp_re = re.compile(r'(?<!&)&(?![&\s])')
def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
# type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
env = inliner.document.settings.env
if not typ:
assert env.temp_data['default_role']
@ -287,7 +287,7 @@ parens_re = re.compile(r'(\\*{|\\*})')
def emph_literal_role(typ, rawtext, text, lineno, inliner,
options={}, content=[]):
# type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
env = inliner.document.settings.env
if not typ:
assert env.temp_data['default_role']
@ -339,20 +339,20 @@ _abbr_re = re.compile(r'\((.*)\)$', re.S)
def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
# type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
text = utils.unescape(text)
m = _abbr_re.search(text)
if m is None:
return [addnodes.abbreviation(text, text, **options)], []
return [nodes.abbreviation(text, text, **options)], []
abbr = text[:m.start()].strip()
expl = m.group(1)
options = options.copy()
options['explanation'] = expl
return [addnodes.abbreviation(abbr, abbr, **options)], []
return [nodes.abbreviation(abbr, abbr, **options)], []
def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
# type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# create new reference target
env = inliner.document.settings.env
targetid = 'index-%s' % env.new_serialno('index')
@ -398,7 +398,7 @@ specific_docroles = {
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
# type: (Sphinx) -> Dict[str, Any]
from docutils.parsers.rst import roles
for rolename, nodeclass in generic_docroles.items():
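
The only behavioural change in this file is ``abbr_role()`` returning ``docutils.nodes.abbreviation`` instead of the deprecated ``sphinx.addnodes`` copy; the text handling itself is unchanged and easy to check in isolation (a standalone sketch, no docutils required):

import re

_abbr_re = re.compile(r'\((.*)\)$', re.S)  # same pattern as above


def split_abbr(text):
    # type: (str) -> tuple
    """Split 'ABBR (explanation)' into its abbreviation and explanation parts."""
    m = _abbr_re.search(text)
    if m is None:
        return text, None
    return text[:m.start()].strip(), m.group(1)


print(split_abbr('HTML (HyperText Markup Language)'))  # ('HTML', 'HyperText Markup Language')
print(split_abbr('HTTP'))                              # ('HTTP', None)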

View File

@ -29,7 +29,6 @@ if False:
from typing import Any, Dict, IO, Iterable, List, Tuple, Type, Set # NOQA
from docutils import nodes # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import unicode # NOQA
class SearchLanguage:
@ -54,10 +53,10 @@ class SearchLanguage:
This class is used to preprocess the search words that readers type into the
Sphinx HTML search, before the index is searched. The default implementation does nothing.
"""
lang = None # type: unicode
language_name = None # type: unicode
stopwords = set() # type: Set[unicode]
js_stemmer_rawcode = None # type: unicode
lang = None # type: str
language_name = None # type: str
stopwords = set() # type: Set[str]
js_stemmer_rawcode = None # type: str
js_stemmer_code = """
/**
* Dummy stemmer for languages without stemming rules.
@ -67,7 +66,7 @@ var Stemmer = function() {
return w;
}
}
""" # type: unicode
"""
_word_re = re.compile(r'(?u)\w+')
@ -83,7 +82,7 @@ var Stemmer = function() {
"""
def split(self, input):
# type: (unicode) -> List[unicode]
# type: (str) -> List[str]
"""
This method splits a sentence into words. Default splitter splits input
at white spaces, which should be enough for most languages except CJK
@ -92,7 +91,7 @@ var Stemmer = function() {
return self._word_re.findall(input)
def stem(self, word):
# type: (unicode) -> unicode
# type: (str) -> str
"""
This method implements the stemming algorithm of the Python version.
@ -106,7 +105,7 @@ var Stemmer = function() {
return word
def word_filter(self, word):
# type: (unicode) -> bool
# type: (str) -> bool
"""
Return true if the target word should be registered in the search index.
This method is called after stemming.
@ -124,13 +123,13 @@ from sphinx.search.en import SearchEnglish
def parse_stop_word(source):
# type: (unicode) -> Set[unicode]
# type: (str) -> Set[str]
"""
Parse a Snowball-style word list like this:
* http://snowball.tartarus.org/algorithms/finnish/stop.txt
"""
result = set() # type: Set[unicode]
result = set() # type: Set[str]
for line in source.splitlines():
line = line.split('|')[0] # remove comment
result.update(line.split())
@ -156,7 +155,7 @@ languages = {
'sv': 'sphinx.search.sv.SearchSwedish',
'tr': 'sphinx.search.tr.SearchTurkish',
'zh': 'sphinx.search.zh.SearchChinese',
} # type: Dict[unicode, Any]
} # type: Dict[str, Any]
class _JavaScriptIndex:
@ -169,7 +168,7 @@ class _JavaScriptIndex:
SUFFIX = ')'
def dumps(self, data):
# type: (Any) -> unicode
# type: (Any) -> str
return self.PREFIX + jsdump.dumps(data) + self.SUFFIX
def loads(self, s):
@ -200,8 +199,8 @@ class WordCollector(nodes.NodeVisitor):
def __init__(self, document, lang):
# type: (nodes.document, SearchLanguage) -> None
super(WordCollector, self).__init__(document)
self.found_words = [] # type: List[unicode]
self.found_title_words = [] # type: List[unicode]
self.found_words = [] # type: List[str]
self.found_title_words = [] # type: List[str]
self.lang = lang
def is_meta_keywords(self, node, nodetype=None):
@ -251,24 +250,24 @@ class IndexBuilder:
formats = {
'jsdump': jsdump,
'pickle': pickle
} # type: Dict[unicode, Any]
}
def __init__(self, env, lang, options, scoring):
# type: (BuildEnvironment, unicode, Dict, unicode) -> None
# type: (BuildEnvironment, str, Dict, str) -> None
self.env = env
self._titles = {} # type: Dict[unicode, unicode]
self._titles = {} # type: Dict[str, str]
# docname -> title
self._filenames = {} # type: Dict[unicode, unicode]
self._filenames = {} # type: Dict[str, str]
# docname -> filename
self._mapping = {} # type: Dict[unicode, Set[unicode]]
self._mapping = {} # type: Dict[str, Set[str]]
# stemmed word -> set(docname)
self._title_mapping = {} # type: Dict[unicode, Set[unicode]]
self._title_mapping = {} # type: Dict[str, Set[str]]
# stemmed words in titles -> set(docname)
self._stem_cache = {} # type: Dict[unicode, unicode]
self._stem_cache = {} # type: Dict[str, str]
# word -> stemmed word
self._objtypes = {} # type: Dict[Tuple[unicode, unicode], int]
self._objtypes = {} # type: Dict[Tuple[str, str], int]
# objtype -> index
self._objnames = {} # type: Dict[int, Tuple[unicode, unicode, unicode]]
self._objnames = {} # type: Dict[int, Tuple[str, str, str]]
# objtype index -> (domain, type, objname (localized))
lang_class = languages.get(lang) # type: Type[SearchLanguage]
# add language-specific SearchLanguage instance
@ -310,7 +309,7 @@ class IndexBuilder:
self._titles = dict(zip(index2fn, frozen['titles']))
def load_terms(mapping):
# type: (Dict[unicode, Any]) -> Dict[unicode, Set[unicode]]
# type: (Dict[str, Any]) -> Dict[str, Set[str]]
rv = {}
for k, v in mapping.items():
if isinstance(v, int):
@ -331,8 +330,8 @@ class IndexBuilder:
format.dump(self.freeze(), stream)
def get_objects(self, fn2index):
# type: (Dict[unicode, int]) -> Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]] # NOQA
rv = {} # type: Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]]
# type: (Dict[str, int]) -> Dict[str, Dict[str, Tuple[int, int, int, str]]]
rv = {} # type: Dict[str, Dict[str, Tuple[int, int, int, str]]]
otypes = self._objtypes
onames = self._objnames
for domainname, domain in sorted(self.env.domains.items()):
@ -359,7 +358,7 @@ class IndexBuilder:
else:
onames[typeindex] = (domainname, type, type)
if anchor == fullname:
shortanchor = '' # type: unicode
shortanchor = ''
elif anchor == type + '-' + fullname:
shortanchor = '-'
else:
@ -368,8 +367,8 @@ class IndexBuilder:
return rv
def get_terms(self, fn2index):
# type: (Dict) -> Tuple[Dict[unicode, List[unicode]], Dict[unicode, List[unicode]]]
rvs = {}, {} # type: Tuple[Dict[unicode, List[unicode]], Dict[unicode, List[unicode]]]
# type: (Dict) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]
rvs = {}, {} # type: Tuple[Dict[str, List[str]], Dict[str, List[str]]]
for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
for k, v in mapping.items():
if len(v) == 1:
@ -381,7 +380,7 @@ class IndexBuilder:
return rvs
def freeze(self):
# type: () -> Dict[unicode, Any]
# type: () -> Dict[str, Any]
"""Create a usable data structure for serializing."""
docnames, titles = zip(*sorted(self._titles.items()))
filenames = [self._filenames.get(docname) for docname in docnames]
@ -397,11 +396,11 @@ class IndexBuilder:
titleterms=title_terms, envversion=self.env.version)
def label(self):
# type: () -> unicode
# type: () -> str
return "%s (code: %s)" % (self.lang.language_name, self.lang.lang)
def prune(self, docnames):
# type: (Iterable[unicode]) -> None
# type: (Iterable[str]) -> None
"""Remove data for all docnames not in the list."""
new_titles = {}
new_filenames = {}
@ -417,7 +416,7 @@ class IndexBuilder:
wordnames.intersection_update(docnames)
def feed(self, docname, filename, title, doctree):
# type: (unicode, unicode, unicode, nodes.document) -> None
# type: (str, str, str, nodes.document) -> None
"""Feed a doctree to the index."""
self._titles[docname] = title
self._filenames[docname] = filename
@ -427,7 +426,7 @@ class IndexBuilder:
# memoize self.lang.stem
def stem(word):
# type: (unicode) -> unicode
# type: (str) -> str
try:
return self._stem_cache[word]
except KeyError:
@ -452,7 +451,7 @@ class IndexBuilder:
self._mapping.setdefault(stemmed_word, set()).add(docname)
def context_for_searchtool(self):
# type: () -> Dict[unicode, Any]
# type: () -> Dict[str, Any]
return {
'search_language_stemming_code': self.lang.js_stemmer_code,
'search_language_stop_words': jsdump.dumps(sorted(self.lang.stopwords)),
@ -461,7 +460,7 @@ class IndexBuilder:
}
def get_js_stemmer_rawcode(self):
# type: () -> unicode
# type: () -> str
if self.lang.js_stemmer_rawcode:
return path.join(package_dir, 'search', 'non-minified-js',
self.lang.js_stemmer_rawcode)
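
The core of ``parse_stop_word()`` is visible above, so its behaviour on a Snowball-style list can be checked directly (assuming it is importable from ``sphinx.search``, where this file lives):

from sphinx.search import parse_stop_word

source = """| comment lines start with a pipe
och  | and
det  | it, this, that
att
"""
print(parse_stop_word(source))  # a set: {'och', 'det', 'att'} (order not significant)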

View File

@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
from sphinx.util.typing import unicode # NOQA
danish_stopwords = parse_stop_word(u'''
@ -135,5 +134,5 @@ class SearchDanish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('danish')
def stem(self, word):
# type: (unicode) -> unicode
# type: (str) -> str
return self.stemmer.stemWord(word.lower())
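
The language classes in this and the following files all wrap the same third-party call; a hedged sketch of the underlying ``snowballstemmer`` API used by ``SearchDanish.stem()`` above (the sample word is arbitrary):

import snowballstemmer

stemmer = snowballstemmer.stemmer('danish')
print(stemmer.stemWord('bøgerne'.lower()))  # prints the stemmed form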

View File

@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
from sphinx.util.typing import unicode # NOQA
german_stopwords = parse_stop_word(u'''
@ -318,5 +317,5 @@ class SearchGerman(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('german')
def stem(self, word):
# type: (unicode) -> unicode
# type: (str) -> str
return self.stemmer.stemWord(word.lower())

View File

@ -15,7 +15,6 @@ from sphinx.util.stemmer import get_stemmer
if False:
# For type annotation
from typing import Dict # NOQA
from sphinx.util.typing import unicode # NOQA
english_stopwords = set(u"""
a and are as at
@ -227,5 +226,5 @@ class SearchEnglish(SearchLanguage):
self.stemmer = get_stemmer()
def stem(self, word):
# type: (unicode) -> unicode
# type: (str) -> str
return self.stemmer.stem(word.lower())

View File

@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
from sphinx.util.typing import unicode # NOQA
spanish_stopwords = parse_stop_word(u'''
@ -378,5 +377,5 @@ class SearchSpanish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('spanish')
def stem(self, word):
# type: (unicode) -> unicode
# type: (str) -> str
return self.stemmer.stemWord(word.lower())

View File

@ -16,7 +16,6 @@ import snowballstemmer
if False:
# For type annotation
from typing import Any # NOQA
from sphinx.util.typing import unicode # NOQA
finnish_stopwords = parse_stop_word(u'''
@ -128,5 +127,5 @@ class SearchFinnish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('finnish')
def stem(self, word):
# type: (unicode) -> unicode
# type: (str) -> str
return self.stemmer.stemWord(word.lower())

Some files were not shown because too many files have changed in this diff.