Merge branch '2.0'

Takeshi KOMIYA 2019-07-07 18:50:58 +09:00
commit 1c152d249c
14 changed files with 219 additions and 281 deletions

View File

@ -77,6 +77,10 @@ Bugs fixed
* #6511: LaTeX: autonumbered lists cannot be customized since Sphinx 1.8.0
  (refs: #6533)
* #6531: Failed to load the last environment object when an extension is added
* #736: Invalid sorting in the pair index
* #6527: :confval:`last_updated` wrongly assumes the timezone is UTC
* #5592: std domain: the :rst:dir:`option` directive registers an index entry
  for each comma-separated option
Testing
--------

View File

@ -72,10 +72,9 @@ PyPI
https://pythonhosted.org/.
GitHub Pages
Directories starting with underscores are ignored by default which breaks
static files in Sphinx. GitHub's preprocessor can be `disabled
<https://github.com/blog/572-bypassing-jekyll-on-github-pages>`_ to support
Sphinx HTML output properly.
Please add :py:mod:`sphinx.ext.githubpages` to your project. It allows you
to publish your documentation on GitHub Pages by generating the helper files
GitHub Pages needs automatically when the HTML output is built.
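As a concrete illustration of that entry, enabling the extension is a one-line addition to ``conf.py``; this is a minimal sketch that assumes the usual ``extensions`` list:

    # conf.py -- minimal sketch, other settings omitted
    extensions = [
        'sphinx.ext.githubpages',  # creates the .nojekyll helper file in the HTML output
    ]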
MediaWiki
See https://bitbucket.org/kevindunn/sphinx-wiki/wiki/Home, a project by

View File

@ -712,52 +712,66 @@ class StandaloneHTMLBuilder(Builder):
logger.warning(__('cannot copy downloadable file %r: %s'),
path.join(self.srcdir, src), err)
def create_pygments_style_file(self) -> None:
"""create a style file for pygments."""
with open(path.join(self.outdir, '_static', 'pygments.css'), 'w') as f:
f.write(self.highlighter.get_stylesheet())
def copy_translation_js(self) -> None:
"""Copy a JavaScript file for translations."""
if self.config.language is not None:
jsfile = self._get_translations_js()
if jsfile:
copyfile(jsfile, path.join(self.outdir, '_static', 'translations.js'))
def copy_stemmer_js(self) -> None:
"""Copy a JavaScript file for stemmer."""
if self.indexer is not None:
jsfile = self.indexer.get_js_stemmer_rawcode()
if jsfile:
copyfile(jsfile, path.join(self.outdir, '_static', '_stemmer.js'))
def copy_theme_static_files(self, context: Dict) -> None:
if self.theme:
for entry in self.theme.get_theme_dirs()[::-1]:
copy_asset(path.join(entry, 'static'),
path.join(self.outdir, '_static'),
excluded=DOTFILES, context=context, renderer=self.templates)
def copy_html_static_files(self, context: Dict) -> None:
excluded = Matcher(self.config.exclude_patterns + ["**/.*"])
for entry in self.config.html_static_path:
copy_asset(path.join(self.confdir, entry),
path.join(self.outdir, '_static'),
excluded, context=context, renderer=self.templates)
def copy_html_logo(self) -> None:
if self.config.html_logo:
copy_asset(path.join(self.confdir, self.config.html_logo),
path.join(self.outdir, '_static'))
def copy_html_favicon(self) -> None:
if self.config.html_favicon:
copy_asset(path.join(self.confdir, self.config.html_favicon),
path.join(self.outdir, '_static'))
def copy_static_files(self) -> None:
try:
# copy static files
with progress_message(__('copying static files... ')):
ensuredir(path.join(self.outdir, '_static'))
# first, create pygments style file
with open(path.join(self.outdir, '_static', 'pygments.css'), 'w') as f:
f.write(self.highlighter.get_stylesheet())
# then, copy translations JavaScript file
if self.config.language is not None:
jsfile = self._get_translations_js()
if jsfile:
copyfile(jsfile, path.join(self.outdir, '_static',
'translations.js'))
# copy non-minified stemmer JavaScript file
# prepare context for templates
context = self.globalcontext.copy()
if self.indexer is not None:
jsfile = self.indexer.get_js_stemmer_rawcode()
if jsfile:
copyfile(jsfile, path.join(self.outdir, '_static', '_stemmer.js'))
context.update(self.indexer.context_for_searchtool())
ctx = self.globalcontext.copy()
# add context items for search function used in searchtools.js_t
if self.indexer is not None:
ctx.update(self.indexer.context_for_searchtool())
# then, copy over theme-supplied static files
if self.theme:
for theme_path in self.theme.get_theme_dirs()[::-1]:
entry = path.join(theme_path, 'static')
copy_asset(entry, path.join(self.outdir, '_static'), excluded=DOTFILES,
context=ctx, renderer=self.templates)
# then, copy over all user-supplied static files
excluded = Matcher(self.config.exclude_patterns + ["**/.*"])
for static_path in self.config.html_static_path:
entry = path.join(self.confdir, static_path)
copy_asset(entry, path.join(self.outdir, '_static'), excluded,
context=ctx, renderer=self.templates)
# copy logo and favicon files if not already in static path
if self.config.html_logo:
entry = path.join(self.confdir, self.config.html_logo)
copy_asset(entry, path.join(self.outdir, '_static'))
if self.config.html_favicon:
entry = path.join(self.confdir, self.config.html_favicon)
copy_asset(entry, path.join(self.outdir, '_static'))
self.create_pygments_style_file()
self.copy_translation_js()
self.copy_stemmer_js()
self.copy_theme_static_files(context)
self.copy_html_static_files(context)
self.copy_html_logo()
self.copy_html_favicon()
except OSError as err:
logger.warning(__('cannot copy static file %r'), err)
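For orientation, the copying pattern used by the new ``copy_html_static_files()`` helper can be reproduced standalone with ``sphinx.util.fileutil.copy_asset`` and a ``Matcher``; this is a hedged sketch, and the directory names are example values rather than the builder's own:

    from os import path

    from sphinx.util.fileutil import copy_asset
    from sphinx.util.matching import Matcher

    def copy_user_static(confdir, outdir, html_static_path, exclude_patterns):
        # same idea as copy_html_static_files(): skip excluded patterns and dotfiles
        excluded = Matcher(exclude_patterns + ["**/.*"])
        for entry in html_static_path:
            copy_asset(path.join(confdir, entry),
                       path.join(outdir, '_static'),
                       excluded)

    # copy_asset() returns quietly for a missing source directory, so this is safe to try:
    copy_user_static('/path/to/project', '/path/to/project/_build/html', ['_static'], [])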

View File

@ -197,12 +197,14 @@ class Cmdoption(ObjectDescription):
domain.add_program_option(currprogram, optname,
self.env.docname, signode['ids'][0])
# create only one index entry for the whole option
if optname == firstname:
self.indexnode['entries'].append(
('pair', _('%scommand line option; %s') %
((currprogram and currprogram + ' ' or ''), sig),
signode['ids'][0], '', None))
# create an index entry
if currprogram:
descr = _('%s command line option') % currprogram
else:
descr = _('command line option')
for option in sig.split(', '):
entry = '; '.join([descr, option])
self.indexnode['entries'].append(('pair', entry, signode['ids'][0], '', None))
class Program(SphinxDirective):
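To see what the new index code above produces, here is a hedged standalone sketch of the per-option entry text; ``cmd`` and the option signature are example values matching the updated test further below:

    def option_index_entries(program, sig):
        # mirrors the new Cmdoption logic: one 'pair' entry per comma-separated option
        if program:
            descr = '%s command line option' % program
        else:
            descr = 'command line option'
        return ['; '.join([descr, option]) for option in sig.split(', ')]

    print(option_index_entries('cmd', '-o directory, --output directory'))
    # ['cmd command line option; -o directory',
    #  'cmd command line option; --output directory']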

View File

@ -133,11 +133,21 @@ class IndexEntries:
oldsubitems = subitems
i += 1
# sort the sub-index entries
def keyfunc2(entry: Tuple[str, List]) -> str:
key = unicodedata.normalize('NFD', entry[0].lower())
if key.startswith('\N{RIGHT-TO-LEFT MARK}'):
key = key[1:]
if key[0:1].isalpha() or key.startswith('_'):
key = chr(127) + key
return key
# group the entries by letter
def keyfunc2(item: Tuple[str, List]) -> str:
def keyfunc3(item: Tuple[str, List]) -> str:
# hack: mutating the subitems dicts to a list in the keyfunc
k, v = item
v[1] = sorted((si, se) for (si, (se, void, void)) in v[1].items())
v[1] = sorted(((si, se) for (si, (se, void, void)) in v[1].items()),
key=keyfunc2)
if v[2] is None:
# now calculate the key
if k.startswith('\N{RIGHT-TO-LEFT MARK}'):
@ -151,4 +161,4 @@ class IndexEntries:
else:
return v[2]
return [(key_, list(group))
for (key_, group) in groupby(newlist, keyfunc2)]
for (key_, group) in groupby(newlist, keyfunc3)]
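The new ``keyfunc2`` can be exercised on its own; this sketch reproduces the key (NFD-normalize, drop a leading RIGHT-TO-LEFT MARK, push alphabetic and underscore-prefixed entries behind symbols) and yields the same sub-entry order as the updated test at the end of this commit:

    import unicodedata

    def subentry_key(text):
        key = unicodedata.normalize('NFD', text.lower())
        if key.startswith('\N{RIGHT-TO-LEFT MARK}'):
            key = key[1:]
        if key[0:1].isalpha() or key.startswith('_'):
            key = chr(127) + key
        return key

    print(sorted([':+1:', 'documentation tool', 'Ель', 'ёлка'], key=subentry_key))
    # [':+1:', 'documentation tool', 'ёлка', 'Ель']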

View File

@ -11,29 +11,25 @@
import re
from io import StringIO
from os import path
from typing import Any, Dict, IO, List, Tuple
from zipfile import ZipFile
from sphinx.errors import PycodeError
from sphinx.pycode.parser import Parser
from sphinx.util import get_module_source, detect_encoding
if False:
# For type annotation
from typing import Any, Dict, IO, List, Tuple # NOQA
class ModuleAnalyzer:
# cache for analyzer objects -- caches both by module and file name
cache = {} # type: Dict[Tuple[str, str], Any]
@classmethod
def for_string(cls, string, modname, srcname='<string>'):
# type: (str, str, str) -> ModuleAnalyzer
def for_string(cls, string: str, modname: str, srcname: str = '<string>'
) -> "ModuleAnalyzer":
return cls(StringIO(string), modname, srcname, decoded=True)
@classmethod
def for_file(cls, filename, modname):
# type: (str, str) -> ModuleAnalyzer
def for_file(cls, filename: str, modname: str) -> "ModuleAnalyzer":
if ('file', filename) in cls.cache:
return cls.cache['file', filename]
try:
@ -48,8 +44,7 @@ class ModuleAnalyzer:
return obj
@classmethod
def for_egg(cls, filename, modname):
# type: (str, str) -> ModuleAnalyzer
def for_egg(cls, filename: str, modname: str) -> "ModuleAnalyzer":
SEP = re.escape(path.sep)
eggpath, relpath = re.split('(?<=\\.egg)' + SEP, filename)
try:
@ -60,8 +55,7 @@ class ModuleAnalyzer:
raise PycodeError('error opening %r' % filename, exc)
@classmethod
def for_module(cls, modname):
# type: (str) -> ModuleAnalyzer
def for_module(cls, modname: str) -> "ModuleAnalyzer":
if ('module', modname) in cls.cache:
entry = cls.cache['module', modname]
if isinstance(entry, PycodeError):
@ -80,8 +74,7 @@ class ModuleAnalyzer:
cls.cache['module', modname] = obj
return obj
def __init__(self, source, modname, srcname, decoded=False):
# type: (IO, str, str, bool) -> None
def __init__(self, source: IO, modname: str, srcname: str, decoded: bool = False) -> None:
self.modname = modname # name of the module
self.srcname = srcname # name of the source file
@ -100,8 +93,7 @@ class ModuleAnalyzer:
self.tagorder = None # type: Dict[str, int]
self.tags = None # type: Dict[str, Tuple[str, int, int]]
def parse(self):
# type: () -> None
def parse(self) -> None:
"""Parse the source code."""
try:
parser = Parser(self.code, self.encoding)
@ -119,16 +111,14 @@ class ModuleAnalyzer:
except Exception as exc:
raise PycodeError('parsing %r failed: %r' % (self.srcname, exc))
def find_attr_docs(self):
# type: () -> Dict[Tuple[str, str], List[str]]
def find_attr_docs(self) -> Dict[Tuple[str, str], List[str]]:
"""Find class and module-level attributes and their documentation."""
if self.attr_docs is None:
self.parse()
return self.attr_docs
def find_tags(self):
# type: () -> Dict[str, Tuple[str, int, int]]
def find_tags(self) -> Dict[str, Tuple[str, int, int]]:
"""Find class, function and method definitions and their location."""
if self.tags is None:
self.parse()
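The annotation cleanup does not change how the analyzer is used; a minimal sketch, where the analyzed source string is an example:

    from sphinx.pycode import ModuleAnalyzer

    source = "#: answer to everything\nANSWER = 42\n"
    analyzer = ModuleAnalyzer.for_string(source, 'example_mod')
    print(analyzer.find_attr_docs())  # attribute comments keyed by (namespace, name)
    print(analyzer.find_tags())       # class/function locations; empty for this source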

View File

@ -15,10 +15,8 @@ import sys
import tokenize
from token import NAME, NEWLINE, INDENT, DEDENT, NUMBER, OP, STRING
from tokenize import COMMENT, NL
from typing import Any, Dict, List, Tuple
if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
comment_re = re.compile('^\\s*#: ?(.*)\r?\n?$')
indent_re = re.compile('^\\s*$')
@ -31,13 +29,11 @@ else:
ASSIGN_NODES = (ast.Assign)
def filter_whitespace(code):
# type: (str) -> str
def filter_whitespace(code: str) -> str:
return code.replace('\f', ' ') # replace FF (form feed) with whitespace
def get_assign_targets(node):
# type: (ast.AST) -> List[ast.expr]
def get_assign_targets(node: ast.AST) -> List[ast.expr]:
"""Get list of targets from Assign and AnnAssign node."""
if isinstance(node, ast.Assign):
return node.targets
@ -45,8 +41,7 @@ def get_assign_targets(node):
return [node.target] # type: ignore
def get_lvar_names(node, self=None):
# type: (ast.AST, ast.arg) -> List[str]
def get_lvar_names(node: ast.AST, self: ast.arg = None) -> List[str]:
"""Convert assignment-AST to variable names.
This raises `TypeError` if the assignment does not create a new variable::
@ -88,11 +83,9 @@ def get_lvar_names(node, self=None):
raise NotImplementedError('Unexpected node name %r' % node_name)
def dedent_docstring(s):
# type: (str) -> str
def dedent_docstring(s: str) -> str:
"""Remove common leading indentation from docstring."""
def dummy():
# type: () -> None
def dummy() -> None:
# dummy function to mock `inspect.getdoc`.
pass
@ -104,16 +97,15 @@ def dedent_docstring(s):
class Token:
"""Better token wrapper for tokenize module."""
def __init__(self, kind, value, start, end, source):
# type: (int, Any, Tuple[int, int], Tuple[int, int], str) -> None
def __init__(self, kind: int, value: Any, start: Tuple[int, int], end: Tuple[int, int],
source: str) -> None:
self.kind = kind
self.value = value
self.start = start
self.end = end
self.source = source
def __eq__(self, other):
# type: (Any) -> bool
def __eq__(self, other: Any) -> bool:
if isinstance(other, int):
return self.kind == other
elif isinstance(other, str):
@ -125,32 +117,27 @@ class Token:
else:
raise ValueError('Unknown value: %r' % other)
def match(self, *conditions):
# type: (Any) -> bool
def match(self, *conditions) -> bool:
return any(self == candidate for candidate in conditions)
def __repr__(self):
# type: () -> str
def __repr__(self) -> str:
return '<Token kind=%r value=%r>' % (tokenize.tok_name[self.kind],
self.value.strip())
class TokenProcessor:
def __init__(self, buffers):
# type: (List[str]) -> None
def __init__(self, buffers: List[str]) -> None:
lines = iter(buffers)
self.buffers = buffers
self.tokens = tokenize.generate_tokens(lambda: next(lines))
self.current = None # type: Token
self.previous = None # type: Token
def get_line(self, lineno):
# type: (int) -> str
def get_line(self, lineno: int) -> str:
"""Returns specified line."""
return self.buffers[lineno - 1]
def fetch_token(self):
# type: () -> Token
def fetch_token(self) -> Token:
"""Fetch a next token from source code.
Returns ``False`` if sequence finished.
@ -163,8 +150,7 @@ class TokenProcessor:
return self.current
def fetch_until(self, condition):
# type: (Any) -> List[Token]
def fetch_until(self, condition: Any) -> List[Token]:
"""Fetch tokens until specified token appeared.
.. note:: This also handles parenthesis well.
@ -191,13 +177,11 @@ class AfterCommentParser(TokenProcessor):
and returns the comments for the variable if they exist.
"""
def __init__(self, lines):
# type: (List[str]) -> None
def __init__(self, lines: List[str]) -> None:
super().__init__(lines)
self.comment = None # type: str
def fetch_rvalue(self):
# type: () -> List[Token]
def fetch_rvalue(self) -> List[Token]:
"""Fetch right-hand value of assignment."""
tokens = []
while self.fetch_token():
@ -217,8 +201,7 @@ class AfterCommentParser(TokenProcessor):
return tokens
def parse(self):
# type: () -> None
def parse(self) -> None:
"""Parse the code and obtain comment after assignment."""
# skip lvalue (or whole of AnnAssign)
while not self.fetch_token().match([OP, '='], NEWLINE, COMMENT):
@ -235,8 +218,7 @@ class AfterCommentParser(TokenProcessor):
class VariableCommentPicker(ast.NodeVisitor):
"""Python source code parser to pick up variable comments."""
def __init__(self, buffers, encoding):
# type: (List[str], str) -> None
def __init__(self, buffers: List[str], encoding: str) -> None:
self.counter = itertools.count()
self.buffers = buffers
self.encoding = encoding
@ -248,8 +230,7 @@ class VariableCommentPicker(ast.NodeVisitor):
self.deforders = {} # type: Dict[str, int]
super().__init__()
def add_entry(self, name):
# type: (str) -> None
def add_entry(self, name: str) -> None:
if self.current_function:
if self.current_classes and self.context[-1] == "__init__":
# store variable comments inside __init__ method of classes
@ -261,8 +242,7 @@ class VariableCommentPicker(ast.NodeVisitor):
self.deforders[".".join(definition)] = next(self.counter)
def add_variable_comment(self, name, comment):
# type: (str, str) -> None
def add_variable_comment(self, name: str, comment: str) -> None:
if self.current_function:
if self.current_classes and self.context[-1] == "__init__":
# store variable comments inside __init__ method of classes
@ -274,27 +254,23 @@ class VariableCommentPicker(ast.NodeVisitor):
self.comments[(context, name)] = comment
def get_self(self):
# type: () -> ast.arg
def get_self(self) -> ast.arg:
"""Returns the name of first argument if in function."""
if self.current_function and self.current_function.args.args:
return self.current_function.args.args[0]
else:
return None
def get_line(self, lineno):
# type: (int) -> str
def get_line(self, lineno: int) -> str:
"""Returns specified line."""
return self.buffers[lineno - 1]
def visit(self, node):
# type: (ast.AST) -> None
def visit(self, node: ast.AST) -> None:
"""Updates self.previous to ."""
super().visit(node)
self.previous = node
def visit_Assign(self, node):
# type: (ast.Assign) -> None
def visit_Assign(self, node: ast.Assign) -> None:
"""Handles Assign node and pick up a variable comment."""
try:
targets = get_assign_targets(node)
@ -334,13 +310,11 @@ class VariableCommentPicker(ast.NodeVisitor):
for varname in varnames:
self.add_entry(varname)
def visit_AnnAssign(self, node):
# type: (ast.AST) -> None
def visit_AnnAssign(self, node: ast.AST) -> None: # Note: ast.AnnAssign not found in py35
"""Handles AnnAssign node and pick up a variable comment."""
self.visit_Assign(node) # type: ignore
def visit_Expr(self, node):
# type: (ast.Expr) -> None
def visit_Expr(self, node: ast.Expr) -> None:
"""Handles Expr node and pick up a comment if string."""
if (isinstance(self.previous, ASSIGN_NODES) and isinstance(node.value, ast.Str)):
try:
@ -357,8 +331,7 @@ class VariableCommentPicker(ast.NodeVisitor):
except TypeError:
pass # this assignment is not new definition!
def visit_Try(self, node):
# type: (ast.Try) -> None
def visit_Try(self, node: ast.Try) -> None:
"""Handles Try node and processes body and else-clause.
.. note:: pycode parser ignores object definitions in the except-clause.
@ -368,8 +341,7 @@ class VariableCommentPicker(ast.NodeVisitor):
for subnode in node.orelse:
self.visit(subnode)
def visit_ClassDef(self, node):
# type: (ast.ClassDef) -> None
def visit_ClassDef(self, node: ast.ClassDef) -> None:
"""Handles ClassDef node and set context."""
self.current_classes.append(node.name)
self.add_entry(node.name)
@ -380,8 +352,7 @@ class VariableCommentPicker(ast.NodeVisitor):
self.context.pop()
self.current_classes.pop()
def visit_FunctionDef(self, node):
# type: (ast.FunctionDef) -> None
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
"""Handles FunctionDef node and set context."""
if self.current_function is None:
self.add_entry(node.name) # should be called before setting self.current_function
@ -392,8 +363,7 @@ class VariableCommentPicker(ast.NodeVisitor):
self.context.pop()
self.current_function = None
def visit_AsyncFunctionDef(self, node):
# type: (ast.AsyncFunctionDef) -> None
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
"""Handles AsyncFunctionDef node and set context."""
self.visit_FunctionDef(node) # type: ignore
@ -403,16 +373,14 @@ class DefinitionFinder(TokenProcessor):
classes and methods.
"""
def __init__(self, lines):
# type: (List[str]) -> None
def __init__(self, lines: List[str]) -> None:
super().__init__(lines)
self.decorator = None # type: Token
self.context = [] # type: List[str]
self.indents = [] # type: List
self.definitions = {} # type: Dict[str, Tuple[str, int, int]]
def add_definition(self, name, entry):
# type: (str, Tuple[str, int, int]) -> None
def add_definition(self, name: str, entry: Tuple[str, int, int]) -> None:
"""Add a location of definition."""
if self.indents and self.indents[-1][0] == 'def' and entry[0] == 'def':
# ignore definition of inner function
@ -420,8 +388,7 @@ class DefinitionFinder(TokenProcessor):
else:
self.definitions[name] = entry
def parse(self):
# type: () -> None
def parse(self) -> None:
"""Parse the code to obtain location of definitions."""
while True:
token = self.fetch_token()
@ -442,8 +409,7 @@ class DefinitionFinder(TokenProcessor):
elif token == DEDENT:
self.finalize_block()
def parse_definition(self, typ):
# type: (str) -> None
def parse_definition(self, typ: str) -> None:
"""Parse AST of definition."""
name = self.fetch_token()
self.context.append(name.value)
@ -464,8 +430,7 @@ class DefinitionFinder(TokenProcessor):
self.add_definition(funcname, (typ, start_pos, name.end[0]))
self.context.pop()
def finalize_block(self):
# type: () -> None
def finalize_block(self) -> None:
"""Finalize definition block."""
definition = self.indents.pop()
if definition[0] != 'other':
@ -484,22 +449,19 @@ class Parser:
This is a better wrapper for ``VariableCommentPicker``.
"""
def __init__(self, code, encoding='utf-8'):
# type: (str, str) -> None
def __init__(self, code: str, encoding: str = 'utf-8') -> None:
self.code = filter_whitespace(code)
self.encoding = encoding
self.comments = {} # type: Dict[Tuple[str, str], str]
self.deforders = {} # type: Dict[str, int]
self.definitions = {} # type: Dict[str, Tuple[str, int, int]]
def parse(self):
# type: () -> None
def parse(self) -> None:
"""Parse the source code."""
self.parse_comments()
self.parse_definition()
def parse_comments(self):
# type: () -> None
def parse_comments(self) -> None:
"""Parse the code and pick up comments."""
tree = ast.parse(self.code.encode())
picker = VariableCommentPicker(self.code.splitlines(True), self.encoding)
@ -507,8 +469,7 @@ class Parser:
self.comments = picker.comments
self.deforders = picker.deforders
def parse_definition(self):
# type: () -> None
def parse_definition(self) -> None:
"""Parse the location of definitions from the code."""
parser = DefinitionFinder(self.code.splitlines(True))
parser.parse()
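Likewise for the parser itself, a hedged sketch of driving it directly (the parsed source is an example):

    from sphinx.pycode.parser import Parser

    code = (
        "class Point:\n"
        "    #: x coordinate\n"
        "    x = 0\n"
        "\n"
        "    def move(self, dx):\n"
        "        self.x += dx\n"
    )
    parser = Parser(code)
    parser.parse()
    print(parser.comments)     # the '#:' comment attached to Point.x
    print(parser.definitions)  # locations of the Point class and Point.move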

View File

@ -14,25 +14,20 @@ import sys
from collections import namedtuple
from io import StringIO
from subprocess import PIPE
from typing import Any, Dict
import pytest
from . import util
if False:
# For type annotation
from typing import Any, Dict, Union # NOQA
@pytest.fixture(scope='session')
def rootdir():
# type: () -> None
def rootdir() -> None:
return None
@pytest.fixture
def app_params(request, test_params, shared_result, sphinx_test_tempdir, rootdir):
# type: (Any, Any, Any, Any, Any) -> None
"""
Parameters that are specified by 'pytest.mark.sphinx' for
sphinx.application.Sphinx initialization
@ -158,10 +153,10 @@ def make_app(test_params, monkeypatch):
status, warning = StringIO(), StringIO()
kwargs.setdefault('status', status)
kwargs.setdefault('warning', warning)
app_ = util.SphinxTestApp(*args, **kwargs) # type: Union[util.SphinxTestApp, util.SphinxTestAppWrapperForSkipBuilding] # NOQA
app_ = util.SphinxTestApp(*args, **kwargs) # type: Any
apps.append(app_)
if test_params['shared_result']:
app_ = util.SphinxTestAppWrapperForSkipBuilding(app_) # type: ignore
app_ = util.SphinxTestAppWrapperForSkipBuilding(app_)
return app_
yield make
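For context, a test consuming these fixtures typically looks like the following hedged sketch; ``testroot='basic'`` is an example value, not something added by this commit:

    import pytest

    @pytest.mark.sphinx('html', testroot='basic')
    def test_build_html(app, status, warning):
        app.build()
        assert (app.outdir / 'index.html').exists()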

View File

@ -5,14 +5,12 @@
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import builtins
import os
import shutil
import sys
if False:
# For type annotation
import builtins # NOQA
from typing import Any, Callable, IO, List # NOQA
from typing import Any, Callable, IO, List
FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
@ -24,61 +22,52 @@ class path(str):
"""
@property
def parent(self):
# type: () -> path
def parent(self) -> "path":
"""
The name of the directory the file or directory is in.
"""
return self.__class__(os.path.dirname(self))
def basename(self):
# type: () -> str
def basename(self) -> str:
return os.path.basename(self)
def abspath(self):
# type: () -> path
def abspath(self) -> "path":
"""
Returns the absolute path.
"""
return self.__class__(os.path.abspath(self))
def isabs(self):
# type: () -> bool
def isabs(self) -> bool:
"""
Returns ``True`` if the path is absolute.
"""
return os.path.isabs(self)
def isdir(self):
# type: () -> bool
def isdir(self) -> bool:
"""
Returns ``True`` if the path is a directory.
"""
return os.path.isdir(self)
def isfile(self):
# type: () -> bool
def isfile(self) -> bool:
"""
Returns ``True`` if the path is a file.
"""
return os.path.isfile(self)
def islink(self):
# type: () -> bool
def islink(self) -> bool:
"""
Returns ``True`` if the path is a symbolic link.
"""
return os.path.islink(self)
def ismount(self):
# type: () -> bool
def ismount(self) -> bool:
"""
Returns ``True`` if the path is a mount point.
"""
return os.path.ismount(self)
def rmtree(self, ignore_errors=False, onerror=None):
# type: (bool, Callable) -> None
def rmtree(self, ignore_errors: bool = False, onerror: Callable = None) -> None:
"""
Removes the file or directory and any files or directories it may
contain.
@ -96,8 +85,7 @@ class path(str):
"""
shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror)
def copytree(self, destination, symlinks=False):
# type: (str, bool) -> None
def copytree(self, destination: str, symlinks: bool = False) -> None:
"""
Recursively copy a directory to the given `destination`. If the given
`destination` does not exist it will be created.
@ -109,8 +97,7 @@ class path(str):
"""
shutil.copytree(self, destination, symlinks=symlinks)
def movetree(self, destination):
# type: (str) -> None
def movetree(self, destination: str) -> None:
"""
Recursively move the file or directory to the given `destination`
similar to the Unix "mv" command.
@ -122,54 +109,46 @@ class path(str):
move = movetree
def unlink(self):
# type: () -> None
def unlink(self) -> None:
"""
Removes a file.
"""
os.unlink(self)
def stat(self):
# type: () -> Any
def stat(self) -> Any:
"""
Returns a stat of the file.
"""
return os.stat(self)
def utime(self, arg):
# type: (Any) -> None
def utime(self, arg: Any) -> None:
os.utime(self, arg)
def open(self, mode='r', **kwargs):
# type: (str, Any) -> IO
def open(self, mode: str = 'r', **kwargs) -> IO:
return open(self, mode, **kwargs)
def write_text(self, text, encoding='utf-8', **kwargs):
# type: (str, str, Any) -> None
def write_text(self, text: str, encoding: str = 'utf-8', **kwargs) -> None:
"""
Writes the given `text` to the file.
"""
with open(self, 'w', encoding=encoding, **kwargs) as f:
f.write(text)
def text(self, encoding='utf-8', **kwargs):
# type: (str, Any) -> str
def text(self, encoding: str = 'utf-8', **kwargs) -> str:
"""
Returns the text in the file.
"""
with open(self, encoding=encoding, **kwargs) as f:
return f.read()
def bytes(self):
# type: () -> builtins.bytes
def bytes(self) -> builtins.bytes:
"""
Returns the bytes in the file.
"""
with open(self, mode='rb') as f:
return f.read()
def write_bytes(self, bytes, append=False):
# type: (str, bool) -> None
def write_bytes(self, bytes: str, append: bool = False) -> None:
"""
Writes the given `bytes` to the file.
@ -183,41 +162,35 @@ class path(str):
with open(self, mode=mode) as f:
f.write(bytes)
def exists(self):
# type: () -> bool
def exists(self) -> bool:
"""
Returns ``True`` if the path exists.
"""
return os.path.exists(self)
def lexists(self):
# type: () -> bool
def lexists(self) -> bool:
"""
Returns ``True`` if the path exists unless it is a broken symbolic
link.
"""
return os.path.lexists(self)
def makedirs(self, mode=0o777, exist_ok=False):
# type: (int, bool) -> None
def makedirs(self, mode: int = 0o777, exist_ok: bool = False) -> None:
"""
Recursively create directories.
"""
os.makedirs(self, mode, exist_ok=exist_ok)
def joinpath(self, *args):
# type: (Any) -> path
def joinpath(self, *args) -> "path":
"""
Joins the path with the argument given and returns the result.
"""
return self.__class__(os.path.join(self, *map(self.__class__, args)))
def listdir(self):
# type: () -> List[str]
def listdir(self) -> List[str]:
return os.listdir(self)
__div__ = __truediv__ = joinpath
def __repr__(self):
# type: () -> str
def __repr__(self) -> str:
return '%s(%s)' % (self.__class__.__name__, super().__repr__())
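The ``path`` helper keeps its str-subclass behaviour after the annotation change; a small sketch, with the temporary directory created only for the example:

    import tempfile

    from sphinx.testing.path import path

    tmpdir = path(tempfile.mkdtemp())
    p = tmpdir / 'example.txt'   # __truediv__ is joinpath
    p.write_text('hello')
    print(p.text())              # 'hello'
    print(p.exists(), p.parent)
    tmpdir.rmtree()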

View File

@ -8,21 +8,16 @@
from os import path
from docutils import nodes
from docutils.core import publish_doctree
from sphinx.application import Sphinx
from sphinx.io import SphinxStandaloneReader
from sphinx.parsers import RSTParser
from sphinx.util.docutils import sphinx_domains
if False:
# For type annotation
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
def parse(app, text, docname='index'):
# type: (Sphinx, str, str) -> nodes.document
def parse(app: Sphinx, text: str, docname: str = 'index') -> nodes.document:
"""Parse a string as reStructuredText with Sphinx application."""
try:
app.env.temp_data['docname'] = docname

View File

@ -11,6 +11,7 @@ import os
import re
import sys
import warnings
from typing import Any, Dict, Generator, IO, List, Pattern
from xml.etree import ElementTree
from docutils import nodes
@ -23,10 +24,6 @@ from sphinx.pycode import ModuleAnalyzer
from sphinx.testing.path import path
from sphinx.util.osutil import relpath
if False:
# For type annotation
from typing import Any, Dict, Generator, IO, List, Pattern # NOQA
__all__ = [
'Struct',
@ -35,26 +32,22 @@ __all__ = [
]
def assert_re_search(regex, text, flags=0):
# type: (Pattern, str, int) -> None
def assert_re_search(regex: Pattern, text: str, flags: int = 0) -> None:
if not re.search(regex, text, flags):
assert False, '%r did not match %r' % (regex, text)
def assert_not_re_search(regex, text, flags=0):
# type: (Pattern, str, int) -> None
def assert_not_re_search(regex: Pattern, text: str, flags: int = 0) -> None:
if re.search(regex, text, flags):
assert False, '%r did match %r' % (regex, text)
def assert_startswith(thing, prefix):
# type: (str, str) -> None
def assert_startswith(thing: str, prefix: str) -> None:
if not thing.startswith(prefix):
assert False, '%r does not start with %r' % (thing, prefix)
def assert_node(node, cls=None, xpath="", **kwargs):
# type: (nodes.Node, Any, str, Any) -> None
def assert_node(node: nodes.Node, cls: Any = None, xpath: str = "", **kwargs) -> None:
if cls:
if isinstance(cls, list):
assert_node(node, cls[0], xpath=xpath, **kwargs)
@ -92,16 +85,14 @@ def assert_node(node, cls=None, xpath="", **kwargs):
'The node%s[%s] is not %r: %r' % (xpath, key, value, node[key])
def etree_parse(path):
# type: (str) -> Any
def etree_parse(path: str) -> Any:
with warnings.catch_warnings(record=False):
warnings.filterwarnings("ignore", category=DeprecationWarning)
return ElementTree.parse(path)
class Struct:
def __init__(self, **kwds):
# type: (Any) -> None
def __init__(self, **kwds) -> None:
self.__dict__.update(kwds)
@ -111,10 +102,9 @@ class SphinxTestApp(application.Sphinx):
better default values for the initialization parameters.
"""
def __init__(self, buildername='html', srcdir=None,
freshenv=False, confoverrides=None, status=None, warning=None,
tags=None, docutilsconf=None):
# type: (str, path, bool, Dict, IO, IO, List[str], str) -> None
def __init__(self, buildername: str = 'html', srcdir: path = None, freshenv: bool = False,
confoverrides: Dict = None, status: IO = None, warning: IO = None,
tags: List[str] = None, docutilsconf: str = None) -> None:
if docutilsconf is not None:
(srcdir / 'docutils.conf').write_text(docutilsconf)
@ -144,8 +134,7 @@ class SphinxTestApp(application.Sphinx):
self.cleanup()
raise
def cleanup(self, doctrees=False):
# type: (bool) -> None
def cleanup(self, doctrees: bool = False) -> None:
ModuleAnalyzer.cache.clear()
LaTeXBuilder.usepackages = []
locale.translators.clear()
@ -159,8 +148,7 @@ class SphinxTestApp(application.Sphinx):
delattr(nodes.GenericNodeVisitor, 'visit_' + method[6:])
delattr(nodes.GenericNodeVisitor, 'depart_' + method[6:])
def __repr__(self):
# type: () -> str
def __repr__(self) -> str:
return '<%s buildername=%r>' % (self.__class__.__name__, self.builder.name)
@ -171,16 +159,13 @@ class SphinxTestAppWrapperForSkipBuilding:
file.
"""
def __init__(self, app_):
# type: (SphinxTestApp) -> None
def __init__(self, app_: SphinxTestApp) -> None:
self.app = app_
def __getattr__(self, name):
# type: (str) -> Any
def __getattr__(self, name: str) -> Any:
return getattr(self.app, name)
def build(self, *args, **kw):
# type: (Any, Any) -> None
def build(self, *args, **kw) -> None:
if not self.app.outdir.listdir(): # type: ignore
# if listdir is empty, do build.
self.app.build(*args, **kw)
@ -190,15 +175,13 @@ class SphinxTestAppWrapperForSkipBuilding:
_unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')
def remove_unicode_literals(s):
# type: (str) -> str
def remove_unicode_literals(s: str) -> str:
warnings.warn('remove_unicode_literals() is deprecated.',
RemovedInSphinx40Warning, stacklevel=2)
return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)
def find_files(root, suffix=None):
# type: (str, bool) -> Generator
def find_files(root: str, suffix: bool = None) -> Generator[str, None, None]:
for dirpath, dirs, files in os.walk(root, followlinks=True):
dirpath = path(dirpath)
for f in [f for f in files if not suffix or f.endswith(suffix)]: # type: ignore
@ -206,6 +189,5 @@ def find_files(root, suffix=None):
yield relpath(fpath, root)
def strip_escseq(text):
# type: (str) -> str
def strip_escseq(text: str) -> str:
return re.sub('\x1b.*?m', '', text)
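A small hedged sketch of ``assert_node`` on a hand-built docutils tree (the node contents are examples):

    from docutils import nodes

    from sphinx.testing.util import assert_node

    para = nodes.paragraph('', 'hello world')
    section = nodes.section('', para)
    assert_node(section, nodes.section)       # isinstance check
    assert_node(section[0], nodes.paragraph)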

View File

@ -12,7 +12,7 @@ import os
import re
import warnings
from collections import namedtuple
from datetime import datetime
from datetime import datetime, timezone
from os import path
from typing import Callable, Generator, List, Set, Tuple
@ -270,7 +270,7 @@ def format_date(format: str, date: datetime = None, language: str = None) -> str
if source_date_epoch is not None:
date = datetime.utcfromtimestamp(float(source_date_epoch))
else:
date = datetime.utcnow()
date = datetime.now(timezone.utc).astimezone()
result = []
tokens = date_format_re.split(format)
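The practical difference behind #6527, sketched: ``utcnow()`` returns a naive datetime, so ``%Z``/``%z`` format as empty strings, while the new expression is timezone-aware in the local zone:

    from datetime import datetime, timezone

    naive = datetime.utcnow()
    aware = datetime.now(timezone.utc).astimezone()
    print(naive.strftime('%Y-%m-%d %H:%M %Z%z'))  # no timezone information
    print(aware.strftime('%Y-%m-%d %H:%M %Z%z'))  # local zone name and UTC offset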

View File

@ -261,22 +261,24 @@ def test_cmdoption(app):
def test_multiple_cmdoptions(app):
text = (".. program:: ls\n"
text = (".. program:: cmd\n"
"\n"
".. option:: -h, --help\n")
".. option:: -o directory, --output directory\n")
domain = app.env.get_domain('std')
doctree = restructuredtext.parse(app, text)
assert_node(doctree, (addnodes.index,
[desc, ([desc_signature, ([desc_name, "-h"],
[desc_addname, ()],
[desc, ([desc_signature, ([desc_name, "-o"],
[desc_addname, " directory"],
[desc_addname, ", "],
[desc_name, "--help"],
[desc_addname, ()])],
[desc_name, "--output"],
[desc_addname, " directory"])],
[desc_content, ()])]))
assert_node(doctree[0], addnodes.index,
entries=[('pair', 'ls command line option; -h, --help',
'cmdoption-ls-h', '', None)])
assert ('ls', '-h') in domain.progoptions
assert ('ls', '--help') in domain.progoptions
assert domain.progoptions[('ls', '-h')] == ('index', 'cmdoption-ls-h')
assert domain.progoptions[('ls', '--help')] == ('index', 'cmdoption-ls-h')
entries=[('pair', 'cmd command line option; -o directory',
'cmdoption-cmd-o', '', None),
('pair', 'cmd command line option; --output directory',
'cmdoption-cmd-o', '', None)])
assert ('cmd', '-o') in domain.progoptions
assert ('cmd', '--output') in domain.progoptions
assert domain.progoptions[('cmd', '-o')] == ('index', 'cmdoption-cmd-o')
assert domain.progoptions[('cmd', '--output')] == ('index', 'cmdoption-cmd-o')

View File

@ -47,19 +47,30 @@ def test_create_pair_index(app):
app.env.indexentries.clear()
text = (".. index:: pair: docutils; reStructuredText\n"
".. index:: pair: Python; interpreter\n"
".. index:: pair: Sphinx; documentation tool\n")
".. index:: pair: Sphinx; documentation tool\n"
".. index:: pair: Sphinx; :+1:\n"
".. index:: pair: Sphinx; Ель\n"
".. index:: pair: Sphinx; ёлка\n")
restructuredtext.parse(app, text)
index = IndexEntries(app.env).create_index(app.builder)
assert len(index) == 5
assert index[0] == ('D',
assert len(index) == 7
assert index[0] == ('Symbols', [(':+1:', [[], [('Sphinx', [('', '#index-3')])], None])])
assert index[1] == ('D',
[('documentation tool', [[], [('Sphinx', [('', '#index-2')])], None]),
('docutils', [[], [('reStructuredText', [('', '#index-0')])], None])])
assert index[1] == ('I', [('interpreter', [[], [('Python', [('', '#index-1')])], None])])
assert index[2] == ('P', [('Python', [[], [('interpreter', [('', '#index-1')])], None])])
assert index[3] == ('R',
assert index[2] == ('I', [('interpreter', [[], [('Python', [('', '#index-1')])], None])])
assert index[3] == ('P', [('Python', [[], [('interpreter', [('', '#index-1')])], None])])
assert index[4] == ('R',
[('reStructuredText', [[], [('docutils', [('', '#index-0')])], None])])
assert index[4] == ('S',
[('Sphinx', [[], [('documentation tool', [('', '#index-2')])], None])])
assert index[5] == ('S',
[('Sphinx', [[],
[(':+1:', [('', '#index-3')]),
('documentation tool', [('', '#index-2')]),
('ёлка', [('', '#index-5')]),
('Ель', [('', '#index-4')])],
None])])
assert index[6] == ('Е', [('ёлка', [[], [('Sphinx', [('', '#index-5')])], None]),
('Ель', [[], [('Sphinx', [('', '#index-4')])], None])])
@pytest.mark.sphinx('dummy')