Merged in shimizukawa/sphinx-py3-native (pull request #243)

native py2/py3 support without 2to3. refs #1350

commit a335414b81
CHANGES (1 line changed)
@@ -16,6 +16,7 @@ Incompatible changes
 New features
 ------------
 
+* Add support for Python 3.4.
 * Added ``sphinx.ext.napoleon`` extension for NumPy and Google style docstring
   support.
 * PR#214: Added stemming support for 14 languages, so that the built-in document
@@ -1,12 +0,0 @@
-from lib2to3.fixer_base import BaseFix
-from lib2to3.fixer_util import Name
-
-class FixAltUnicode(BaseFix):
-    PATTERN = """
-    func=funcdef< 'def' name='__unicode__'
-                  parameters< '(' NAME ')' > any+ >
-    """
-
-    def transform(self, node, results):
-        name = results['name']
-        name.replace(Name('__str__', prefix=name.prefix))
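
The deleted file above was the project's custom lib2to3 fixer: when installing on Python 3, 2to3 rewrote every `def __unicode__` into `def __str__`. In a single native codebase a class spells out both protocols itself; a minimal sketch of that pattern (the class is a hypothetical illustration, not from this commit):

    import sys

    class Message(object):
        def __unicode__(self):               # Python 2: unicode(msg)
            return u'a message'

        if sys.version_info[0] >= 3:
            __str__ = __unicode__            # Python 3: str() returns text
        else:
            def __str__(self):               # Python 2: str() must return bytes
                return self.__unicode__().encode('utf-8')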
setup.py (11 lines changed)
@@ -52,14 +52,6 @@ if (3, 0) <= sys.version_info < (3, 3):
 else: # 2.6, 2.7, 3.3 or later
     requires.append('Jinja2>=2.3')
 
-# tell distribute to use 2to3 with our own fixers
-extra = {}
-if sys.version_info >= (3, 0):
-    extra.update(
-        use_2to3=True,
-        use_2to3_fixers=['custom_fixers']
-    )
-
 # Provide a "compile_catalog" command that also creates the translated
 # JavaScript files if Babel is available.
 
@@ -181,7 +173,7 @@ setup(
         'Topic :: Utilities',
     ],
     platforms='any',
-    packages=find_packages(exclude=['custom_fixers', 'test']),
+    packages=find_packages(exclude=['test']),
    include_package_data=True,
     entry_points={
         'console_scripts': [
@@ -196,5 +188,4 @@ setup(
     },
     install_requires=requires,
     cmdclass=cmdclass,
-    **extra
 )
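
With the `extra` block gone, the last of the 2to3 build machinery disappears: `setup()` takes the same arguments on every interpreter, and `custom_fixers` no longer has to be excluded from packaging. A sketch of the simplified call, abbreviated to the parts visible in this diff:

    from setuptools import setup, find_packages

    setup(
        name='Sphinx',
        packages=find_packages(exclude=['test']),
        install_requires=['Jinja2>=2.3'],
    )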
@@ -10,6 +10,7 @@
 """
 
 from __future__ import with_statement
+from __future__ import unicode_literals
 
 from os import path, walk
 from codecs import open
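
The added `from __future__ import unicode_literals` (available since Python 2.6) is what lets the rest of this file drop its `u''` and `ur''` prefixes: every plain string literal in the module becomes unicode on Python 2, matching Python 3's default. A quick illustration of the effect:

    from __future__ import unicode_literals

    s = 'msgid'      # unicode on Python 2 and 3 alike
    r = r'\\'        # raw *and* unicode; replaces ur'\\'
    assert s == u'msgid' and len(r) == 2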
@@ -27,7 +28,7 @@ from sphinx.util.osutil import safe_relpath, ensuredir, find_catalog, SEP
 from sphinx.util.console import darkgreen, purple, bold
 from sphinx.locale import pairindextypes
 
-POHEADER = ur"""
+POHEADER = r"""
 # SOME DESCRIPTIVE TITLE.
 # Copyright (C) %(copyright)s
 # This file is distributed under the same license as the %(project)s package.
@@ -204,19 +205,19 @@ class MessageCatalogBuilder(I18nBuilder):
 
                 if self.config.gettext_location:
                     # generate "#: file1:line1\n#: file2:line2 ..."
-                    pofile.write(u"#: %s\n" % "\n#: ".join("%s:%s" %
+                    pofile.write("#: %s\n" % "\n#: ".join("%s:%s" %
                         (safe_relpath(source, self.outdir), line)
                         for source, line, _ in positions))
                 if self.config.gettext_uuid:
                     # generate "# uuid1\n# uuid2\n ..."
-                    pofile.write(u"# %s\n" % "\n# ".join(
+                    pofile.write("# %s\n" % "\n# ".join(
                         uid for _, _, uid in positions))
 
                 # message contains *one* line of text ready for translation
-                message = message.replace(u'\\', ur'\\'). \
-                          replace(u'"', ur'\"'). \
-                          replace(u'\n', u'\\n"\n"')
-                pofile.write(u'msgid "%s"\nmsgstr ""\n\n' % message)
+                message = message.replace('\\', r'\\'). \
+                          replace('"', r'\"'). \
+                          replace('\n', '\\n"\n"')
+                pofile.write('msgid "%s"\nmsgstr ""\n\n' % message)
 
         finally:
             pofile.close()
@@ -32,7 +32,6 @@ from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \
     movefile, ustrftime, copyfile
 from sphinx.util.nodes import inline_all_toctrees
 from sphinx.util.matching import patmatch, compile_matchers
-from sphinx.util.pycompat import b
 from sphinx.locale import _
 from sphinx.search import js_index
 from sphinx.theming import Theme
@@ -221,7 +220,7 @@ class StandaloneHTMLBuilder(Builder):
         """Utility: Render a lone doctree node."""
         if node is None:
             return {'fragment': ''}
-        doc = new_document(b('<partial node>'))
+        doc = new_document(b'<partial node>')
         doc.append(node)
 
         if self._publisher is None:
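
This hunk shows the pattern repeated through most of the commit: the `b()` helper from `sphinx.util.pycompat` (an `encode('utf-8')` call on Python 3, a plain `str` alias on Python 2) gives way to `b''` literals, which every supported interpreter (2.6, 2.7, 3.3+) parses natively:

    data = b'<partial node>'    # bytes on Python 2.6+ and 3.x alike
    assert isinstance(data, bytes)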
@@ -57,7 +57,7 @@ class LaTeXBuilder(Builder):
         return self.get_target_uri(to, typ)
 
     def init_document_data(self):
-        preliminary_document_data = map(list, self.config.latex_documents)
+        preliminary_document_data = [list(x) for x in self.config.latex_documents]
         if not preliminary_document_data:
             self.warn('no "latex_documents" config value found; no documents '
                       'will be written')
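
`map()` returns a lazy iterator on Python 3 rather than a list, so code that indexes or mutates the result breaks there; a list comprehension produces a real list on both lines. The same substitution is applied to the linkcheck and texinfo builders just below. A small demonstration (the sample config value is hypothetical):

    latex_documents = [('index', 'sphinx.tex', 'Sphinx', 'Team', 'manual')]

    data = [list(x) for x in latex_documents]   # a real list on 2.x and 3.x
    data[0][0] = 'contents'                     # indexing now works everywhere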
@@ -89,7 +89,7 @@ class CheckExternalLinksBuilder(Builder):
     name = 'linkcheck'
 
     def init(self):
-        self.to_ignore = map(re.compile, self.app.config.linkcheck_ignore)
+        self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
         self.good = set()
         self.broken = {}
         self.redirected = {}
@@ -108,7 +108,7 @@ class TexinfoBuilder(Builder):
         return self.get_target_uri(to, typ)
 
     def init_document_data(self):
-        preliminary_document_data = map(list, self.config.texinfo_documents)
+        preliminary_document_data = [list(x) for x in self.config.texinfo_documents]
         if not preliminary_document_data:
             self.warn('no "texinfo_documents" config value found; no documents '
                       'will be written')
@@ -89,7 +89,7 @@ def main(argv):
     try:
         opts, args = getopt.getopt(argv[1:], 'ab:t:d:c:CD:A:nNEqQWw:PThvj:',
                                    ['help', 'version'])
-    except getopt.error, err:
+    except getopt.error as err:
         usage(argv, 'Error: %s' % err)
         return 1
 
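
The comma form `except getopt.error, err:` is Python 2-only syntax and a SyntaxError on Python 3; the `as` form has been accepted since Python 2.6, making it the one spelling that parses everywhere:

    try:
        int('not a number')
    except ValueError as err:    # valid on Python 2.6+ and every 3.x
        print('Error: %s' % err)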
@@ -18,9 +18,9 @@ from six import PY3, iteritems, string_types, binary_type, integer_types
 from sphinx.errors import ConfigError
 from sphinx.locale import l_
 from sphinx.util.osutil import make_filename
-from sphinx.util.pycompat import b, execfile_
+from sphinx.util.pycompat import execfile_
 
-nonascii_re = re.compile(b(r'[\x80-\xff]'))
+nonascii_re = re.compile(br'[\x80-\xff]')
 
 CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
 if PY3:
@@ -1268,7 +1268,7 @@ class CPPDomain(Domain):
     }
 
     def clear_doc(self, docname):
-        for fullname, (fn, _, _) in self.data['objects'].items():
+        for fullname, (fn, _, _) in list(self.data['objects'].items()):
             if fn == docname:
                 del self.data['objects'][fullname]
 
@@ -1560,8 +1560,7 @@ class BuildEnvironment:
             if lckey[0:1] in lcletters:
                 return chr(127) + lckey
             return lckey
-        newlist = new.items()
-        newlist.sort(key=keyfunc)
+        newlist = sorted(new.items(), key=keyfunc)
 
         if group_entries:
             # fixup entries: transform
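
`dict.items()` returns a view on Python 3, and views have no `.sort()` method; `sorted()` collapses the two-step Python 2 idiom into one call that behaves identically on both versions:

    new = {'zeta': 2, 'alpha': 1}
    newlist = sorted(new.items(), key=lambda kv: kv[0])
    assert newlist == [('alpha', 1), ('zeta', 2)]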
@@ -54,9 +54,10 @@ class DefDict(dict):
             return dict.__getitem__(self, key)
         except KeyError:
             return self.default
-    def __nonzero__(self):
+    def __bool__(self):
         # docutils check "if option_spec"
         return True
+    __nonzero__ = __bool__  # for python2 compatibility
 
 identity = lambda x: x
 
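
Truth testing looks up `__bool__` on Python 3 but `__nonzero__` on Python 2, so the commit defines the Python 3 name and aliases the old one to it; the same treatment is applied to `_TranslationProxy` and the bundled ElementTree further down:

    class AlwaysTrue(dict):
        def __bool__(self):          # Python 3 truth protocol
            return True
        __nonzero__ = __bool__       # Python 2 looks up this name

    assert bool(AlwaysTrue())        # true even though the dict is empty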
@@ -110,14 +110,11 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
     # read
     items = find_autosummary_in_files(sources)
 
-    # remove possible duplicates
-    items = dict([(item, True) for item in items]).keys()
-
     # keep track of new files
     new_files = []
 
     # write
-    for name, path, template_name in sorted(items, key=str):
+    for name, path, template_name in sorted(set(items), key=str):
         if path is None:
             # The corresponding autosummary:: directive did not have
             # a :toctree: option
@@ -213,8 +213,7 @@ class CoverageBuilder(Builder):
         try:
             if self.config.coverage_write_headline:
                 write_header(op, 'Undocumented Python objects', '=')
-            keys = self.py_undoc.keys()
-            keys.sort()
+            keys = sorted(self.py_undoc.keys())
             for name in keys:
                 undoc = self.py_undoc[name]
                 if 'error' in undoc:
@@ -38,7 +38,6 @@ from docutils.utils import relative_path
 
 from sphinx.locale import _
 from sphinx.builders.html import INVENTORY_FILENAME
-from sphinx.util.pycompat import b
 
 
 handlers = [request.ProxyHandler(), request.HTTPRedirectHandler(),
@ -86,19 +85,19 @@ def read_inventory_v2(f, uri, join, bufsize=16*1024):
|
|||||||
|
|
||||||
def read_chunks():
|
def read_chunks():
|
||||||
decompressor = zlib.decompressobj()
|
decompressor = zlib.decompressobj()
|
||||||
for chunk in iter(lambda: f.read(bufsize), b('')):
|
for chunk in iter(lambda: f.read(bufsize), b''):
|
||||||
yield decompressor.decompress(chunk)
|
yield decompressor.decompress(chunk)
|
||||||
yield decompressor.flush()
|
yield decompressor.flush()
|
||||||
|
|
||||||
def split_lines(iter):
|
def split_lines(iter):
|
||||||
buf = b('')
|
buf = b''
|
||||||
for chunk in iter:
|
for chunk in iter:
|
||||||
buf += chunk
|
buf += chunk
|
||||||
lineend = buf.find(b('\n'))
|
lineend = buf.find(b'\n')
|
||||||
while lineend != -1:
|
while lineend != -1:
|
||||||
yield buf[:lineend].decode('utf-8')
|
yield buf[:lineend].decode('utf-8')
|
||||||
buf = buf[lineend+1:]
|
buf = buf[lineend+1:]
|
||||||
lineend = buf.find(b('\n'))
|
lineend = buf.find(b'\n')
|
||||||
assert not buf
|
assert not buf
|
||||||
|
|
||||||
for line in split_lines(read_chunks()):
|
for line in split_lines(read_chunks()):
|
||||||
@@ -27,7 +27,7 @@ from docutils import nodes
 from sphinx.errors import SphinxError
 from sphinx.util.png import read_png_depth, write_png_depth
 from sphinx.util.osutil import ensuredir, ENOENT
-from sphinx.util.pycompat import b, sys_encoding
+from sphinx.util.pycompat import sys_encoding
 from sphinx.ext.mathbase import setup_math as mathbase_setup, wrap_displaymath
 
 class MathExtError(SphinxError):
@ -67,7 +67,7 @@ DOC_BODY_PREVIEW = r'''
|
|||||||
\end{document}
|
\end{document}
|
||||||
'''
|
'''
|
||||||
|
|
||||||
depth_re = re.compile(b(r'\[\d+ depth=(-?\d+)\]'))
|
depth_re = re.compile(br'\[\d+ depth=(-?\d+)\]')
|
||||||
|
|
||||||
def render_math(self, math):
|
def render_math(self, math):
|
||||||
"""Render the LaTeX math expression *math* using latex and dvipng.
|
"""Render the LaTeX math expression *math* using latex and dvipng.
|
||||||
@@ -114,7 +114,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
         self.pathchain = pathchain
 
         # make the paths into loaders
-        self.loaders = map(SphinxFileSystemLoader, loaderchain)
+        self.loaders = [SphinxFileSystemLoader(x) for x in loaderchain]
 
         use_i18n = builder.app.translator is not None
         extensions = use_i18n and ['jinja2.ext.i18n'] or []
@@ -60,8 +60,9 @@ class _TranslationProxy(UserString, object):
     def __contains__(self, key):
         return key in self.data
 
-    def __nonzero__(self):
+    def __bool__(self):
         return bool(self.data)
+    __nonzero__ = __bool__  # for python2 compatibility
 
     def __dir__(self):
         return dir(text_type)
@@ -335,7 +335,7 @@ class ParserGenerator(object):
         try:
             msg = msg % args
         except:
-            msg = " ".join([msg] + map(str, args))
+            msg = " ".join([msg] + [str(x) for x in args])
         raise SyntaxError(msg, (self.filename, self.end[0],
                                 self.end[1], self.line))
 
@@ -353,7 +353,7 @@ class DFAState(object):
 
     def __init__(self, nfaset, final):
         assert isinstance(nfaset, dict)
-        assert isinstance(iter(nfaset).next(), NFAState)
+        assert isinstance(next(iter(nfaset)), NFAState)
         assert isinstance(final, NFAState)
         self.nfaset = nfaset
         self.isfinal = final in nfaset
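
Iterators expose `.next()` on Python 2 but `.__next__()` on Python 3; the builtin `next()` (added in 2.6) hides the difference and is used for every such call in this commit:

    nfaset = {'start': 1}
    first = next(iter(nfaset))    # replaces iter(nfaset).next()
    assert first == 'start'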
@@ -97,8 +97,9 @@ ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
 
-tokenprog, pseudoprog, single3prog, double3prog = map(
-    re.compile, (Token, PseudoToken, Single3, Double3))
+tokenprog, pseudoprog, single3prog, double3prog = [
+    re.compile(x) for x in (Token, PseudoToken, Single3, Double3)
+]
 endprogs = {"'": re.compile(Single), '"': re.compile(Double),
             "'''": single3prog, '"""': double3prog,
             "r'''": single3prog, 'r"""': double3prog,
@@ -316,8 +316,8 @@ class IndexBuilder(object):
 
     def freeze(self):
         """Create a usable data structure for serializing."""
-        filenames = self._titles.keys()
-        titles = self._titles.values()
+        filenames = list(self._titles.keys())
+        titles = list(self._titles.values())
         fn2index = dict((f, i) for (i, f) in enumerate(filenames))
         terms, title_terms = self.get_terms(fn2index)
 
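
`keys()` and `values()` give back views on Python 3, while `freeze()` needs stable, indexable sequences to build `fn2index`, so both are materialized with `list()`. The wrapping is harmless on Python 2, where they were lists already:

    titles = {'index': 'Overview'}
    filenames = list(titles.keys())    # indexable on both versions
    assert filenames[0] == 'index'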
@@ -15,11 +15,10 @@ from __future__ import print_function
 
 import sys
 import os
-import types
 from distutils.cmd import Command
 from distutils.errors import DistutilsOptionError
 
-from six import StringIO
+from six import StringIO, string_types
 
 from sphinx.application import Sphinx
 from sphinx.util.console import darkred, nocolor, color_terminal
@ -110,7 +109,7 @@ class BuildDoc(Command):
|
|||||||
if val is None:
|
if val is None:
|
||||||
setattr(self, option, default)
|
setattr(self, option, default)
|
||||||
return default
|
return default
|
||||||
elif not isinstance(val, types.StringTypes):
|
elif not isinstance(val, string_types):
|
||||||
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
|
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
|
||||||
% (option, what, val))
|
% (option, what, val))
|
||||||
return val
|
return val
|
||||||
@@ -368,7 +368,7 @@ def rpartition(s, t):
 
 def split_into(n, type, value):
     """Split an index entry into a given number of parts at semicolons."""
-    parts = map(lambda x: x.strip(), value.split(';', n-1))
+    parts = [x.strip() for x in value.split(';', n-1)]
     if sum(1 for part in parts if part) < n:
         raise ValueError('invalid %s index entry %r' % (type, value))
     return parts
@@ -189,12 +189,6 @@ def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction):
 fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
 
 
-if PY2:
-    bytes = str
-else:
-    bytes = bytes
-
-
 def abspath(pathdir):
     pathdir = path.abspath(pathdir)
     if isinstance(pathdir, bytes):
@@ -12,14 +12,13 @@
 import struct
 import binascii
 
-from sphinx.util.pycompat import b
 
 LEN_IEND = 12
 LEN_DEPTH = 22
 
 DEPTH_CHUNK_LEN = struct.pack('!i', 10)
-DEPTH_CHUNK_START = b('tEXtDepth\x00')
-IEND_CHUNK = b('\x00\x00\x00\x00IEND\xAE\x42\x60\x82')
+DEPTH_CHUNK_START = b'tEXtDepth\x00'
+IEND_CHUNK = b'\x00\x00\x00\x00IEND\xAE\x42\x60\x82'
 
 
 def read_png_depth(filename):
@@ -19,9 +19,6 @@ from six import PY3, text_type, exec_
 
 if PY3:
     # Python 3
-    # the ubiquitous "bytes" helper functions
-    def b(s):
-        return s.encode('utf-8')
     # prefix for Unicode strings
     u = ''
     from io import TextIOWrapper
@@ -57,7 +54,6 @@ if PY3:
 
 else:
     # Python 2
-    b = str
     u = 'u'
     # no need to refactor on 2.x versions
     convert_with_2to3 = None
@@ -92,7 +88,7 @@ def execfile_(filepath, _globals):
 
     # py26 accept only LF eol instead of CRLF
     if sys.version_info[:2] == (2, 6):
-        source = source.replace(b('\r\n'), b('\n'))
+        source = source.replace(b'\r\n', b'\n')
 
     # compile to a code object, handle syntax errors
     filepath_enc = filepath.encode(fs_encoding)
@@ -9,93 +9,95 @@
     :license: BSD, see LICENSE for details.
 """
 
+from __future__ import unicode_literals
+
 tex_replacements = [
     # map TeX special chars
-    (u'$', ur'\$'),
-    (u'%', ur'\%'),
-    (u'&', ur'\&'),
-    (u'#', ur'\#'),
-    (u'_', ur'\_'),
-    (u'{', ur'\{'),
-    (u'}', ur'\}'),
-    (u'[', ur'{[}'),
-    (u']', ur'{]}'),
-    (u'`', ur'{}`'),
-    (u'\\',ur'\textbackslash{}'),
-    (u'~', ur'\textasciitilde{}'),
-    (u'<', ur'\textless{}'),
-    (u'>', ur'\textgreater{}'),
-    (u'^', ur'\textasciicircum{}'),
+    ('$', r'\$'),
+    ('%', r'\%'),
+    ('&', r'\&'),
+    ('#', r'\#'),
+    ('_', r'\_'),
+    ('{', r'\{'),
+    ('}', r'\}'),
+    ('[', r'{[}'),
+    (']', r'{]}'),
+    ('`', r'{}`'),
+    ('\\',r'\textbackslash{}'),
+    ('~', r'\textasciitilde{}'),
+    ('<', r'\textless{}'),
+    ('>', r'\textgreater{}'),
+    ('^', r'\textasciicircum{}'),
     # map special Unicode characters to TeX commands
-    (u'¶', ur'\P{}'),
-    (u'§', ur'\S{}'),
-    (u'€', ur'\texteuro{}'),
-    (u'∞', ur'\(\infty\)'),
-    (u'±', ur'\(\pm\)'),
-    (u'→', ur'\(\rightarrow\)'),
-    (u'‣', ur'\(\rightarrow\)'),
+    ('¶', r'\P{}'),
+    ('§', r'\S{}'),
+    ('€', r'\texteuro{}'),
+    ('∞', r'\(\infty\)'),
+    ('±', r'\(\pm\)'),
+    ('→', r'\(\rightarrow\)'),
+    ('‣', r'\(\rightarrow\)'),
     # used to separate -- in options
-    (u'', ur'{}'),
+    ('', r'{}'),
     # map some special Unicode characters to similar ASCII ones
-    (u'─', ur'-'),
-    (u'⎽', ur'\_'),
-    (u'╲', ur'\textbackslash{}'),
-    (u'|', ur'\textbar{}'),
-    (u'│', ur'\textbar{}'),
-    (u'ℯ', ur'e'),
-    (u'ⅈ', ur'i'),
-    (u'₁', ur'1'),
-    (u'₂', ur'2'),
+    ('─', r'-'),
+    ('⎽', r'\_'),
+    ('╲', r'\textbackslash{}'),
+    ('|', r'\textbar{}'),
+    ('│', r'\textbar{}'),
+    ('ℯ', r'e'),
+    ('ⅈ', r'i'),
+    ('₁', r'1'),
+    ('₂', r'2'),
     # map Greek alphabet
-    (u'α', ur'\(\alpha\)'),
-    (u'β', ur'\(\beta\)'),
-    (u'γ', ur'\(\gamma\)'),
-    (u'δ', ur'\(\delta\)'),
-    (u'ε', ur'\(\epsilon\)'),
-    (u'ζ', ur'\(\zeta\)'),
-    (u'η', ur'\(\eta\)'),
-    (u'θ', ur'\(\theta\)'),
-    (u'ι', ur'\(\iota\)'),
-    (u'κ', ur'\(\kappa\)'),
-    (u'λ', ur'\(\lambda\)'),
-    (u'μ', ur'\(\mu\)'),
-    (u'ν', ur'\(\nu\)'),
-    (u'ξ', ur'\(\xi\)'),
-    (u'ο', ur'o'),
-    (u'π', ur'\(\pi\)'),
-    (u'ρ', ur'\(\rho\)'),
-    (u'σ', ur'\(\sigma\)'),
-    (u'τ', ur'\(\tau\)'),
-    (u'υ', u'\\(\\upsilon\\)'),
-    (u'φ', ur'\(\phi\)'),
-    (u'χ', ur'\(\chi\)'),
-    (u'ψ', ur'\(\psi\)'),
-    (u'ω', ur'\(\omega\)'),
-    (u'Α', ur'A'),
-    (u'Β', ur'B'),
-    (u'Γ', ur'\(\Gamma\)'),
-    (u'Δ', ur'\(\Delta\)'),
-    (u'Ε', ur'E'),
-    (u'Ζ', ur'Z'),
-    (u'Η', ur'H'),
-    (u'Θ', ur'\(\Theta\)'),
-    (u'Ι', ur'I'),
-    (u'Κ', ur'K'),
-    (u'Λ', ur'\(\Lambda\)'),
-    (u'Μ', ur'M'),
-    (u'Ν', ur'N'),
-    (u'Ξ', ur'\(\Xi\)'),
-    (u'Ο', ur'O'),
-    (u'Π', ur'\(\Pi\)'),
-    (u'Ρ', ur'P'),
-    (u'Σ', ur'\(\Sigma\)'),
-    (u'Τ', ur'T'),
-    (u'Υ', u'\\(\\Upsilon\\)'),
-    (u'Φ', ur'\(\Phi\)'),
-    (u'Χ', ur'X'),
-    (u'Ψ', ur'\(\Psi\)'),
-    (u'Ω', ur'\(\Omega\)'),
-    (u'Ω', ur'\(\Omega\)'),
+    ('α', r'\(\alpha\)'),
+    ('β', r'\(\beta\)'),
+    ('γ', r'\(\gamma\)'),
+    ('δ', r'\(\delta\)'),
+    ('ε', r'\(\epsilon\)'),
+    ('ζ', r'\(\zeta\)'),
+    ('η', r'\(\eta\)'),
+    ('θ', r'\(\theta\)'),
+    ('ι', r'\(\iota\)'),
+    ('κ', r'\(\kappa\)'),
+    ('λ', r'\(\lambda\)'),
+    ('μ', r'\(\mu\)'),
+    ('ν', r'\(\nu\)'),
+    ('ξ', r'\(\xi\)'),
+    ('ο', r'o'),
+    ('π', r'\(\pi\)'),
+    ('ρ', r'\(\rho\)'),
+    ('σ', r'\(\sigma\)'),
+    ('τ', r'\(\tau\)'),
+    ('υ', '\\(\\upsilon\\)'),
+    ('φ', r'\(\phi\)'),
+    ('χ', r'\(\chi\)'),
+    ('ψ', r'\(\psi\)'),
+    ('ω', r'\(\omega\)'),
+    ('Α', r'A'),
+    ('Β', r'B'),
+    ('Γ', r'\(\Gamma\)'),
+    ('Δ', r'\(\Delta\)'),
+    ('Ε', r'E'),
+    ('Ζ', r'Z'),
+    ('Η', r'H'),
+    ('Θ', r'\(\Theta\)'),
+    ('Ι', r'I'),
+    ('Κ', r'K'),
+    ('Λ', r'\(\Lambda\)'),
+    ('Μ', r'M'),
+    ('Ν', r'N'),
+    ('Ξ', r'\(\Xi\)'),
+    ('Ο', r'O'),
+    ('Π', r'\(\Pi\)'),
+    ('Ρ', r'P'),
+    ('Σ', r'\(\Sigma\)'),
+    ('Τ', r'T'),
+    ('Υ', '\\(\\Upsilon\\)'),
+    ('Φ', r'\(\Phi\)'),
+    ('Χ', r'X'),
+    ('Ψ', r'\(\Psi\)'),
+    ('Ω', r'\(\Omega\)'),
+    ('Ω', r'\(\Omega\)'),
 ]
 
 tex_escape_map = {}
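
The `ur''` prefix is the one string spelling with no Python 3 equivalent (it is a SyntaxError there); with `unicode_literals` in force, a plain `r''` literal is raw unicode on both versions. The two `\upsilon` entries keep the non-raw, doubled-backslash form because Python 2 raw unicode literals still interpret `\uXXXX` escapes, so `r'\(\upsilon\)'` would not survive there:

    from __future__ import unicode_literals

    dollar = r'\$'                 # raw unicode on 2.x and 3.x; was ur'\$'
    upsilon = '\\(\\upsilon\\)'    # r'\(\upsilon\)' breaks on Python 2
    assert dollar == u'\\$'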
@@ -105,8 +107,8 @@ tex_hl_escape_map_new = {}
 def init():
     for a, b in tex_replacements:
         tex_escape_map[ord(a)] = b
-        tex_replace_map[ord(a)] = u'_'
+        tex_replace_map[ord(a)] = '_'
 
     for a, b in tex_replacements:
-        if a in u'[]{}\\': continue
+        if a in '[]{}\\': continue
         tex_hl_escape_map_new[ord(a)] = b
@@ -711,7 +711,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
             self.body.append('\n\\hline\n')
             self.body.extend(self.tableheaders)
             self.body.append('\\endhead\n\n')
-            self.body.append(ur'\hline \multicolumn{%s}{|r|}{{\textsf{%s}}} \\ \hline'
+            self.body.append(r'\hline \multicolumn{%s}{|r|}{{\textsf{%s}}} \\ \hline'
                              % (self.table.colcount,
                                 _('Continued on next page')))
             self.body.append('\n\\endfoot\n\n')
@@ -1137,21 +1137,21 @@ class LaTeXTranslator(nodes.NodeVisitor):
                 p = scre.sub('!', self.encode(string))
                 self.body.append(r'\index{%s%s}' % (p, m))
             elif type == 'pair':
-                p1, p2 = map(self.encode, split_into(2, 'pair', string))
+                p1, p2 = [self.encode(x) for x in split_into(2, 'pair', string)]
                 self.body.append(r'\index{%s!%s%s}\index{%s!%s%s}' %
                                  (p1, p2, m, p2, p1, m))
             elif type == 'triple':
-                p1, p2, p3 = map(self.encode,
-                                 split_into(3, 'triple', string))
+                p1, p2, p3 = [self.encode(x)
+                              for x in split_into(3, 'triple', string)]
                 self.body.append(
                     r'\index{%s!%s %s%s}\index{%s!%s, %s%s}'
                     r'\index{%s!%s %s%s}' %
                     (p1, p2, p3, m, p2, p3, p1, m, p3, p1, p2, m))
             elif type == 'see':
-                p1, p2 = map(self.encode, split_into(2, 'see', string))
+                p1, p2 = [self.encode(x) for x in split_into(2, 'see', string)]
                 self.body.append(r'\index{%s|see{%s}}' % (p1, p2))
             elif type == 'seealso':
-                p1, p2 = map(self.encode, split_into(2, 'seealso', string))
+                p1, p2 = [self.encode(x) for x in split_into(2, 'seealso', string)]
                 self.body.append(r'\index{%s|see{%s}}' % (p1, p2))
             else:
                 self.builder.warn(
@@ -488,7 +488,7 @@ class TextTranslator(nodes.NodeVisitor):
             for i, cell in enumerate(line):
                 par = my_wrap(cell, width=colwidths[i])
                 if par:
-                    maxwidth = max(map(column_width, par))
+                    maxwidth = max(column_width(x) for x in par)
                 else:
                     maxwidth = 0
                 realwidths[i] = max(realwidths[i], maxwidth)
@@ -401,7 +401,7 @@ class coverage:
         if settings.get('collect'):
             self.collect()
         if not args:
-            args = self.cexecuted.keys()
+            args = list(self.cexecuted.keys())
 
         ignore_errors = settings.get('ignore-errors')
         show_missing = settings.get('show-missing')
@ -743,10 +743,8 @@ class coverage:
|
|||||||
visitor = StatementFindingAstVisitor(statements, excluded, suite_spots)
|
visitor = StatementFindingAstVisitor(statements, excluded, suite_spots)
|
||||||
compiler.walk(ast, visitor, walker=visitor)
|
compiler.walk(ast, visitor, walker=visitor)
|
||||||
|
|
||||||
lines = statements.keys()
|
lines = sorted(statements.keys())
|
||||||
lines.sort()
|
excluded_lines = sorted(excluded.keys())
|
||||||
excluded_lines = excluded.keys()
|
|
||||||
excluded_lines.sort()
|
|
||||||
return lines, excluded_lines, suite_spots
|
return lines, excluded_lines, suite_spots
|
||||||
|
|
||||||
# format_lines(statements, lines). Format a list of line numbers
|
# format_lines(statements, lines). Format a list of line numbers
|
||||||
@ -850,7 +848,7 @@ class coverage:
|
|||||||
morfs = self.filter_by_prefix(morfs, omit_prefixes)
|
morfs = self.filter_by_prefix(morfs, omit_prefixes)
|
||||||
morfs.sort(self.morf_name_compare)
|
morfs.sort(self.morf_name_compare)
|
||||||
|
|
||||||
max_name = max([5,] + map(len, map(self.morf_name, morfs)))
|
max_name = max(5, *map(len, map(self.morf_name, morfs)))
|
||||||
fmt_name = "%%- %ds " % max_name
|
fmt_name = "%%- %ds " % max_name
|
||||||
fmt_err = fmt_name + "%s: %s"
|
fmt_err = fmt_name + "%s: %s"
|
||||||
header = fmt_name % "Name" + " Stmts Exec Cover"
|
header = fmt_name % "Name" + " Stmts Exec Cover"
|
||||||
@@ -177,7 +177,7 @@ class _SelectorContext:
 
 def find(elem, path):
     try:
-        return findall(elem, path).next()
+        return next(findall(elem, path))
     except StopIteration:
         return None
 
@ -194,17 +194,17 @@ def findall(elem, path):
|
|||||||
if path[:1] == "/":
|
if path[:1] == "/":
|
||||||
raise SyntaxError("cannot use absolute path on element")
|
raise SyntaxError("cannot use absolute path on element")
|
||||||
stream = iter(xpath_tokenizer(path))
|
stream = iter(xpath_tokenizer(path))
|
||||||
next = stream.next; token = next()
|
next_ = lambda: next(stream); token = next_()
|
||||||
selector = []
|
selector = []
|
||||||
while 1:
|
while 1:
|
||||||
try:
|
try:
|
||||||
selector.append(ops[token[0]](next, token))
|
selector.append(ops[token[0]](next_, token))
|
||||||
except StopIteration:
|
except StopIteration:
|
||||||
raise SyntaxError("invalid path")
|
raise SyntaxError("invalid path")
|
||||||
try:
|
try:
|
||||||
token = next()
|
token = next_()
|
||||||
if token[0] == "/":
|
if token[0] == "/":
|
||||||
token = next()
|
token = next_()
|
||||||
except StopIteration:
|
except StopIteration:
|
||||||
break
|
break
|
||||||
_cache[path] = selector
|
_cache[path] = selector
|
||||||
@@ -220,7 +220,7 @@ def findall(elem, path):
 
 def findtext(elem, path, default=None):
     try:
-        elem = findall(elem, path).next()
+        elem = next(findall(elem, path))
         return elem.text
     except StopIteration:
         return default
@@ -246,7 +246,7 @@ class Element(object):
     def __len__(self):
         return len(self._children)
 
-    def __nonzero__(self):
+    def __bool__(self):
         import warnings
         warnings.warn(
             "The behavior of this method will change in future versions. "
@@ -254,6 +254,7 @@ class Element(object):
             FutureWarning
             )
         return len(self._children) != 0 # emulate old behaviour
+    __nonzero__ = __bool__  # for python2 compatibility
 
     ##
     # Returns the given subelement.
@@ -866,7 +867,7 @@ def _serialize_xml(write, elem, encoding, qnames, namespaces):
         write("<" + tag)
         items = elem.items()
         if items or namespaces:
-            items.sort() # lexical order
+            items = sorted(items) # lexical order
             for k, v in items:
                 if isinstance(k, QName):
                     k = k.text
@@ -877,7 +878,7 @@ def _serialize_xml(write, elem, encoding, qnames, namespaces):
                 write(" %s=\"%s\"" % (qnames[k], v))
             if namespaces:
                 items = namespaces.items()
-                items.sort(key=lambda x: x[1]) # sort on prefix
+                items = sorted(items, key=lambda x: x[1]) # sort on prefix
                 for v, k in items:
                     if k:
                         k = ":" + k
@@ -923,7 +924,7 @@ def _serialize_html(write, elem, encoding, qnames, namespaces):
         write("<" + tag)
         items = elem.items()
         if items or namespaces:
-            items.sort() # lexical order
+            items = sorted(items) # lexical order
             for k, v in items:
                 if isinstance(k, QName):
                     k = k.text
@@ -935,7 +936,7 @@ def _serialize_html(write, elem, encoding, qnames, namespaces):
                 write(" %s=\"%s\"" % (qnames[k], v))
             if namespaces:
                 items = namespaces.items()
-                items.sort(key=lambda x: x[1]) # sort on prefix
+                items = sorted(items, key=lambda x: x[1]) # sort on prefix
                 for v, k in items:
                     if k:
                         k = ":" + k
tests/run.py (14 lines changed)
@@ -15,8 +15,6 @@ import sys
 from os import path, chdir, listdir, environ
 import shutil
 
-from six import PY3
-
 
 testroot = path.dirname(__file__) or '.'
 if 'BUILD_TEST_PATH' in environ:
@@ -28,15 +26,9 @@ else:
     newroot = path.join(newroot, listdir(newroot)[0], 'tests')
 
 shutil.rmtree(newroot, ignore_errors=True)
-
-if PY3:
-    print('Copying and converting sources to build/lib/tests...')
-    from distutils.util import copydir_run_2to3
-    copydir_run_2to3(testroot, newroot)
-else:
-    # just copying test directory to parallel testing
-    print('Copying sources to build/lib/tests...')
-    shutil.copytree(testroot, newroot)
+# just copying test directory to parallel testing
+print('Copying sources to build/lib/tests...')
+shutil.copytree(testroot, newroot)
 
 # always test the sphinx package from build/lib/
 sys.path.insert(0, path.abspath(path.join(newroot, path.pardir)))
@@ -35,7 +35,7 @@ def test_mangle_signature():
     (a=1, b=<SomeClass: a, b, c>, c=3) :: ([a, b, c])
     """
 
-    TEST = [map(lambda x: x.strip(), x.split("::")) for x in TEST.split("\n")
+    TEST = [[y.strip() for y in x.split("::")] for x in TEST.split("\n")
             if '::' in x]
     for inp, outp in TEST:
         res = mangle_signature(inp).strip().replace(u"\u00a0", " ")
@@ -266,7 +266,7 @@ if pygments:
         (".//div[@class='inc-lines highlight-text']//pre",
          r'^class Foo:\n    pass\nclass Bar:\n$'),
         (".//div[@class='inc-startend highlight-text']//pre",
-         ur'^foo = "Including Unicode characters: üöä"\n$'),
+         u'^foo = "Including Unicode characters: üöä"\\n$'),
         (".//div[@class='inc-preappend highlight-text']//pre",
         r'(?m)^START CODE$'),
         (".//div[@class='inc-pyobj-dedent highlight-python']//span",
@@ -15,7 +15,6 @@ from util import TestApp, with_app, with_tempdir, raises, raises_msg
 
 from sphinx.config import Config
 from sphinx.errors import ExtensionError, ConfigError, VersionRequirementError
-from sphinx.util.pycompat import b
 
 
 @with_app(confoverrides={'master_doc': 'master', 'nonexisting_value': 'True',
@@ -122,8 +121,8 @@ def test_needs_sphinx():
 def test_config_eol(tmpdir):
     # test config file's eol patterns: LF, CRLF
     configfile = tmpdir / 'conf.py'
-    for eol in ('\n', '\r\n'):
-        configfile.write_bytes(b('project = "spam"' + eol))
+    for eol in (b'\n', b'\r\n'):
+        configfile.write_bytes(b'project = "spam"' + eol)
         cfg = Config(tmpdir, 'conf.py', {}, None)
         cfg.init_values(lambda warning: 1/0)
         assert cfg.project == u'spam'
@@ -38,7 +38,7 @@ def test_build(app):
     undoc_py, undoc_c = pickle.loads((app.outdir / 'undoc.pickle').bytes())
     assert len(undoc_c) == 1
     # the key is the full path to the header file, which isn't testable
-    assert undoc_c.values()[0] == [('function', 'Py_SphinxTest')]
+    assert list(undoc_c.values())[0] == [('function', 'Py_SphinxTest')]
 
     assert 'test_autodoc' in undoc_py
     assert 'funcs' in undoc_py['test_autodoc']
@@ -98,7 +98,7 @@ def assert_elem(elem, texts=None, refs=None, names=None):
         _texts = elem_gettexts(elem)
         assert _texts == texts
     if refs is not None:
-        _refs = map(elem_getref, elem.findall('reference'))
+        _refs = [elem_getref(x) for x in elem.findall('reference')]
         assert _refs == refs
     if names is not None:
         _names = elem.attrib.get('names').split()
@@ -15,7 +15,6 @@ from docutils import frontend, utils, nodes
 from docutils.parsers import rst
 
 from sphinx.util import texescape
-from sphinx.util.pycompat import b
 from sphinx.writers.html import HTMLWriter, SmartyPantsHTMLTranslator
 from sphinx.writers.latex import LaTeXWriter, LaTeXTranslator
 
@@ -54,7 +53,7 @@ class ForgivingLaTeXTranslator(LaTeXTranslator, ForgivingTranslator):
 
 
 def verify_re(rst, html_expected, latex_expected):
-    document = utils.new_document(b('test data'), settings)
+    document = utils.new_document(b'test data', settings)
     document['file'] = 'dummy'
     parser.parse(rst, document)
     for msg in document.traverse(nodes.system_message):
@@ -128,7 +127,7 @@ def test_inline():
 def test_latex_escaping():
     # correct escaping in normal mode
     yield (verify, u'Γ\\\\∞$', None,
-           ur'\(\Gamma\)\textbackslash{}\(\infty\)\$')
+           r'\(\Gamma\)\textbackslash{}\(\infty\)\$')
     # in verbatim code fragments
     yield (verify, u'::\n\n   @Γ\\∞${}', None,
            u'\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n'
@@ -136,4 +135,4 @@ def test_latex_escaping():
            u'\\end{Verbatim}')
     # in URIs
     yield (verify_re, u'`test <http://example.com/~me/>`_', None,
-           ur'\\href{http://example.com/~me/}{test}.*')
+           r'\\href{http://example.com/~me/}{test}.*')
@@ -13,7 +13,6 @@ from docutils import frontend, utils
 from docutils.parsers import rst
 
 from sphinx.search import IndexBuilder
-from sphinx.util.pycompat import b
 
 
 settings = parser = None
@@ -32,7 +31,7 @@ test that non-comments are indexed: fermion
 '''
 
 def test_wordcollector():
-    doc = utils.new_document(b('test data'), settings)
+    doc = utils.new_document(b'test data', settings)
     doc['file'] = 'dummy'
     parser.parse(FILE_CONTENTS, doc)
 
tox.ini (2 lines changed)
@@ -1,5 +1,5 @@
 [tox]
-envlist=py26,py27,py32,py33,pypy,du11,du10
+envlist=py26,py27,py32,py33,py34,pypy,du11,du10
 
 [testenv]
 deps=
@@ -17,12 +17,6 @@ import cStringIO
 from optparse import OptionParser
 from os.path import join, splitext, abspath
 
-if sys.version_info >= (3, 0):
-    def b(s):
-        return s.encode('utf-8')
-else:
-    b = str
-
 
 checkers = {}
 
@@ -37,24 +31,24 @@ def checker(*suffixes, **kwds):
 
 
 name_mail_re = r'[\w ]+(<.*?>)?'
-copyright_re = re.compile(b(r'^    :copyright: Copyright 200\d(-20\d\d)? '
-                            r'by %s(, %s)*[,.]$' %
-                            (name_mail_re, name_mail_re)))
-license_re = re.compile(b(r"    :license: (.*?).\n"))
-copyright_2_re = re.compile(b(r'^                %s(, %s)*[,.]$' %
-                              (name_mail_re, name_mail_re)))
-coding_re = re.compile(b(r'coding[:=]\s*([-\w.]+)'))
-not_ix_re = re.compile(b(r'\bnot\s+\S+?\s+i[sn]\s\S+'))
-is_const_re = re.compile(b(r'if.*?==\s+(None|False|True)\b'))
+copyright_re = re.compile(br'^    :copyright: Copyright 200\d(-20\d\d)? '
+                          br'by %s(, %s)*[,.]$' %
+                          (name_mail_re, name_mail_re))
+license_re = re.compile(br"    :license: (.*?).\n")
+copyright_2_re = re.compile(br'^                %s(, %s)*[,.]$' %
+                            (name_mail_re, name_mail_re))
+coding_re = re.compile(br'coding[:=]\s*([-\w.]+)')
+not_ix_re = re.compile(br'\bnot\s+\S+?\s+i[sn]\s\S+')
+is_const_re = re.compile(br'if.*?==\s+(None|False|True)\b')
 
-misspellings = [b("developement"), b("adress"),  # ALLOW-MISSPELLING
-                b("verificate"), b("informations")]  # ALLOW-MISSPELLING
+misspellings = [b"developement", b"adress",  # ALLOW-MISSPELLING
+                b"verificate", b"informations"]  # ALLOW-MISSPELLING
 
 if sys.version_info < (3, 0):
     @checker('.py')
     def check_syntax(fn, lines):
         try:
-            compile(b('').join(lines), fn, "exec")
+            compile(b''.join(lines), fn, "exec")
         except SyntaxError as err:
             yield 0, "not compilable: %s" % err
 
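
The same `b()` cleanup runs through the source checker, with one wrinkle: the helper wrapped whole `re.compile(...)` patterns, so removing it also drops a level of parentheses. `br''` raw-bytes literals are accepted by Python 2.6+ and 3.x alike (only the reversed `rb''` spelling had to wait for 3.3):

    import re

    coding_re = re.compile(br'coding[:=]\s*([-\w.]+)')
    assert coding_re.search(b'# -*- coding: utf-8 -*-').group(1) == b'utf-8'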
@@ -69,7 +63,7 @@ def check_style_and_encoding(fn, lines):
         co = coding_re.search(line)
         if co:
             encoding = co.group(1).decode('ascii')
-        if line.strip().startswith(b('#')):
+        if line.strip().startswith(b'#'):
             continue
         #m = not_ix_re.search(line)
         #if m:
@@ -89,7 +83,7 @@ def check_style_and_encoding(fn, lines):
 def check_fileheader(fn, lines):
     # line number correction
     c = 1
-    if lines[0:1] == [b('#!/usr/bin/env python\n')]:
+    if lines[0:1] == [b'#!/usr/bin/env python\n']:
         lines = lines[1:]
         c = 2
 
@@ -98,38 +92,38 @@ def check_fileheader(fn, lines):
     for lno, l in enumerate(lines):
         llist.append(l)
         if lno == 0:
-            if l == b('# -*- coding: rot13 -*-\n'):
+            if l == b'# -*- coding: rot13 -*-\n':
                 # special-case pony package
                 return
-            elif l != b('# -*- coding: utf-8 -*-\n'):
+            elif l != b'# -*- coding: utf-8 -*-\n':
                 yield 1, "missing coding declaration"
         elif lno == 1:
-            if l != b('"""\n') and l != b('r"""\n'):
+            if l != b'"""\n' and l != b'r"""\n':
                 yield 2, 'missing docstring begin (""")'
             else:
                 docopen = True
         elif docopen:
-            if l == b('"""\n'):
+            if l == b'"""\n':
                 # end of docstring
                 if lno <= 4:
                     yield lno+c, "missing module name in docstring"
                 break
 
-            if l != b("\n") and l[:4] != b('    ') and docopen:
+            if l != b"\n" and l[:4] != b'    ' and docopen:
                 yield lno+c, "missing correct docstring indentation"
 
             if lno == 2:
                 # if not in package, don't check the module name
                 modname = fn[:-3].replace('/', '.').replace('.__init__', '')
                 while modname:
-                    if l.lower()[4:-1] == b(modname):
+                    if l.lower()[4:-1] == bytes(modname):
                         break
                     modname = '.'.join(modname.split('.')[1:])
                 else:
                     yield 3, "wrong module name in docstring heading"
                 modnamelen = len(l.strip())
             elif lno == 3:
-                if l.strip() != modnamelen * b("~"):
+                if l.strip() != modnamelen * b"~":
                     yield 4, "wrong module name underline, should be ~~~...~"
 
     else:
@@ -152,16 +146,16 @@ def check_fileheader(fn, lines):
 @checker('.py', '.html', '.rst')
 def check_whitespace_and_spelling(fn, lines):
     for lno, line in enumerate(lines):
-        if b("\t") in line:
+        if b"\t" in line:
             yield lno+1, "OMG TABS!!!1 "
-        if line[:-1].rstrip(b(' \t')) != line[:-1]:
+        if line[:-1].rstrip(b' \t') != line[:-1]:
             yield lno+1, "trailing whitespace"
         for word in misspellings:
-            if word in line and b('ALLOW-MISSPELLING') not in line:
+            if word in line and b'ALLOW-MISSPELLING' not in line:
                 yield lno+1, '"%s" used' % word
 
 
-bad_tags = map(b, ['<u>', '<s>', '<strike>', '<center>', '<font'])
+bad_tags = [b'<u>', b'<s>', b'<strike>', b'<center>', b'<font']
 
 @checker('.html')
 def check_xhtml(fn, lines):