Merged in shimizukawa/sphinx-py3-native (pull request #243)

native py2/py3 support without 2to3. refs #1350
Takayuki Shimizukawa 2014-05-27 23:01:35 +09:00
commit a335414b81
44 changed files with 192 additions and 243 deletions

View File

@@ -16,6 +1,7 @@ Incompatible changes
New features
------------
* Add support for Python 3.4.
* Added ``sphinx.ext.napoleon`` extension for NumPy and Google style docstring
support.
* PR#214: Added stemming support for 14 languages, so that the built-in document

View File

@@ -1,12 +0,0 @@
from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import Name
class FixAltUnicode(BaseFix):
PATTERN = """
func=funcdef< 'def' name='__unicode__'
parameters< '(' NAME ')' > any+ >
"""
def transform(self, node, results):
name = results['name']
name.replace(Name('__str__', prefix=name.prefix))

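With the lib2to3 fixer deleted, text conversion has to be written portably in the source itself. A minimal sketch of the equivalent single-codebase pattern, using the six dependency this changeset already relies on (the Node class is hypothetical):

    from six import python_2_unicode_compatible

    @python_2_unicode_compatible   # on Python 2, turns __str__ into __unicode__
    class Node(object):
        def __str__(self):         # Python 3 calls __str__ directly
            return 'a node'
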
View File

@@ -52,14 +52,6 @@ if (3, 0) <= sys.version_info < (3, 3):
else: # 2.6, 2.7, 3.3 or later
requires.append('Jinja2>=2.3')
# tell distribute to use 2to3 with our own fixers
extra = {}
if sys.version_info >= (3, 0):
extra.update(
use_2to3=True,
use_2to3_fixers=['custom_fixers']
)
# Provide a "compile_catalog" command that also creates the translated
# JavaScript files if Babel is available.
@@ -181,7 +173,7 @@ setup(
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(exclude=['custom_fixers', 'test']),
packages=find_packages(exclude=['test']),
include_package_data=True,
entry_points={
'console_scripts': [
@@ -196,5 +188,4 @@ setup(
},
install_requires=requires,
cmdclass=cmdclass,
**extra
)

View File

@@ -10,6 +10,7 @@
"""
from __future__ import with_statement
from __future__ import unicode_literals
from os import path, walk
from codecs import open
@@ -27,7 +28,7 @@ from sphinx.util.osutil import safe_relpath, ensuredir, find_catalog, SEP
from sphinx.util.console import darkgreen, purple, bold
from sphinx.locale import pairindextypes
POHEADER = ur"""
POHEADER = r"""
# SOME DESCRIPTIVE TITLE.
# Copyright (C) %(copyright)s
# This file is distributed under the same license as the %(project)s package.
@@ -204,19 +205,19 @@ class MessageCatalogBuilder(I18nBuilder):
if self.config.gettext_location:
# generate "#: file1:line1\n#: file2:line2 ..."
pofile.write(u"#: %s\n" % "\n#: ".join("%s:%s" %
pofile.write("#: %s\n" % "\n#: ".join("%s:%s" %
(safe_relpath(source, self.outdir), line)
for source, line, _ in positions))
if self.config.gettext_uuid:
# generate "# uuid1\n# uuid2\n ..."
pofile.write(u"# %s\n" % "\n# ".join(
pofile.write("# %s\n" % "\n# ".join(
uid for _, _, uid in positions))
# message contains *one* line of text ready for translation
message = message.replace(u'\\', ur'\\'). \
replace(u'"', ur'\"'). \
replace(u'\n', u'\\n"\n"')
pofile.write(u'msgid "%s"\nmsgstr ""\n\n' % message)
message = message.replace('\\', r'\\'). \
replace('"', r'\"'). \
replace('\n', '\\n"\n"')
pofile.write('msgid "%s"\nmsgstr ""\n\n' % message)
finally:
pofile.close()

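The 2.x-only ur'' prefix is a SyntaxError on every Python 3 release, so the raw strings above become plain r'' literals and rely on the newly imported unicode_literals to stay text on Python 2. A minimal illustration:

    from __future__ import unicode_literals

    # ur'\\' cannot even be parsed by Python 3; this works on both:
    escaped = 'a\\b'.replace('\\', r'\\')
    assert escaped == 'a\\\\b'
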
View File

@@ -32,7 +32,6 @@ from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \
movefile, ustrftime, copyfile
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.matching import patmatch, compile_matchers
from sphinx.util.pycompat import b
from sphinx.locale import _
from sphinx.search import js_index
from sphinx.theming import Theme
@@ -221,7 +220,7 @@ class StandaloneHTMLBuilder(Builder):
"""Utility: Render a lone doctree node."""
if node is None:
return {'fragment': ''}
doc = new_document(b('<partial node>'))
doc = new_document(b'<partial node>')
doc.append(node)
if self._publisher is None:

View File

@@ -57,7 +57,7 @@ class LaTeXBuilder(Builder):
return self.get_target_uri(to, typ)
def init_document_data(self):
preliminary_document_data = map(list, self.config.latex_documents)
preliminary_document_data = [list(x) for x in self.config.latex_documents]
if not preliminary_document_data:
self.warn('no "latex_documents" config value found; no documents '
'will be written')

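On Python 3, map() returns a lazy iterator: it is always truthy (so the "if not preliminary_document_data" guard would never fire), it has no len(), and it can be consumed only once. A list comprehension restores real-list behaviour on both interpreters. In isolation:

    pairs = [('index', 'Manual'), ('api', 'Reference')]
    docs = map(list, pairs)
    # On Python 3, docs is a lazy map object: always truthy, no len(),
    # consumable only once.  A comprehension gives a real list on 2.x and 3.x:
    docs = [list(p) for p in pairs]
    assert len(docs) == 2 and docs[0] == ['index', 'Manual']
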
View File

@@ -89,7 +89,7 @@ class CheckExternalLinksBuilder(Builder):
name = 'linkcheck'
def init(self):
self.to_ignore = map(re.compile, self.app.config.linkcheck_ignore)
self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.good = set()
self.broken = {}
self.redirected = {}

View File

@@ -108,7 +108,7 @@ class TexinfoBuilder(Builder):
return self.get_target_uri(to, typ)
def init_document_data(self):
preliminary_document_data = map(list, self.config.texinfo_documents)
preliminary_document_data = [list(x) for x in self.config.texinfo_documents]
if not preliminary_document_data:
self.warn('no "texinfo_documents" config value found; no documents '
'will be written')

View File

@@ -89,7 +89,7 @@ def main(argv):
try:
opts, args = getopt.getopt(argv[1:], 'ab:t:d:c:CD:A:nNEqQWw:PThvj:',
['help', 'version'])
except getopt.error, err:
except getopt.error as err:
usage(argv, 'Error: %s' % err)
return 1

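Python 3 removed the comma form of the except clause; the "as" spelling is understood by Python 2.6+ and 3.x alike, making it the only syntax a shared codebase can use:

    try:
        int('not a number')
    except ValueError as err:   # 'except ValueError, err:' is a SyntaxError on 3.x
        print('parse failed: %s' % err)
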
View File

@@ -18,9 +18,9 @@ from six import PY3, iteritems, string_types, binary_type, integer_types
from sphinx.errors import ConfigError
from sphinx.locale import l_
from sphinx.util.osutil import make_filename
from sphinx.util.pycompat import b, execfile_
from sphinx.util.pycompat import execfile_
nonascii_re = re.compile(b(r'[\x80-\xff]'))
nonascii_re = re.compile(br'[\x80-\xff]')
CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
if PY3:

View File

@@ -1268,7 +1268,7 @@ class CPPDomain(Domain):
}
def clear_doc(self, docname):
for fullname, (fn, _, _) in self.data['objects'].items():
for fullname, (fn, _, _) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]

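dict.items() is a live view on Python 3, and deleting entries while iterating over it raises RuntimeError; list() snapshots the pairs first, which is also harmless on Python 2. A standalone sketch of the pattern:

    objects = {'spam': 'doc1', 'eggs': 'doc2'}
    for fullname, docname in list(objects.items()):
        if docname == 'doc1':
            del objects[fullname]    # safe: iterating over a copy
    assert objects == {'eggs': 'doc2'}
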
View File

@@ -1560,8 +1560,7 @@ class BuildEnvironment:
if lckey[0:1] in lcletters:
return chr(127) + lckey
return lckey
newlist = new.items()
newlist.sort(key=keyfunc)
newlist = sorted(new.items(), key=keyfunc)
if group_entries:
# fixup entries: transform

View File

@@ -54,9 +54,10 @@ class DefDict(dict):
return dict.__getitem__(self, key)
except KeyError:
return self.default
def __nonzero__(self):
def __bool__(self):
# docutils check "if option_spec"
return True
__nonzero__ = __bool__ # for python2 compatibility
identity = lambda x: x

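Truth testing consults __bool__ on Python 3 but __nonzero__ on Python 2; defining __bool__ once and aliasing it, as above, keeps a single implementation. In isolation:

    class AlwaysTrue(dict):
        def __bool__(self):          # Python 3 truth hook
            return True
        __nonzero__ = __bool__       # Python 2 looks for this name instead

    assert bool(AlwaysTrue())        # truthy even while the dict is empty
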
View File

@@ -110,14 +110,11 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
# read
items = find_autosummary_in_files(sources)
# remove possible duplicates
items = dict([(item, True) for item in items]).keys()
# keep track of new files
new_files = []
# write
for name, path, template_name in sorted(items, key=str):
for name, path, template_name in sorted(set(items), key=str):
if path is None:
# The corresponding autosummary:: directive did not have
# a :toctree: option

View File

@@ -213,8 +213,7 @@ class CoverageBuilder(Builder):
try:
if self.config.coverage_write_headline:
write_header(op, 'Undocumented Python objects', '=')
keys = self.py_undoc.keys()
keys.sort()
keys = sorted(self.py_undoc.keys())
for name in keys:
undoc = self.py_undoc[name]
if 'error' in undoc:

View File

@@ -38,7 +38,6 @@ from docutils.utils import relative_path
from sphinx.locale import _
from sphinx.builders.html import INVENTORY_FILENAME
from sphinx.util.pycompat import b
handlers = [request.ProxyHandler(), request.HTTPRedirectHandler(),
@@ -86,19 +85,19 @@ def read_inventory_v2(f, uri, join, bufsize=16*1024):
def read_chunks():
decompressor = zlib.decompressobj()
for chunk in iter(lambda: f.read(bufsize), b('')):
for chunk in iter(lambda: f.read(bufsize), b''):
yield decompressor.decompress(chunk)
yield decompressor.flush()
def split_lines(iter):
buf = b('')
buf = b''
for chunk in iter:
buf += chunk
lineend = buf.find(b('\n'))
lineend = buf.find(b'\n')
while lineend != -1:
yield buf[:lineend].decode('utf-8')
buf = buf[lineend+1:]
lineend = buf.find(b('\n'))
lineend = buf.find(b'\n')
assert not buf
for line in split_lines(read_chunks()):

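iter() with a callable and a sentinel keeps calling until the sentinel value comes back, and f.read() returns bytes here, so the sentinel must be the bytes literal b'' rather than the removed b('') helper call. Self-contained:

    import io

    f = io.BytesIO(b'abcdef')
    chunks = list(iter(lambda: f.read(2), b''))   # stop once read() returns b''
    assert chunks == [b'ab', b'cd', b'ef']
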
View File

@@ -27,7 +27,7 @@ from docutils import nodes
from sphinx.errors import SphinxError
from sphinx.util.png import read_png_depth, write_png_depth
from sphinx.util.osutil import ensuredir, ENOENT
from sphinx.util.pycompat import b, sys_encoding
from sphinx.util.pycompat import sys_encoding
from sphinx.ext.mathbase import setup_math as mathbase_setup, wrap_displaymath
class MathExtError(SphinxError):
@@ -67,7 +67,7 @@ DOC_BODY_PREVIEW = r'''
\end{document}
'''
depth_re = re.compile(b(r'\[\d+ depth=(-?\d+)\]'))
depth_re = re.compile(br'\[\d+ depth=(-?\d+)\]')
def render_math(self, math):
"""Render the LaTeX math expression *math* using latex and dvipng.

View File

@@ -114,7 +114,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
self.pathchain = pathchain
# make the paths into loaders
self.loaders = map(SphinxFileSystemLoader, loaderchain)
self.loaders = [SphinxFileSystemLoader(x) for x in loaderchain]
use_i18n = builder.app.translator is not None
extensions = use_i18n and ['jinja2.ext.i18n'] or []

View File

@@ -60,8 +60,9 @@ class _TranslationProxy(UserString, object):
def __contains__(self, key):
return key in self.data
def __nonzero__(self):
def __bool__(self):
return bool(self.data)
__nonzero__ = __bool__ # for python2 compatibility
def __dir__(self):
return dir(text_type)

View File

@@ -335,7 +335,7 @@ class ParserGenerator(object):
try:
msg = msg % args
except:
msg = " ".join([msg] + map(str, args))
msg = " ".join([msg] + [str(x) for x in args])
raise SyntaxError(msg, (self.filename, self.end[0],
self.end[1], self.line))
@@ -353,7 +353,7 @@ class DFAState(object):
def __init__(self, nfaset, final):
assert isinstance(nfaset, dict)
assert isinstance(iter(nfaset).next(), NFAState)
assert isinstance(next(iter(nfaset)), NFAState)
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset

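Iterators lost their .next() method on Python 3 (it was renamed __next__); the next() builtin has existed since 2.6 and works on both lines:

    it = iter([1, 2, 3])
    # it.next()           # Python 2 only; AttributeError on Python 3
    assert next(it) == 1  # portable on 2.6+ and 3.x
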
View File

@@ -97,8 +97,9 @@ ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
tokenprog, pseudoprog, single3prog, double3prog = [
re.compile(x) for x in (Token, PseudoToken, Single3, Double3)
]
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,

View File

@@ -316,8 +316,8 @@ class IndexBuilder(object):
def freeze(self):
"""Create a usable data structure for serializing."""
filenames = self._titles.keys()
titles = self._titles.values()
filenames = list(self._titles.keys())
titles = list(self._titles.values())
fn2index = dict((f, i) for (i, f) in enumerate(filenames))
terms, title_terms = self.get_terms(fn2index)

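On Python 3, keys() and values() return view objects rather than lists, so code that indexes or serializes them needs explicit lists:

    titles = {'doc1': 'Intro', 'doc2': 'API'}
    filenames = list(titles.keys())    # a view object on 3.x without list()
    fn2index = dict((f, i) for (i, f) in enumerate(filenames))
    assert set(fn2index) == set(['doc1', 'doc2'])
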
View File

@@ -15,11 +15,10 @@ from __future__ import print_function
import sys
import os
import types
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError
from six import StringIO
from six import StringIO, string_types
from sphinx.application import Sphinx
from sphinx.util.console import darkred, nocolor, color_terminal
@@ -110,7 +109,7 @@ class BuildDoc(Command):
if val is None:
setattr(self, option, default)
return default
elif not isinstance(val, types.StringTypes):
elif not isinstance(val, string_types):
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
% (option, what, val))
return val

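types.StringTypes is gone on Python 3; six.string_types is (str, unicode) on 2.x and (str,) on 3.x, so the isinstance check behaves identically on both:

    from six import string_types

    def is_stringy(val):
        return isinstance(val, string_types)

    assert is_stringy('spam') and not is_stringy(42)
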
View File

@@ -368,7 +368,7 @@ def rpartition(s, t):
def split_into(n, type, value):
"""Split an index entry into a given number of parts at semicolons."""
parts = map(lambda x: x.strip(), value.split(';', n-1))
parts = [x.strip() for x in value.split(';', n-1)]
if sum(1 for part in parts if part) < n:
raise ValueError('invalid %s index entry %r' % (type, value))
return parts

View File

@@ -189,12 +189,6 @@ def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction):
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
if PY2:
bytes = str
else:
bytes = bytes
def abspath(pathdir):
pathdir = path.abspath(pathdir)
if isinstance(pathdir, bytes):

View File

@@ -12,14 +12,13 @@
import struct
import binascii
from sphinx.util.pycompat import b
LEN_IEND = 12
LEN_DEPTH = 22
DEPTH_CHUNK_LEN = struct.pack('!i', 10)
DEPTH_CHUNK_START = b('tEXtDepth\x00')
IEND_CHUNK = b('\x00\x00\x00\x00IEND\xAE\x42\x60\x82')
DEPTH_CHUNK_START = b'tEXtDepth\x00'
IEND_CHUNK = b'\x00\x00\x00\x00IEND\xAE\x42\x60\x82'
def read_png_depth(filename):

View File

@@ -19,9 +19,6 @@ from six import PY3, text_type, exec_
if PY3:
# Python 3
# the ubiquitous "bytes" helper functions
def b(s):
return s.encode('utf-8')
# prefix for Unicode strings
u = ''
from io import TextIOWrapper
@@ -57,7 +54,6 @@ if PY3:
else:
# Python 2
b = str
u = 'u'
# no need to refactor on 2.x versions
convert_with_2to3 = None
@@ -92,7 +88,7 @@ def execfile_(filepath, _globals):
# py26 accept only LF eol instead of CRLF
if sys.version_info[:2] == (2, 6):
source = source.replace(b('\r\n'), b('\n'))
source = source.replace(b'\r\n', b'\n')
# compile to a code object, handle syntax errors
filepath_enc = filepath.encode(fs_encoding)

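The b() helper existed because b'' literals are a SyntaxError before Python 2.6; with 2.6 as the oldest supported interpreter (see the tox envlist below), byte strings can be written directly and mean the same thing on 2.x and 3.x:

    source = b'line one\r\nline two\r\n'
    source = source.replace(b'\r\n', b'\n')   # bytes-in, bytes-out on both lines
    assert source == b'line one\nline two\n'
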
View File

@@ -9,93 +9,95 @@
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
tex_replacements = [
# map TeX special chars
(u'$', ur'\$'),
(u'%', ur'\%'),
(u'&', ur'\&'),
(u'#', ur'\#'),
(u'_', ur'\_'),
(u'{', ur'\{'),
(u'}', ur'\}'),
(u'[', ur'{[}'),
(u']', ur'{]}'),
(u'`', ur'{}`'),
(u'\\',ur'\textbackslash{}'),
(u'~', ur'\textasciitilde{}'),
(u'<', ur'\textless{}'),
(u'>', ur'\textgreater{}'),
(u'^', ur'\textasciicircum{}'),
('$', r'\$'),
('%', r'\%'),
('&', r'\&'),
('#', r'\#'),
('_', r'\_'),
('{', r'\{'),
('}', r'\}'),
('[', r'{[}'),
(']', r'{]}'),
('`', r'{}`'),
('\\',r'\textbackslash{}'),
('~', r'\textasciitilde{}'),
('<', r'\textless{}'),
('>', r'\textgreater{}'),
('^', r'\textasciicircum{}'),
# map special Unicode characters to TeX commands
(u'¶', ur'\P{}'),
(u'§', ur'\S{}'),
(u'€', ur'\texteuro{}'),
(u'∞', ur'\(\infty\)'),
(u'±', ur'\(\pm\)'),
(u'→', ur'\(\rightarrow\)'),
(u'‣', ur'\(\rightarrow\)'),
('¶', r'\P{}'),
('§', r'\S{}'),
('€', r'\texteuro{}'),
('∞', r'\(\infty\)'),
('±', r'\(\pm\)'),
('→', r'\(\rightarrow\)'),
('‣', r'\(\rightarrow\)'),
# used to separate -- in options
(u'', ur'{}'),
('', r'{}'),
# map some special Unicode characters to similar ASCII ones
(u'─', ur'-'),
(u'⎽', ur'\_'),
(u'╲', ur'\textbackslash{}'),
(u'|', ur'\textbar{}'),
(u'│', ur'\textbar{}'),
(u'ℯ', ur'e'),
(u'ⅈ', ur'i'),
(u'₁', ur'1'),
(u'₂', ur'2'),
('─', r'-'),
('⎽', r'\_'),
('╲', r'\textbackslash{}'),
('|', r'\textbar{}'),
('│', r'\textbar{}'),
('ℯ', r'e'),
('ⅈ', r'i'),
('₁', r'1'),
('₂', r'2'),
# map Greek alphabet
(u'α', ur'\(\alpha\)'),
(u'β', ur'\(\beta\)'),
(u'γ', ur'\(\gamma\)'),
(u'δ', ur'\(\delta\)'),
(u'ε', ur'\(\epsilon\)'),
(u'ζ', ur'\(\zeta\)'),
(u'η', ur'\(\eta\)'),
(u'θ', ur'\(\theta\)'),
(u'ι', ur'\(\iota\)'),
(u'κ', ur'\(\kappa\)'),
(u'λ', ur'\(\lambda\)'),
(u'μ', ur'\(\mu\)'),
(u'ν', ur'\(\nu\)'),
(u'ξ', ur'\(\xi\)'),
(u'ο', ur'o'),
(u'π', ur'\(\pi\)'),
(u'ρ', ur'\(\rho\)'),
(u'σ', ur'\(\sigma\)'),
(u'τ', ur'\(\tau\)'),
(u'υ', u'\\(\\upsilon\\)'),
(u'φ', ur'\(\phi\)'),
(u'χ', ur'\(\chi\)'),
(u'ψ', ur'\(\psi\)'),
(u'ω', ur'\(\omega\)'),
(u'Α', ur'A'),
(u'Β', ur'B'),
(u'Γ', ur'\(\Gamma\)'),
(u'Δ', ur'\(\Delta\)'),
(u'Ε', ur'E'),
(u'Ζ', ur'Z'),
(u'Η', ur'H'),
(u'Θ', ur'\(\Theta\)'),
(u'Ι', ur'I'),
(u'Κ', ur'K'),
(u'Λ', ur'\(\Lambda\)'),
(u'Μ', ur'M'),
(u'Ν', ur'N'),
(u'Ξ', ur'\(\Xi\)'),
(u'Ο', ur'O'),
(u'Π', ur'\(\Pi\)'),
(u'Ρ', ur'P'),
(u'Σ', ur'\(\Sigma\)'),
(u'Τ', ur'T'),
(u'Υ', u'\\(\\Upsilon\\)'),
(u'Φ', ur'\(\Phi\)'),
(u'Χ', ur'X'),
(u'Ψ', ur'\(\Psi\)'),
(u'Ω', ur'\(\Omega\)'),
(u'Ω', ur'\(\Omega\)'),
('α', r'\(\alpha\)'),
('β', r'\(\beta\)'),
('γ', r'\(\gamma\)'),
('δ', r'\(\delta\)'),
('ε', r'\(\epsilon\)'),
('ζ', r'\(\zeta\)'),
('η', r'\(\eta\)'),
('θ', r'\(\theta\)'),
('ι', r'\(\iota\)'),
('κ', r'\(\kappa\)'),
('λ', r'\(\lambda\)'),
('μ', r'\(\mu\)'),
('ν', r'\(\nu\)'),
('ξ', r'\(\xi\)'),
('ο', r'o'),
('π', r'\(\pi\)'),
('ρ', r'\(\rho\)'),
('σ', r'\(\sigma\)'),
('τ', r'\(\tau\)'),
('υ', '\\(\\upsilon\\)'),
('φ', r'\(\phi\)'),
('χ', r'\(\chi\)'),
('ψ', r'\(\psi\)'),
('ω', r'\(\omega\)'),
('Α', r'A'),
('Β', r'B'),
('Γ', r'\(\Gamma\)'),
('Δ', r'\(\Delta\)'),
('Ε', r'E'),
('Ζ', r'Z'),
('Η', r'H'),
('Θ', r'\(\Theta\)'),
('Ι', r'I'),
('Κ', r'K'),
('Λ', r'\(\Lambda\)'),
('Μ', r'M'),
('Ν', r'N'),
('Ξ', r'\(\Xi\)'),
('Ο', r'O'),
('Π', r'\(\Pi\)'),
('Ρ', r'P'),
('Σ', r'\(\Sigma\)'),
('Τ', r'T'),
('Υ', '\\(\\Upsilon\\)'),
('Φ', r'\(\Phi\)'),
('Χ', r'X'),
('Ψ', r'\(\Psi\)'),
('Ω', r'\(\Omega\)'),
('Ω', r'\(\Omega\)'),
]
tex_escape_map = {}
@@ -105,8 +107,8 @@ tex_hl_escape_map_new = {}
def init():
for a, b in tex_replacements:
tex_escape_map[ord(a)] = b
tex_replace_map[ord(a)] = u'_'
tex_replace_map[ord(a)] = '_'
for a, b in tex_replacements:
if a in u'[]{}\\': continue
if a in '[]{}\\': continue
tex_hl_escape_map_new[ord(a)] = b

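These pairs are folded by init() into ordinal-keyed dictionaries, the form that str.translate (unicode.translate on Python 2) accepts. A minimal sketch of applying the map, assuming sphinx.util.texescape is importable:

    from __future__ import unicode_literals
    from sphinx.util import texescape

    texescape.init()    # populate the module-level escape maps
    print('100% of $5'.translate(texescape.tex_escape_map))
    # prints: 100\% of \$5
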
View File

@@ -711,7 +711,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\n\\hline\n')
self.body.extend(self.tableheaders)
self.body.append('\\endhead\n\n')
self.body.append(ur'\hline \multicolumn{%s}{|r|}{{\textsf{%s}}} \\ \hline'
self.body.append(r'\hline \multicolumn{%s}{|r|}{{\textsf{%s}}} \\ \hline'
% (self.table.colcount,
_('Continued on next page')))
self.body.append('\n\\endfoot\n\n')
@@ -1137,21 +1137,21 @@ class LaTeXTranslator(nodes.NodeVisitor):
p = scre.sub('!', self.encode(string))
self.body.append(r'\index{%s%s}' % (p, m))
elif type == 'pair':
p1, p2 = map(self.encode, split_into(2, 'pair', string))
p1, p2 = [self.encode(x) for x in split_into(2, 'pair', string)]
self.body.append(r'\index{%s!%s%s}\index{%s!%s%s}' %
(p1, p2, m, p2, p1, m))
elif type == 'triple':
p1, p2, p3 = map(self.encode,
split_into(3, 'triple', string))
p1, p2, p3 = [self.encode(x)
for x in split_into(3, 'triple', string)]
self.body.append(
r'\index{%s!%s %s%s}\index{%s!%s, %s%s}'
r'\index{%s!%s %s%s}' %
(p1, p2, p3, m, p2, p3, p1, m, p3, p1, p2, m))
elif type == 'see':
p1, p2 = map(self.encode, split_into(2, 'see', string))
p1, p2 = [self.encode(x) for x in split_into(2, 'see', string)]
self.body.append(r'\index{%s|see{%s}}' % (p1, p2))
elif type == 'seealso':
p1, p2 = map(self.encode, split_into(2, 'seealso', string))
p1, p2 = [self.encode(x) for x in split_into(2, 'seealso', string)]
self.body.append(r'\index{%s|see{%s}}' % (p1, p2))
else:
self.builder.warn(

View File

@@ -488,7 +488,7 @@ class TextTranslator(nodes.NodeVisitor):
for i, cell in enumerate(line):
par = my_wrap(cell, width=colwidths[i])
if par:
maxwidth = max(map(column_width, par))
maxwidth = max(column_width(x) for x in par)
else:
maxwidth = 0
realwidths[i] = max(realwidths[i], maxwidth)

View File

@@ -401,7 +401,7 @@ class coverage:
if settings.get('collect'):
self.collect()
if not args:
args = self.cexecuted.keys()
args = list(self.cexecuted.keys())
ignore_errors = settings.get('ignore-errors')
show_missing = settings.get('show-missing')
@@ -743,10 +743,8 @@ class coverage:
visitor = StatementFindingAstVisitor(statements, excluded, suite_spots)
compiler.walk(ast, visitor, walker=visitor)
lines = statements.keys()
lines.sort()
excluded_lines = excluded.keys()
excluded_lines.sort()
lines = sorted(statements.keys())
excluded_lines = sorted(excluded.keys())
return lines, excluded_lines, suite_spots
# format_lines(statements, lines). Format a list of line numbers
@@ -850,7 +848,7 @@ class coverage:
morfs = self.filter_by_prefix(morfs, omit_prefixes)
morfs.sort(self.morf_name_compare)
max_name = max([5,] + map(len, map(self.morf_name, morfs)))
max_name = max(5, *map(len, map(self.morf_name, morfs)))
fmt_name = "%%- %ds " % max_name
fmt_err = fmt_name + "%s: %s"
header = fmt_name % "Name" + " Stmts Exec Cover"

View File

@@ -177,7 +177,7 @@ class _SelectorContext:
def find(elem, path):
try:
return findall(elem, path).next()
return next(findall(elem, path))
except StopIteration:
return None
@@ -194,17 +194,17 @@ def findall(elem, path):
if path[:1] == "/":
raise SyntaxError("cannot use absolute path on element")
stream = iter(xpath_tokenizer(path))
next = stream.next; token = next()
next_ = lambda: next(stream); token = next_()
selector = []
while 1:
try:
selector.append(ops[token[0]](next, token))
selector.append(ops[token[0]](next_, token))
except StopIteration:
raise SyntaxError("invalid path")
try:
token = next()
token = next_()
if token[0] == "/":
token = next()
token = next_()
except StopIteration:
break
_cache[path] = selector
@@ -220,7 +220,7 @@ def findall(elem, path):
def findtext(elem, path, default=None):
try:
elem = findall(elem, path).next()
elem = next(findall(elem, path))
return elem.text
except StopIteration:
return default

View File

@@ -246,7 +246,7 @@ class Element(object):
def __len__(self):
return len(self._children)
def __nonzero__(self):
def __bool__(self):
import warnings
warnings.warn(
"The behavior of this method will change in future versions. "
@@ -254,6 +254,7 @@ class Element(object):
FutureWarning
)
return len(self._children) != 0 # emulate old behaviour
__nonzero__ = __bool__ # for python2 compatibility
##
# Returns the given subelement.
@@ -866,7 +867,7 @@ def _serialize_xml(write, elem, encoding, qnames, namespaces):
write("<" + tag)
items = elem.items()
if items or namespaces:
items.sort() # lexical order
items = sorted(items) # lexical order
for k, v in items:
if isinstance(k, QName):
k = k.text
@@ -877,7 +878,7 @@ def _serialize_xml(write, elem, encoding, qnames, namespaces):
write(" %s=\"%s\"" % (qnames[k], v))
if namespaces:
items = namespaces.items()
items.sort(key=lambda x: x[1]) # sort on prefix
items = sorted(items, key=lambda x: x[1]) # sort on prefix
for v, k in items:
if k:
k = ":" + k
@@ -923,7 +924,7 @@ def _serialize_html(write, elem, encoding, qnames, namespaces):
write("<" + tag)
items = elem.items()
if items or namespaces:
items.sort() # lexical order
items = sorted(items) # lexical order
for k, v in items:
if isinstance(k, QName):
k = k.text
@@ -935,7 +936,7 @@ def _serialize_html(write, elem, encoding, qnames, namespaces):
write(" %s=\"%s\"" % (qnames[k], v))
if namespaces:
items = namespaces.items()
items.sort(key=lambda x: x[1]) # sort on prefix
items = sorted(items, key=lambda x: x[1]) # sort on prefix
for v, k in items:
if k:
k = ":" + k

View File

@@ -15,8 +15,6 @@ import sys
from os import path, chdir, listdir, environ
import shutil
from six import PY3
testroot = path.dirname(__file__) or '.'
if 'BUILD_TEST_PATH' in environ:
@@ -28,15 +26,9 @@ else:
newroot = path.join(newroot, listdir(newroot)[0], 'tests')
shutil.rmtree(newroot, ignore_errors=True)
if PY3:
print('Copying and converting sources to build/lib/tests...')
from distutils.util import copydir_run_2to3
copydir_run_2to3(testroot, newroot)
else:
# just copying test directory to parallel testing
print('Copying sources to build/lib/tests...')
shutil.copytree(testroot, newroot)
# just copying test directory to parallel testing
print('Copying sources to build/lib/tests...')
shutil.copytree(testroot, newroot)
# always test the sphinx package from build/lib/
sys.path.insert(0, path.abspath(path.join(newroot, path.pardir)))

View File

@@ -35,7 +35,7 @@ def test_mangle_signature():
(a=1, b=<SomeClass: a, b, c>, c=3) :: ([a, b, c])
"""
TEST = [map(lambda x: x.strip(), x.split("::")) for x in TEST.split("\n")
TEST = [[y.strip() for y in x.split("::")] for x in TEST.split("\n")
if '::' in x]
for inp, outp in TEST:
res = mangle_signature(inp).strip().replace(u"\u00a0", " ")

View File

@@ -266,7 +266,7 @@ if pygments:
(".//div[@class='inc-lines highlight-text']//pre",
r'^class Foo:\n pass\nclass Bar:\n$'),
(".//div[@class='inc-startend highlight-text']//pre",
ur'^foo = "Including Unicode characters: üöä"\n$'),
u'^foo = "Including Unicode characters: üöä"\\n$'),
(".//div[@class='inc-preappend highlight-text']//pre",
r'(?m)^START CODE$'),
(".//div[@class='inc-pyobj-dedent highlight-python']//span",

View File

@@ -15,7 +15,6 @@ from util import TestApp, with_app, with_tempdir, raises, raises_msg
from sphinx.config import Config
from sphinx.errors import ExtensionError, ConfigError, VersionRequirementError
from sphinx.util.pycompat import b
@with_app(confoverrides={'master_doc': 'master', 'nonexisting_value': 'True',
@@ -122,8 +121,8 @@ def test_needs_sphinx():
def test_config_eol(tmpdir):
# test config file's eol patterns: LF, CRLF
configfile = tmpdir / 'conf.py'
for eol in ('\n', '\r\n'):
configfile.write_bytes(b('project = "spam"' + eol))
for eol in (b'\n', b'\r\n'):
configfile.write_bytes(b'project = "spam"' + eol)
cfg = Config(tmpdir, 'conf.py', {}, None)
cfg.init_values(lambda warning: 1/0)
assert cfg.project == u'spam'

View File

@@ -38,7 +38,7 @@ def test_build(app):
undoc_py, undoc_c = pickle.loads((app.outdir / 'undoc.pickle').bytes())
assert len(undoc_c) == 1
# the key is the full path to the header file, which isn't testable
assert undoc_c.values()[0] == [('function', 'Py_SphinxTest')]
assert list(undoc_c.values())[0] == [('function', 'Py_SphinxTest')]
assert 'test_autodoc' in undoc_py
assert 'funcs' in undoc_py['test_autodoc']

View File

@@ -98,7 +98,7 @@ def assert_elem(elem, texts=None, refs=None, names=None):
_texts = elem_gettexts(elem)
assert _texts == texts
if refs is not None:
_refs = map(elem_getref, elem.findall('reference'))
_refs = [elem_getref(x) for x in elem.findall('reference')]
assert _refs == refs
if names is not None:
_names = elem.attrib.get('names').split()

View File

@@ -15,7 +15,6 @@ from docutils import frontend, utils, nodes
from docutils.parsers import rst
from sphinx.util import texescape
from sphinx.util.pycompat import b
from sphinx.writers.html import HTMLWriter, SmartyPantsHTMLTranslator
from sphinx.writers.latex import LaTeXWriter, LaTeXTranslator
@@ -54,7 +53,7 @@ class ForgivingLaTeXTranslator(LaTeXTranslator, ForgivingTranslator):
def verify_re(rst, html_expected, latex_expected):
document = utils.new_document(b('test data'), settings)
document = utils.new_document(b'test data', settings)
document['file'] = 'dummy'
parser.parse(rst, document)
for msg in document.traverse(nodes.system_message):
@@ -128,7 +127,7 @@ def test_inline():
def test_latex_escaping():
# correct escaping in normal mode
yield (verify, u'Γ\\\\∞$', None,
ur'\(\Gamma\)\textbackslash{}\(\infty\)\$')
r'\(\Gamma\)\textbackslash{}\(\infty\)\$')
# in verbatim code fragments
yield (verify, u'::\n\n\\∞${}', None,
u'\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n'
@@ -136,4 +135,4 @@ def test_latex_escaping():
u'\\end{Verbatim}')
# in URIs
yield (verify_re, u'`test <http://example.com/~me/>`_', None,
ur'\\href{http://example.com/~me/}{test}.*')
r'\\href{http://example.com/~me/}{test}.*')

View File

@@ -13,7 +13,6 @@ from docutils import frontend, utils
from docutils.parsers import rst
from sphinx.search import IndexBuilder
from sphinx.util.pycompat import b
settings = parser = None
@@ -32,7 +31,7 @@ test that non-comments are indexed: fermion
'''
def test_wordcollector():
doc = utils.new_document(b('test data'), settings)
doc = utils.new_document(b'test data', settings)
doc['file'] = 'dummy'
parser.parse(FILE_CONTENTS, doc)

View File

@@ -1,5 +1,5 @@
[tox]
envlist=py26,py27,py32,py33,pypy,du11,du10
envlist=py26,py27,py32,py33,py34,pypy,du11,du10
[testenv]
deps=

View File

@@ -17,12 +17,6 @@ import cStringIO
from optparse import OptionParser
from os.path import join, splitext, abspath
if sys.version_info >= (3, 0):
def b(s):
return s.encode('utf-8')
else:
b = str
checkers = {}
@@ -37,24 +31,24 @@ def checker(*suffixes, **kwds):
name_mail_re = r'[\w ]+(<.*?>)?'
copyright_re = re.compile(b(r'^ :copyright: Copyright 200\d(-20\d\d)? '
r'by %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re)))
license_re = re.compile(b(r" :license: (.*?).\n"))
copyright_2_re = re.compile(b(r'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re)))
coding_re = re.compile(b(r'coding[:=]\s*([-\w.]+)'))
not_ix_re = re.compile(b(r'\bnot\s+\S+?\s+i[sn]\s\S+'))
is_const_re = re.compile(b(r'if.*?==\s+(None|False|True)\b'))
copyright_re = re.compile(br'^ :copyright: Copyright 200\d(-20\d\d)? '
br'by %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re))
license_re = re.compile(br" :license: (.*?).\n")
copyright_2_re = re.compile(br'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re))
coding_re = re.compile(br'coding[:=]\s*([-\w.]+)')
not_ix_re = re.compile(br'\bnot\s+\S+?\s+i[sn]\s\S+')
is_const_re = re.compile(br'if.*?==\s+(None|False|True)\b')
misspellings = [b("developement"), b("adress"), # ALLOW-MISSPELLING
b("verificate"), b("informations")] # ALLOW-MISSPELLING
misspellings = [b"developement", b"adress", # ALLOW-MISSPELLING
b"verificate", b"informations"] # ALLOW-MISSPELLING
if sys.version_info < (3, 0):
@checker('.py')
def check_syntax(fn, lines):
try:
compile(b('').join(lines), fn, "exec")
compile(b''.join(lines), fn, "exec")
except SyntaxError as err:
yield 0, "not compilable: %s" % err
@@ -69,7 +63,7 @@ def check_style_and_encoding(fn, lines):
co = coding_re.search(line)
if co:
encoding = co.group(1).decode('ascii')
if line.strip().startswith(b('#')):
if line.strip().startswith(b'#'):
continue
#m = not_ix_re.search(line)
#if m:
@@ -89,7 +83,7 @@ def check_style_and_encoding(fn, lines):
def check_fileheader(fn, lines):
# line number correction
c = 1
if lines[0:1] == [b('#!/usr/bin/env python\n')]:
if lines[0:1] == [b'#!/usr/bin/env python\n']:
lines = lines[1:]
c = 2
@@ -98,38 +92,38 @@ def check_fileheader(fn, lines):
for lno, l in enumerate(lines):
llist.append(l)
if lno == 0:
if l == b('# -*- coding: rot13 -*-\n'):
if l == b'# -*- coding: rot13 -*-\n':
# special-case pony package
return
elif l != b('# -*- coding: utf-8 -*-\n'):
elif l != b'# -*- coding: utf-8 -*-\n':
yield 1, "missing coding declaration"
elif lno == 1:
if l != b('"""\n') and l != b('r"""\n'):
if l != b'"""\n' and l != b'r"""\n':
yield 2, 'missing docstring begin (""")'
else:
docopen = True
elif docopen:
if l == b('"""\n'):
if l == b'"""\n':
# end of docstring
if lno <= 4:
yield lno+c, "missing module name in docstring"
break
if l != b("\n") and l[:4] != b(' ') and docopen:
if l != b"\n" and l[:4] != b' ' and docopen:
yield lno+c, "missing correct docstring indentation"
if lno == 2:
# if not in package, don't check the module name
modname = fn[:-3].replace('/', '.').replace('.__init__', '')
while modname:
if l.lower()[4:-1] == b(modname):
if l.lower()[4:-1] == bytes(modname):
break
modname = '.'.join(modname.split('.')[1:])
else:
yield 3, "wrong module name in docstring heading"
modnamelen = len(l.strip())
elif lno == 3:
if l.strip() != modnamelen * b("~"):
if l.strip() != modnamelen * b"~":
yield 4, "wrong module name underline, should be ~~~...~"
else:
@@ -152,16 +146,16 @@ def check_fileheader(fn, lines):
@checker('.py', '.html', '.rst')
def check_whitespace_and_spelling(fn, lines):
for lno, line in enumerate(lines):
if b("\t") in line:
if b"\t" in line:
yield lno+1, "OMG TABS!!!1 "
if line[:-1].rstrip(b(' \t')) != line[:-1]:
if line[:-1].rstrip(b' \t') != line[:-1]:
yield lno+1, "trailing whitespace"
for word in misspellings:
if word in line and b('ALLOW-MISSPELLING') not in line:
if word in line and b'ALLOW-MISSPELLING' not in line:
yield lno+1, '"%s" used' % word
bad_tags = map(b, ['<u>', '<s>', '<strike>', '<center>', '<font'])
bad_tags = [b'<u>', b'<s>', b'<strike>', b'<center>', b'<font']
@checker('.html')
def check_xhtml(fn, lines):