Wrap py3 iterators with list() for each place that expects a list object. Refs #1350.

This commit is contained in:
Takayuki Shimizukawa 2014-04-30 23:25:44 +09:00
parent 6ae3b68859
commit 2d1549b35a
21 changed files with 35 additions and 37 deletions

View File

@ -110,7 +110,7 @@ class I18nBuilder(Builder):
for node, entries in traverse_translatable_index(doctree): for node, entries in traverse_translatable_index(doctree):
for typ, msg, tid, main in entries: for typ, msg, tid, main in entries:
for m in split_index_msg(typ, msg): for m in split_index_msg(typ, msg):
if typ == 'pair' and m in pairindextypes.values(): if typ == 'pair' and m in list(pairindextypes.values()):
# avoid built-in translated message was incorporated # avoid built-in translated message was incorporated
# in 'sphinx.util.nodes.process_index_entry' # in 'sphinx.util.nodes.process_index_entry'
continue continue

View File

@ -57,7 +57,7 @@ class LaTeXBuilder(Builder):
return self.get_target_uri(to, typ) return self.get_target_uri(to, typ)
def init_document_data(self): def init_document_data(self):
preliminary_document_data = map(list, self.config.latex_documents) preliminary_document_data = [list(x) for x in self.config.latex_documents]
if not preliminary_document_data: if not preliminary_document_data:
self.warn('no "latex_documents" config value found; no documents ' self.warn('no "latex_documents" config value found; no documents '
'will be written') 'will be written')

View File

@ -89,7 +89,7 @@ class CheckExternalLinksBuilder(Builder):
name = 'linkcheck' name = 'linkcheck'
def init(self): def init(self):
self.to_ignore = map(re.compile, self.app.config.linkcheck_ignore) self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.good = set() self.good = set()
self.broken = {} self.broken = {}
self.redirected = {} self.redirected = {}

View File

@ -108,7 +108,7 @@ class TexinfoBuilder(Builder):
return self.get_target_uri(to, typ) return self.get_target_uri(to, typ)
def init_document_data(self): def init_document_data(self):
preliminary_document_data = map(list, self.config.texinfo_documents) preliminary_document_data = [list(x) for x in self.config.texinfo_documents]
if not preliminary_document_data: if not preliminary_document_data:
self.warn('no "texinfo_documents" config value found; no documents ' self.warn('no "texinfo_documents" config value found; no documents '
'will be written') 'will be written')

View File

@ -1268,7 +1268,7 @@ class CPPDomain(Domain):
} }
def clear_doc(self, docname): def clear_doc(self, docname):
for fullname, (fn, _, _) in self.data['objects'].items(): for fullname, (fn, _, _) in list(self.data['objects'].items()):
if fn == docname: if fn == docname:
del self.data['objects'][fullname] del self.data['objects'][fullname]

View File

@ -1560,7 +1560,7 @@ class BuildEnvironment:
if lckey[0:1] in lcletters: if lckey[0:1] in lcletters:
return chr(127) + lckey return chr(127) + lckey
return lckey return lckey
newlist = new.items() newlist = list(new.items())
newlist.sort(key=keyfunc) newlist.sort(key=keyfunc)
if group_entries: if group_entries:

View File

@ -110,14 +110,11 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
# read # read
items = find_autosummary_in_files(sources) items = find_autosummary_in_files(sources)
# remove possible duplicates
items = dict([(item, True) for item in items]).keys()
# keep track of new files # keep track of new files
new_files = [] new_files = []
# write # write
for name, path, template_name in sorted(items, key=str): for name, path, template_name in sorted(set(items), key=str):
if path is None: if path is None:
# The corresponding autosummary:: directive did not have # The corresponding autosummary:: directive did not have
# a :toctree: option # a :toctree: option

View File

@ -213,7 +213,7 @@ class CoverageBuilder(Builder):
try: try:
if self.config.coverage_write_headline: if self.config.coverage_write_headline:
write_header(op, 'Undocumented Python objects', '=') write_header(op, 'Undocumented Python objects', '=')
keys = self.py_undoc.keys() keys = list(self.py_undoc.keys())
keys.sort() keys.sort()
for name in keys: for name in keys:
undoc = self.py_undoc[name] undoc = self.py_undoc[name]

View File

@ -143,7 +143,7 @@ class InheritanceGraph(object):
displayed node names. displayed node names.
""" """
all_classes = {} all_classes = {}
builtins = vars(__builtin__).values() builtins = list(vars(__builtin__).values())
def recurse(cls): def recurse(cls):
if not show_builtins and cls in builtins: if not show_builtins and cls in builtins:

View File

@ -114,7 +114,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
self.pathchain = pathchain self.pathchain = pathchain
# make the paths into loaders # make the paths into loaders
self.loaders = map(SphinxFileSystemLoader, loaderchain) self.loaders = [SphinxFileSystemLoader(x) for x in loaderchain]
use_i18n = builder.app.translator is not None use_i18n = builder.app.translator is not None
extensions = use_i18n and ['jinja2.ext.i18n'] or [] extensions = use_i18n and ['jinja2.ext.i18n'] or []

View File

@ -335,7 +335,7 @@ class ParserGenerator(object):
try: try:
msg = msg % args msg = msg % args
except: except:
msg = " ".join([msg] + map(str, args)) msg = " ".join([msg] + [str(x) for x in args])
raise SyntaxError(msg, (self.filename, self.end[0], raise SyntaxError(msg, (self.filename, self.end[0],
self.end[1], self.line)) self.end[1], self.line))

View File

@ -97,8 +97,9 @@ ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
PseudoExtras = group(r'\\\r?\n', Comment, Triple) PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map( tokenprog, pseudoprog, single3prog, double3prog = [
re.compile, (Token, PseudoToken, Single3, Double3)) re.compile(x) for x in (Token, PseudoToken, Single3, Double3)
]
endprogs = {"'": re.compile(Single), '"': re.compile(Double), endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog, "'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog, "r'''": single3prog, 'r"""': double3prog,

View File

@ -316,8 +316,8 @@ class IndexBuilder(object):
def freeze(self): def freeze(self):
"""Create a usable data structure for serializing.""" """Create a usable data structure for serializing."""
filenames = self._titles.keys() filenames = list(self._titles.keys())
titles = self._titles.values() titles = list(self._titles.values())
fn2index = dict((f, i) for (i, f) in enumerate(filenames)) fn2index = dict((f, i) for (i, f) in enumerate(filenames))
terms, title_terms = self.get_terms(fn2index) terms, title_terms = self.get_terms(fn2index)

View File

@ -368,7 +368,7 @@ def rpartition(s, t):
def split_into(n, type, value): def split_into(n, type, value):
"""Split an index entry into a given number of parts at semicolons.""" """Split an index entry into a given number of parts at semicolons."""
parts = map(lambda x: x.strip(), value.split(';', n-1)) parts = [x.strip() for x in value.split(';', n-1)]
if sum(1 for part in parts if part) < n: if sum(1 for part in parts if part) < n:
raise ValueError('invalid %s index entry %r' % (type, value)) raise ValueError('invalid %s index entry %r' % (type, value))
return parts return parts

View File

@ -1137,21 +1137,21 @@ class LaTeXTranslator(nodes.NodeVisitor):
p = scre.sub('!', self.encode(string)) p = scre.sub('!', self.encode(string))
self.body.append(r'\index{%s%s}' % (p, m)) self.body.append(r'\index{%s%s}' % (p, m))
elif type == 'pair': elif type == 'pair':
p1, p2 = map(self.encode, split_into(2, 'pair', string)) p1, p2 = [self.encode(x) for x in split_into(2, 'pair', string)]
self.body.append(r'\index{%s!%s%s}\index{%s!%s%s}' % self.body.append(r'\index{%s!%s%s}\index{%s!%s%s}' %
(p1, p2, m, p2, p1, m)) (p1, p2, m, p2, p1, m))
elif type == 'triple': elif type == 'triple':
p1, p2, p3 = map(self.encode, p1, p2, p3 = [self.encode(x)
split_into(3, 'triple', string)) for x in split_into(3, 'triple', string)]
self.body.append( self.body.append(
r'\index{%s!%s %s%s}\index{%s!%s, %s%s}' r'\index{%s!%s %s%s}\index{%s!%s, %s%s}'
r'\index{%s!%s %s%s}' % r'\index{%s!%s %s%s}' %
(p1, p2, p3, m, p2, p3, p1, m, p3, p1, p2, m)) (p1, p2, p3, m, p2, p3, p1, m, p3, p1, p2, m))
elif type == 'see': elif type == 'see':
p1, p2 = map(self.encode, split_into(2, 'see', string)) p1, p2 = [self.encode(x) for x in split_into(2, 'see', string)]
self.body.append(r'\index{%s|see{%s}}' % (p1, p2)) self.body.append(r'\index{%s|see{%s}}' % (p1, p2))
elif type == 'seealso': elif type == 'seealso':
p1, p2 = map(self.encode, split_into(2, 'seealso', string)) p1, p2 = [self.encode(x) for x in split_into(2, 'seealso', string)]
self.body.append(r'\index{%s|see{%s}}' % (p1, p2)) self.body.append(r'\index{%s|see{%s}}' % (p1, p2))
else: else:
self.builder.warn( self.builder.warn(

View File

@ -488,7 +488,7 @@ class TextTranslator(nodes.NodeVisitor):
for i, cell in enumerate(line): for i, cell in enumerate(line):
par = my_wrap(cell, width=colwidths[i]) par = my_wrap(cell, width=colwidths[i])
if par: if par:
maxwidth = max(map(column_width, par)) maxwidth = max(column_width(x) for x in par)
else: else:
maxwidth = 0 maxwidth = 0
realwidths[i] = max(realwidths[i], maxwidth) realwidths[i] = max(realwidths[i], maxwidth)

View File

@ -350,7 +350,7 @@ class coverage:
'-o:': 'omit=', '-o:': 'omit=',
} }
short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '') short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '')
long_opts = optmap.values() long_opts = list(optmap.values())
options, args = getopt.getopt(argv, short_opts, long_opts) options, args = getopt.getopt(argv, short_opts, long_opts)
for o, a in options: for o, a in options:
if o in optmap: if o in optmap:
@ -401,7 +401,7 @@ class coverage:
if settings.get('collect'): if settings.get('collect'):
self.collect() self.collect()
if not args: if not args:
args = self.cexecuted.keys() args = list(self.cexecuted.keys())
ignore_errors = settings.get('ignore-errors') ignore_errors = settings.get('ignore-errors')
show_missing = settings.get('show-missing') show_missing = settings.get('show-missing')
@ -743,9 +743,9 @@ class coverage:
visitor = StatementFindingAstVisitor(statements, excluded, suite_spots) visitor = StatementFindingAstVisitor(statements, excluded, suite_spots)
compiler.walk(ast, visitor, walker=visitor) compiler.walk(ast, visitor, walker=visitor)
lines = statements.keys() lines = list(statements.keys())
lines.sort() lines.sort()
excluded_lines = excluded.keys() excluded_lines = list(excluded.keys())
excluded_lines.sort() excluded_lines.sort()
return lines, excluded_lines, suite_spots return lines, excluded_lines, suite_spots
@ -850,7 +850,7 @@ class coverage:
morfs = self.filter_by_prefix(morfs, omit_prefixes) morfs = self.filter_by_prefix(morfs, omit_prefixes)
morfs.sort(self.morf_name_compare) morfs.sort(self.morf_name_compare)
max_name = max([5,] + map(len, map(self.morf_name, morfs))) max_name = max([5,] + list(map(len, map(self.morf_name, morfs))))
fmt_name = "%%- %ds " % max_name fmt_name = "%%- %ds " % max_name
fmt_err = fmt_name + "%s: %s" fmt_err = fmt_name + "%s: %s"
header = fmt_name % "Name" + " Stmts Exec Cover" header = fmt_name % "Name" + " Stmts Exec Cover"

View File

@ -864,7 +864,7 @@ def _serialize_xml(write, elem, encoding, qnames, namespaces):
_serialize_xml(write, e, encoding, qnames, None) _serialize_xml(write, e, encoding, qnames, None)
else: else:
write("<" + tag) write("<" + tag)
items = elem.items() items = list(elem.items())
if items or namespaces: if items or namespaces:
items.sort() # lexical order items.sort() # lexical order
for k, v in items: for k, v in items:
@ -876,7 +876,7 @@ def _serialize_xml(write, elem, encoding, qnames, namespaces):
v = _escape_attrib(v, encoding) v = _escape_attrib(v, encoding)
write(" %s=\"%s\"" % (qnames[k], v)) write(" %s=\"%s\"" % (qnames[k], v))
if namespaces: if namespaces:
items = namespaces.items() items = list(namespaces.items())
items.sort(key=lambda x: x[1]) # sort on prefix items.sort(key=lambda x: x[1]) # sort on prefix
for v, k in items: for v, k in items:
if k: if k:
@ -921,7 +921,7 @@ def _serialize_html(write, elem, encoding, qnames, namespaces):
_serialize_html(write, e, encoding, qnames, None) _serialize_html(write, e, encoding, qnames, None)
else: else:
write("<" + tag) write("<" + tag)
items = elem.items() items = list(elem.items())
if items or namespaces: if items or namespaces:
items.sort() # lexical order items.sort() # lexical order
for k, v in items: for k, v in items:
@ -934,7 +934,7 @@ def _serialize_html(write, elem, encoding, qnames, namespaces):
# FIXME: handle boolean attributes # FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v)) write(" %s=\"%s\"" % (qnames[k], v))
if namespaces: if namespaces:
items = namespaces.items() items = list(namespaces.items())
items.sort(key=lambda x: x[1]) # sort on prefix items.sort(key=lambda x: x[1]) # sort on prefix
for v, k in items: for v, k in items:
if k: if k:

View File

@ -190,7 +190,7 @@ class path(text_type):
""" """
Joins the path with the argument given and returns the result. Joins the path with the argument given and returns the result.
""" """
return self.__class__(os.path.join(self, *map(self.__class__, args))) return self.__class__(os.path.join(self, *list(map(self.__class__, args))))
__div__ = __truediv__ = joinpath __div__ = __truediv__ = joinpath

View File

@ -35,7 +35,7 @@ def test_mangle_signature():
(a=1, b=<SomeClass: a, b, c>, c=3) :: ([a, b, c]) (a=1, b=<SomeClass: a, b, c>, c=3) :: ([a, b, c])
""" """
TEST = [map(lambda x: x.strip(), x.split("::")) for x in TEST.split("\n") TEST = [[y.strip() for y in x.split("::")] for x in TEST.split("\n")
if '::' in x] if '::' in x]
for inp, outp in TEST: for inp, outp in TEST:
res = mangle_signature(inp).strip().replace(u"\u00a0", " ") res = mangle_signature(inp).strip().replace(u"\u00a0", " ")

View File

@ -98,7 +98,7 @@ def assert_elem(elem, texts=None, refs=None, names=None):
_texts = elem_gettexts(elem) _texts = elem_gettexts(elem)
assert _texts == texts assert _texts == texts
if refs is not None: if refs is not None:
_refs = map(elem_getref, elem.findall('reference')) _refs = [elem_getref(x) for x in elem.findall('reference')]
assert _refs == refs assert _refs == refs
if names is not None: if names is not None:
_names = elem.attrib.get('names').split() _names = elem.attrib.get('names').split()