Merge branch 'stable'

Commit 38d6c34f35 by Takeshi KOMIYA, 2017-01-12 13:03:53 +09:00
50 changed files with 150 additions and 149 deletions

@@ -88,6 +88,7 @@ Bugs fixed
* #3284: Sphinx crashes on parallel build with an extension which raises
  unserializable exception
* #3315: Bibliography crashes on latex build with docclass 'memoir'
+* #3328: Could not refer rubric implicitly
Release 1.5.1 (released Dec 13, 2016)

@@ -25,5 +25,5 @@ universal = 1
[flake8]
max-line-length = 95
-ignore = E113,E116,E221,E226,E241,E251,E901
+ignore = E116,E241,E251
exclude = .git,.tox,tests/*,build/*,sphinx/search/*,sphinx/pycode/pgen2/*,doc/ext/example*.py

@@ -30,7 +30,7 @@ if 'PYTHONWARNINGS' not in os.environ:
warnings.filterwarnings('ignore', "'U' mode is deprecated",
DeprecationWarning, module='docutils.io')
__version__ = '1.6'
__released__ = '1.6+' # used when Sphinx builds its own docs
# version info for better programmatic use

@@ -384,8 +384,8 @@ Note: By default this script will not overwrite already created files.""")
text += ' %s\n' % module
d = dict(
path = opts.destdir,
sep = False,
dot = '_',
project = opts.header,
author = opts.author or 'Author',
version = opts.version or '',

@@ -632,9 +632,9 @@ class Sphinx(object):
else:
# ignore invalid keys for compatibility
continue
-setattr(translator, 'visit_'+node.__name__, visit)
+setattr(translator, 'visit_' + node.__name__, visit)
if depart:
-setattr(translator, 'depart_'+node.__name__, depart)
+setattr(translator, 'depart_' + node.__name__, depart)
def add_enumerable_node(self, node, figtype, title_getter=None, **kwds):
# type: (nodes.Node, unicode, Callable, Any) -> None

@@ -141,7 +141,7 @@ class ChangesBuilder(Builder):
targetfn = path.join(self.outdir, 'rst', os_path(docname)) + '.html'
ensuredir(path.dirname(targetfn))
with codecs.open(targetfn, 'w', 'utf-8') as f: # type: ignore
-text = ''.join(hl(i+1, line) for (i, line) in enumerate(lines))
+text = ''.join(hl(i + 1, line) for (i, line) in enumerate(lines))
ctx = {
'filename': self.env.doc2path(docname, None),
'text': text

@@ -522,7 +522,7 @@ class StandaloneHTMLBuilder(Builder):
# additional pages from conf.py
for pagename, template in self.config.html_additional_pages.items():
-logger.info(' '+pagename, nonl=1)
+self.info(' ' + pagename, nonl=1)
self.handle_page(pagename, {}, template)
# the search page
@@ -1003,7 +1003,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
hashindex = refuri.find('#')
if hashindex < 0:
continue
-hashindex = refuri.find('#', hashindex+1)
+hashindex = refuri.find('#', hashindex + 1)
if hashindex >= 0:
refnode['refuri'] = fname + refuri[hashindex:]
@@ -1116,7 +1116,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
# additional pages from conf.py
for pagename, template in self.config.html_additional_pages.items():
-logger.info(' '+pagename, nonl=1)
+self.info(' ' + pagename, nonl=1)
self.handle_page(pagename, {}, template)
if self.config.html_use_opensearch:

@@ -215,12 +215,12 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
def build_hhx(self, outdir, outname):
logger.info('dumping stopword list...')
-with self.open_file(outdir, outname+'.stp') as f:
+with self.open_file(outdir, outname + '.stp') as f:
for word in sorted(stopwords):
print(word, file=f)
logger.info('writing project file...')
-with self.open_file(outdir, outname+'.hhp') as f:
+with self.open_file(outdir, outname + '.hhp') as f:
f.write(project_template % {
'outname': outname,
'title': self.config.html_title,
@@ -241,7 +241,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
file=f)
logger.info('writing TOC file...')
-with self.open_file(outdir, outname+'.hhc') as f:
+with self.open_file(outdir, outname + '.hhc') as f:
f.write(contents_header)
# special books
f.write('<LI> ' + object_sitemap % (self.config.html_short_title,
@@ -266,7 +266,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
if ullevel != 0:
f.write('<UL>\n')
for subnode in node:
-write_toc(subnode, ullevel+1)
+write_toc(subnode, ullevel + 1)
if ullevel != 0:
f.write('</UL>\n')
elif isinstance(node, addnodes.compact_paragraph):
@@ -282,7 +282,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
logger.info('writing index file...')
index = self.env.create_index(self)
-with self.open_file(outdir, outname+'.hhk') as f:
+with self.open_file(outdir, outname + '.hhk') as f:
f.write('<UL>\n')
def write_index(title, refs, subitems):

@@ -243,7 +243,7 @@ class CheckExternalLinksBuilder(Builder):
logger.info(darkgray('-local- ') + uri)
self.write_entry('local', docname, lineno, uri)
elif status == 'working':
logger.info(darkgreen('ok ') + uri + info)
elif status == 'broken':
self.write_entry('broken', docname, lineno, uri + ': ' + info)
if self.app.quiet or self.app.warningiserror:
@@ -261,7 +261,7 @@ class CheckExternalLinksBuilder(Builder):
}[code]
self.write_entry('redirected ' + text, docname, lineno,
uri + ' to ' + info)
logger.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode

@@ -98,7 +98,7 @@ project_template = u'''\
'''
section_template = '<section title="%(title)s" ref="%(ref)s"/>'
-file_template = ' '*12 + '<file>%(filename)s</file>'
+file_template = ' ' * 12 + '<file>%(filename)s</file>'
class QtHelpBuilder(StandaloneHTMLBuilder):
@@ -203,7 +203,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
nspace = nspace.lower()
# write the project file
-with codecs.open(path.join(outdir, outname+'.qhp'), 'w', 'utf-8') as f: # type: ignore
+with codecs.open(path.join(outdir, outname + '.qhp'), 'w', 'utf-8') as f: # type: ignore
f.write(project_template % { # type: ignore
'outname': htmlescape(outname),
'title': htmlescape(self.config.html_title),
@@ -220,7 +220,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')
logger.info('writing collection project file...')
-with codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8') as f: # type: ignore # NOQA
+with codecs.open(path.join(outdir, outname + '.qhcp'), 'w', 'utf-8') as f: # type: ignore # NOQA
f.write(collection_template % { # type: ignore
'outname': htmlescape(outname),
'title': htmlescape(self.config.html_short_title),
@@ -251,10 +251,10 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
title = htmlescape(refnode.astext()).replace('"', '&quot;')
item = '<section title="%(title)s" ref="%(ref)s">' % \
{'title': title, 'ref': link}
-parts.append(' '*4*indentlevel + item)
+parts.append(' ' * 4 * indentlevel + item)
for subnode in node.children[1]:
-parts.extend(self.write_toc(subnode, indentlevel+1))
+parts.extend(self.write_toc(subnode, indentlevel + 1))
-parts.append(' '*4*indentlevel + '</section>')
+parts.append(' ' * 4 * indentlevel + '</section>')
elif isinstance(node, nodes.list_item):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
@@ -288,10 +288,10 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
id = None
if id:
-item = ' '*12 + '<keyword name="%s" id="%s" ref="%s"/>' % (
+item = ' ' * 12 + '<keyword name="%s" id="%s" ref="%s"/>' % (
name, id, ref[1])
else:
-item = ' '*12 + '<keyword name="%s" ref="%s"/>' % (name, ref[1])
+item = ' ' * 12 + '<keyword name="%s" ref="%s"/>' % (name, ref[1])
item.encode('ascii', 'xmlcharrefreplace')
return item

@@ -136,7 +136,7 @@ class TexinfoBuilder(Builder):
'document %s', docname)
continue
self.document_data.append(entry) # type: ignore
-if docname.endswith(SEP+'index'):
+if docname.endswith(SEP + 'index'):
docname = docname[:-5]
self.titles.append((docname, entry[2]))
@@ -227,7 +227,7 @@ class TexinfoBuilder(Builder):
if self.images:
logger.info(bold('copying images...'), nonl=1)
for src, dest in iteritems(self.images):
-logger.info(' '+src, nonl=1)
+logger.info(' ' + src, nonl=1)
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, dest))
logger.info('')

@@ -141,7 +141,7 @@ class WebSupportBuilder(PickleHTMLBuilder):
# "show source" link
if ctx.get('sourcename'):
source_name = path.join(self.staticdir,
'_sources', os_path(ctx['sourcename']))
ensuredir(path.dirname(source_name))
copyfile(self.env.doc2path(pagename), source_name)

@@ -116,7 +116,7 @@ class CodeBlock(Directive):
if linespec:
try:
nlines = len(self.content)
-hl_lines = [x+1 for x in parselinenos(linespec, nlines)]
+hl_lines = [x + 1 for x in parselinenos(linespec, nlines)]
except ValueError as err:
document = self.state.document
return [document.reporter.warning(str(err), line=self.lineno)]
@@ -277,7 +277,7 @@ class LiteralInclude(Directive):
'Object named %r not found in include file %r' %
(objectname, filename), line=self.lineno)]
else:
-lines = lines[tags[objectname][1]-1: tags[objectname][2]-1]
+lines = lines[tags[objectname][1] - 1: tags[objectname][2] - 1]
if 'lineno-match' in self.options:
linenostart = tags[objectname][1]
@@ -309,7 +309,7 @@ class LiteralInclude(Directive):
linespec = self.options.get('emphasize-lines')
if linespec:
try:
-hl_lines = [x+1 for x in parselinenos(linespec, len(lines))]
+hl_lines = [x + 1 for x in parselinenos(linespec, len(lines))]
except ValueError as err:
return [document.reporter.warning(str(err), line=self.lineno)]
else:

@@ -215,7 +215,7 @@ class VersionChange(Directive):
text = versionlabels[self.name] % self.arguments[0]
if len(self.arguments) == 2:
inodes, messages = self.state.inline_text(self.arguments[1],
-self.lineno+1)
+self.lineno + 1)
para = nodes.paragraph(self.arguments[1], '', *inodes, translatable=False)
set_source_info(self, para)
node.append(para)
@@ -340,7 +340,7 @@ class HList(Directive):
index = 0
newnode = addnodes.hlist()
for column in range(ncolumns):
-endindex = index + (column < nmore and (npercol+1) or npercol)
+endindex = index + (column < nmore and (npercol + 1) or npercol)
col = addnodes.hlistcol()
col += nodes.bullet_list()
col[0] += fulllist.children[index:endindex]

@@ -85,7 +85,7 @@ class CObject(ObjectDescription):
# add cross-ref nodes for all words
for part in [_f for _f in wsplit_re.split(ctype) if _f]: # type: ignore
tnode = nodes.Text(part, part)
-if part[0] in string.ascii_letters+'_' and \
+if part[0] in string.ascii_letters + '_' and \
part not in self.stopwords:
pnode = addnodes.pending_xref(
'', refdomain='c', reftype='type', reftarget=part,
@@ -172,7 +172,7 @@ class CObject(ObjectDescription):
ctype, argname = arg.rsplit(' ', 1)
self._parse_type(param, ctype)
# separate by non-breaking space in the output
-param += nodes.emphasis(' '+argname, u'\xa0'+argname)
+param += nodes.emphasis(' ' + argname, u'\xa0' + argname)
except ValueError:
# no argument name given, only the type
self._parse_type(param, arg)
@@ -245,7 +245,7 @@ class CXRefRole(XRefRole):
title = title[1:]
dot = title.rfind('.')
if dot != -1:
-title = title[dot+1:]
+title = title[dot + 1:]
return title, target

@@ -3198,14 +3198,14 @@ class Symbol(object):
def to_string(self, indent):
# type: (int) -> unicode
-res = ['\t'*indent] # type: List[unicode]
+res = ['\t' * indent] # type: List[unicode]
if not self.parent:
res.append('::')
else:
if self.templateParams:
res.append(text_type(self.templateParams))
res.append('\n')
-res.append('\t'*indent)
+res.append('\t' * indent)
if self.identifier:
res.append(text_type(self.identifier))
else:

@@ -160,7 +160,7 @@ class JSXRefRole(XRefRole):
title = title[1:]
dot = title.rfind('.')
if dot != -1:
-title = title[dot+1:]
+title = title[dot + 1:]
if target[0:1] == '.':
target = target[1:]
refnode['refspecific'] = True

@@ -563,7 +563,7 @@ class PyXRefRole(XRefRole):
title = title[1:]
dot = title.rfind('.')
if dot != -1:
-title = title[dot+1:]
+title = title[dot + 1:]
# if the first character is a dot, search more specific namespaces first
# else search builtins first
if target[0:1] == '.':

@@ -77,7 +77,7 @@ class GenericObject(ObjectDescription):
colon = self.indextemplate.find(':')
if colon != -1:
indextype = self.indextemplate[:colon].strip()
-indexentry = self.indextemplate[colon+1:].strip() % (name,)
+indexentry = self.indextemplate[colon + 1:].strip() % (name,)
else:
indextype = 'single'
indexentry = self.indextemplate % (name,)
@@ -139,7 +139,7 @@ class Target(Directive):
colon = indexentry.find(':')
if colon != -1:
indextype = indexentry[:colon].strip()
-indexentry = indexentry[colon+1:].strip()
+indexentry = indexentry[colon + 1:].strip()
inode = addnodes.index(entries=[(indextype, indexentry,
targetname, '', None)])
ret.insert(0, inode)
@@ -600,7 +600,7 @@ class StandardDomain(Domain):
'in ' + env.doc2path(labels[name][0]),
location=node)
anonlabels[name] = docname, labelid
-if node.tagname == 'section':
+if node.tagname in ('section', 'rubric'):
sectname = clean_astext(node[0]) # node[0] == title node
elif self.is_enumerable_node(node):
sectname = self.get_numfig_title(node)
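The ('section', 'rubric') check above is the change behind "#3328: Could not refer rubric implicitly" in the CHANGES hunk at the top: a label placed on a rubric now yields a section name, so a reference to it can fall back to the rubric's title as link text. A minimal sketch of that title fallback, with illustrative names only (Sphinx itself goes through clean_astext() and the std domain's label tables):

from docutils import nodes

def implicit_link_text(node):
    # rubric titles now behave like section titles for implicit references
    if node.tagname in ('section', 'rubric'):
        return node[0].astext()  # first child carries the title text
    return None

print(implicit_link_text(nodes.rubric('', 'Deprecated APIs')))  # -> Deprecated APIs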

@@ -692,7 +692,7 @@ class BuildEnvironment(object):
lineend = len(error.object)
lineno = error.object.count(b'\n', 0, error.start) + 1
logger.warning('undecodable source characters, replacing with "?": %r',
-(error.object[linestart+1:error.start] + b'>>>' +
+(error.object[linestart + 1:error.start] + b'>>>' +
error.object[error.start:error.end] + b'<<<' +
error.object[error.end:lineend]),
location=(self.docname, lineno))

@@ -241,11 +241,11 @@ class Toctree(EnvironmentManager):
if isinstance(subnode, (addnodes.compact_paragraph,
nodes.list_item)):
# for <p> and <li>, indicate the depth level and recurse
-subnode['classes'].append('toctree-l%d' % (depth-1))
+subnode['classes'].append('toctree-l%d' % (depth - 1))
_toctree_add_classes(subnode, depth)
elif isinstance(subnode, nodes.bullet_list):
# for <ul>, just recurse
-_toctree_add_classes(subnode, depth+1)
+_toctree_add_classes(subnode, depth + 1)
elif isinstance(subnode, nodes.reference):
# for <a>, identify which entries point to the current
# document and therefore may not be collapsed
@@ -435,7 +435,7 @@ class Toctree(EnvironmentManager):
subnode.parent.remove(subnode)
else:
# recurse on visible children
-self._toctree_prune(subnode, depth+1, maxdepth, collapse)
+self._toctree_prune(subnode, depth + 1, maxdepth, collapse)
def assign_section_numbers(self):
# type: () -> List[unicode]
@@ -453,7 +453,7 @@ class Toctree(EnvironmentManager):
for subnode in node.children:
if isinstance(subnode, nodes.bullet_list):
numstack.append(0)
-_walk_toc(subnode, secnums, depth-1, titlenode)
+_walk_toc(subnode, secnums, depth - 1, titlenode)
numstack.pop()
titlenode = None
elif isinstance(subnode, nodes.list_item):

@@ -111,7 +111,7 @@ def process_autosummary_toc(app, doctree):
if not isinstance(subnode, nodes.section):
continue
if subnode not in crawled:
-crawl_toc(subnode, depth+1)
+crawl_toc(subnode, depth + 1)
crawl_toc(doctree)
@@ -284,7 +284,7 @@ class Autosummary(Directive):
if not isinstance(obj, ModuleType):
# give explicitly separated module name, so that members
# of inner classes can be documented
-full_name = modname + '::' + full_name[len(modname)+1:]
+full_name = modname + '::' + full_name[len(modname) + 1:]
# NB. using full_name here is important, since Documenters
# handle module prefixes slightly differently
documenter = get_documenter(obj, parent)(self, full_name)
@@ -423,13 +423,13 @@ def mangle_signature(sig, max_chars=30):
s = m.group(1)[:-2]
# Produce a more compact signature
-sig = limited_join(", ", args, max_chars=max_chars-2)
+sig = limited_join(", ", args, max_chars=max_chars - 2)
if opts:
if not sig:
-sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars-4)
+sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars - 4)
elif len(sig) < max_chars - 4 - 2 - 3:
sig += "[, %s]" % limited_join(", ", opts,
-max_chars=max_chars-len(sig)-4-2)
+max_chars=max_chars - len(sig) - 4 - 2)
return u"(%s)" % sig
@@ -521,7 +521,7 @@ def _import_by_name(name):
# ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
last_j = 0
modname = None
-for j in reversed(range(1, len(name_parts)+1)):
+for j in reversed(range(1, len(name_parts) + 1)):
last_j = j
modname = '.'.join(name_parts[:j])
try:

@@ -260,7 +260,7 @@ class DocTestBuilder(Builder):
self.outfile.write('''\
Results of doctest builder run on %s
==================================%s
-''' % (date, '='*len(date)))
+''' % (date, '=' * len(date)))
def _out(self, text):
# type: (unicode) -> None
@@ -377,7 +377,7 @@ Doctest summary
return
self._out('\nDocument: %s\n----------%s\n' %
-(docname, '-'*len(docname)))
+(docname, '-' * len(docname)))
for group in itervalues(groups):
self.test_group(group, self.env.doc2path(docname, base=None))
# Separately count results from setup code

@@ -231,7 +231,7 @@ def get_tooltip(self, node):
def html_visit_math(self, node):
# type: (nodes.NodeVisitor, math_node) -> None
try:
-fname, depth = render_math(self, '$'+node['latex']+'$')
+fname, depth = render_math(self, '$' + node['latex'] + '$')
except MathExtError as exc:
msg = text_type(exc)
sm = nodes.system_message(msg, type='WARNING', level=2,

@@ -83,7 +83,7 @@ def read_inventory_v1(f, uri, join):
return invdata
-def read_inventory_v2(f, uri, join, bufsize=16*1024):
+def read_inventory_v2(f, uri, join, bufsize=16 * 1024):
# type: (IO, unicode, Callable, int) -> Inventory
invdata = {} # type: Inventory
line = f.readline()
@@ -109,7 +109,7 @@ def read_inventory_v2(f, uri, join, bufsize=16*1024):
lineend = buf.find(b'\n')
while lineend != -1:
yield buf[:lineend].decode('utf-8')
-buf = buf[lineend+1:]
+buf = buf[lineend + 1:]
lineend = buf.find(b'\n')
assert not buf
@@ -134,7 +134,7 @@ def read_inventory_v2(f, uri, join, bufsize=16*1024):
return invdata
-def read_inventory(f, uri, join, bufsize=16*1024):
+def read_inventory(f, uri, join, bufsize=16 * 1024):
# type: (IO, unicode, Callable, int) -> Inventory
line = f.readline().rstrip().decode('utf-8')
if line == '# Sphinx inventory version 1':
@@ -371,9 +371,9 @@ def missing_reference(app, env, node, contnode):
(domain == 'std' and node['reftype'] == 'keyword'):
# use whatever title was given, but strip prefix
title = contnode.astext()
-if in_set and title.startswith(in_set+':'):
+if in_set and title.startswith(in_set + ':'):
-newnode.append(contnode.__class__(title[len(in_set)+1:],
+newnode.append(contnode.__class__(title[len(in_set) + 1:],
-title[len(in_set)+1:]))
+title[len(in_set) + 1:]))
else:
newnode.append(contnode)
else:

@@ -203,7 +203,7 @@ def get_tooltip(self, node):
def html_visit_math(self, node):
# type: (nodes.NodeVisitor, math_node) -> None
try:
-fname, depth = render_math(self, '$'+node['latex']+'$')
+fname, depth = render_math(self, '$' + node['latex'] + '$')
except MathExtError as exc:
msg = text_type(exc)
sm = nodes.system_message(msg, type='WARNING', level=2,

@@ -152,7 +152,7 @@ def process_todo_nodes(app, doctree, fromdocname):
(todo_info['source'], todo_info['lineno'])
)
desc1 = description[:description.find('<<')]
-desc2 = description[description.find('>>')+2:]
+desc2 = description[description.find('>>') + 2:]
para += nodes.Text(desc1, desc1)
# Create a reference

@@ -83,7 +83,7 @@ class Make(object):
def build_help(self):
# type: () -> None
print(bold("Sphinx v%s" % sphinx.__display_version__))
-print("Please use `make %s' where %s is one of" % ((blue('target'),)*2)) # type: ignore # NOQA
+print("Please use `make %s' where %s is one of" % ((blue('target'),) * 2)) # type: ignore # NOQA
for osname, bname, description in BUILDERS:
if not osname or os.name == osname:
print(' %s %s' % (blue(bname.ljust(10)), description))

@@ -368,4 +368,4 @@ if __name__ == '__main__':
pprint.pprint(ma.find_tags())
x3 = time.time()
# print nodes.nice_repr(ma.parsetree, number2name)
-print("tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2))
+print("tokenizing %.4f, parsing %.4f, finding %.4f" % (x1 - x0, x2 - x1, x3 - x2))

@@ -39,7 +39,7 @@ class BaseNode(object):
if child is self:
if i == 0:
return None
-return self.parent.children[i-1]
+return self.parent.children[i - 1]
def get_next_sibling(self):
"""Return next child in parent's children, or None."""
@@ -48,7 +48,7 @@ class BaseNode(object):
for i, child in enumerate(self.parent.children):
if child is self:
try:
-return self.parent.children[i+1]
+return self.parent.children[i + 1]
except IndexError:
return None

@@ -302,11 +302,11 @@ document is a custom template, you can also set this to another filename.''')
do_prompt(d, 'master', 'Name of your master document (without suffix)',
'index')
-while path.isfile(path.join(d['path'], d['master']+d['suffix'])) or \
+while path.isfile(path.join(d['path'], d['master'] + d['suffix'])) or \
-path.isfile(path.join(d['path'], 'source', d['master']+d['suffix'])):
+path.isfile(path.join(d['path'], 'source', d['master'] + d['suffix'])):
print()
print(bold('Error: the master file %s has already been found in the '
-'selected root path.' % (d['master']+d['suffix'])))
+'selected root path.' % (d['master'] + d['suffix'])))
print('sphinx-quickstart will not overwrite the existing file.')
print()
do_prompt(d, 'master', 'Please enter a new file name, or rename the '
@@ -633,7 +633,7 @@ def main(argv=sys.argv):
d.setdefault('version', '')
d.setdefault('release', d['version'])
d2 = DEFAULT_VALUE.copy()
-d2.update(dict(("ext_"+ext, False) for ext in EXTENSIONS))
+d2.update(dict(("ext_" + ext, False) for ext in EXTENSIONS))
d2.update(d)
d = d2
if 'no_makefile' in d:

@@ -201,7 +201,7 @@ def indexmarkup_role(typ, rawtext, text, lineno, inliner,
return [prb], [msg]
ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum
sn = nodes.strong(title, title)
-rn = nodes.reference('', '', internal=False, refuri=ref+anchor,
+rn = nodes.reference('', '', internal=False, refuri=ref + anchor,
classes=[typ])
rn += sn
return [indexnode, targetnode, rn], []
@@ -223,7 +223,7 @@ def indexmarkup_role(typ, rawtext, text, lineno, inliner,
return [prb], [msg]
ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
sn = nodes.strong(title, title)
-rn = nodes.reference('', '', internal=False, refuri=ref+anchor,
+rn = nodes.reference('', '', internal=False, refuri=ref + anchor,
classes=[typ])
rn += sn
return [indexnode, targetnode, rn], []

@@ -123,7 +123,7 @@ class Locale(Transform):
# literalblock need literal block notation to avoid it become
# paragraph.
if isinstance(node, LITERAL_TYPE_NODES):
-msgstr = '::\n\n' + indent(msgstr, ' '*3)
+msgstr = '::\n\n' + indent(msgstr, ' ' * 3)
patch = publish_msgstr(
env.app, msgstr, source, node.line, env.config, settings)
@@ -249,7 +249,7 @@ class Locale(Transform):
# literalblock need literal block notation to avoid it become
# paragraph.
if isinstance(node, LITERAL_TYPE_NODES):
-msgstr = '::\n\n' + indent(msgstr, ' '*3)
+msgstr = '::\n\n' + indent(msgstr, ' ' * 3)
patch = publish_msgstr(
env.app, msgstr, source, node.line, env.config, settings)

@@ -110,7 +110,7 @@ def get_matching_docs(dirname, suffixes, exclude_matchers=()):
for filename in get_matching_files(dirname, exclude_matchers):
for suffixpattern in suffixpatterns:
if fnmatch.fnmatch(filename, suffixpattern): # type: ignore
-yield filename[:-len(suffixpattern)+1]
+yield filename[:-len(suffixpattern) + 1]
break
@@ -183,7 +183,7 @@ def copy_static_entry(source, targetdir, builder, context={},
if path.isdir(path.join(source, entry)):
newtarget = path.join(targetdir, entry)
copy_static_entry(path.join(source, entry), newtarget,
-builder, context, level=level+1,
+builder, context, level=level + 1,
exclude_matchers=exclude_matchers)
@@ -380,9 +380,9 @@ def parselinenos(spec, total):
if len(begend) > 2:
raise ValueError
if len(begend) == 1:
-items.append(int(begend[0])-1)
+items.append(int(begend[0]) - 1)
else:
-start = (begend[0] == '') and 0 or int(begend[0])-1
+start = (begend[0] == '') and 0 or int(begend[0]) - 1
end = (begend[1] == '') and total or int(begend[1])
items.extend(range(start, end))
except Exception:
@@ -420,13 +420,13 @@ def rpartition(s, t):
"""Similar to str.rpartition from 2.5, but doesn't return the separator."""
i = s.rfind(t)
if i != -1:
-return s[:i], s[i+len(t):]
+return s[:i], s[i + len(t):]
return '', s
def split_into(n, type, value):
"""Split an index entry into a given number of parts at semicolons."""
-parts = [x.strip() for x in value.split(';', n-1)]
+parts = [x.strip() for x in value.split(';', n - 1)]
if sum(1 for part in parts if part) < n:
raise ValueError('invalid %s index entry %r' % (type, value))
return parts
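Since split_into() is shown in full just above, a quick usage sketch (the argument values are illustrative, not taken from the commit):

# split_into(2, 'pair', 'module; sphinx.addnodes')  -> ['module', 'sphinx.addnodes']
# split_into(3, 'triple', 'a; b')                   -> raises ValueError (too few non-empty parts)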

@@ -123,8 +123,8 @@ _colors = [
]
for i, (dark, light) in enumerate(_colors):
-codes[dark] = '\x1b[%im' % (i+30)
+codes[dark] = '\x1b[%im' % (i + 30)
-codes[light] = '\x1b[%i;01m' % (i+30)
+codes[light] = '\x1b[%i;01m' % (i + 30)
_orig_codes = codes.copy()

@@ -31,7 +31,7 @@ if False:
from sphinx.environment import BuildEnvironment # NOQA
__version_info__ = tuple(map(int, docutils.__version__.split('.')))
@contextmanager

@@ -20,8 +20,8 @@ if False:
# For type annotation
from typing import Any, IO, Union # NOQA
_str_re = re.compile(r'"(\\\\|\\"|[^"])*"')
_int_re = re.compile(r'\d+')
_name_re = re.compile(r'[a-zA-Z_]\w*')
_nameonly_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*$')

@@ -266,15 +266,15 @@ def process_index_entry(entry, targetid):
main = 'main'
entry = entry[1:].lstrip()
for type in pairindextypes:
-if entry.startswith(type+':'):
+if entry.startswith(type + ':'):
-value = entry[len(type)+1:].strip()
+value = entry[len(type) + 1:].strip()
value = pairindextypes[type] + '; ' + value
indexentries.append(('pair', value, targetid, main, None))
break
else:
for type in indextypes:
-if entry.startswith(type+':'):
+if entry.startswith(type + ':'):
-value = entry[len(type)+1:].strip()
+value = entry[len(type) + 1:].strip()
if type == 'double':
type = 'pair'
indexentries.append((type, value, targetid, main, None))

@@ -30,7 +30,7 @@ if False:
# Errnos that we need.
EEXIST = getattr(errno, 'EEXIST', 0)
ENOENT = getattr(errno, 'ENOENT', 0)
EPIPE = getattr(errno, 'EPIPE', 0)
EINVAL = getattr(errno, 'EINVAL', 0)
# SEP separates path elements in the canonical file names
@@ -73,7 +73,7 @@ def relative_uri(base, to):
# Special case: relative_uri('f/index.html','f/') should
# return './', not ''
return '.' + SEP
-return ('..' + SEP) * (len(b2)-1) + SEP.join(t2)
+return ('..' + SEP) * (len(b2) - 1) + SEP.join(t2)
def ensuredir(path): def ensuredir(path):

@@ -140,11 +140,11 @@ def make_chunks(arguments, nproc, maxbatch=10):
chunksize = nargs // nproc
if chunksize >= maxbatch:
# try to improve batch size vs. number of batches
-chunksize = int(sqrt(nargs/nproc * maxbatch))
+chunksize = int(sqrt(nargs / nproc * maxbatch))
if chunksize == 0:
chunksize = 1
nchunks, rest = divmod(nargs, chunksize)
if rest:
nchunks += 1
# partition documents in "chunks" that will be written by one Process
-return [arguments[i*chunksize:(i+1)*chunksize] for i in range(nchunks)]
+return [arguments[i * chunksize:(i + 1) * chunksize] for i in range(nchunks)]
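A quick worked example of the chunk sizing above (numbers are illustrative, not from the commit): with 100 documents and nproc=4, chunksize starts at 100 // 4 = 25, which is >= maxbatch, so it is rebalanced to int(sqrt(100 / 4 * 10)) = 15; divmod(100, 15) = (6, 10) then gives 7 chunks of at most 15 documents each, the last holding the remaining 10.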

@@ -107,7 +107,7 @@ class PorterStemmer(object):
"""doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
if j < (self.k0 + 1):
return 0
-if (self.b[j] != self.b[j-1]):
+if (self.b[j] != self.b[j - 1]):
return 0
return self.cons(j)
@@ -120,8 +120,8 @@ class PorterStemmer(object):
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
"""
-if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) \
+if i < (self.k0 + 2) or not self.cons(i) or self.cons(i - 1) \
-or not self.cons(i-2):
+or not self.cons(i - 2):
return 0
ch = self.b[i]
if ch == 'w' or ch == 'x' or ch == 'y':
@@ -135,7 +135,7 @@ class PorterStemmer(object):
return 0
if length > (self.k - self.k0 + 1):
return 0
-if self.b[self.k-length+1:self.k+1] != s:
+if self.b[self.k - length + 1:self.k + 1] != s:
return 0
self.j = self.k - length
return 1
@@ -144,7 +144,7 @@ class PorterStemmer(object):
"""setto(s) sets (j+1),...k to the characters in the string s,
readjusting k."""
length = len(s)
-self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
+self.b = self.b[:self.j + 1] + s + self.b[self.j + length + 1:]
self.k = self.j + length
def r(self, s):
@@ -203,7 +203,7 @@ class PorterStemmer(object):
"""step1c() turns terminal y to i when there is another vowel in
the stem."""
if (self.ends("y") and self.vowelinstem()):
-self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]
+self.b = self.b[:self.k] + 'i' + self.b[self.k + 1:]
def step2(self):
"""step2() maps double suffices to single ones.
@@ -376,7 +376,7 @@ class PorterStemmer(object):
self.j = self.k
if self.b[self.k] == 'e':
a = self.m()
-if a > 1 or (a == 1 and not self.cvc(self.k-1)):
+if a > 1 or (a == 1 and not self.cvc(self.k - 1)):
self.k = self.k - 1
if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
self.k = self.k - 1
@@ -408,4 +408,4 @@ class PorterStemmer(object):
self.step3()
self.step4()
self.step5()
-return self.b[self.k0:self.k+1]
+return self.b[self.k0:self.k + 1]

@@ -106,7 +106,7 @@ class BaseSearch(object):
res = self.context_re.search(text)
if res is None:
return ''
-context_start = max(res.start() - int(length/2), 0)
+context_start = max(res.start() - int(length / 2), 0)
context_end = context_start + length
context = ''.join([context_start > 0 and '...' or '',
text[context_start:context_end],

@@ -454,7 +454,7 @@ class HTMLTranslator(BaseTranslator):
self.body.append(self.starttag(production, 'strong', ''))
self.body.append(lastname + '</strong> ::= ')
elif lastname is not None:
-self.body.append('%s ' % (' '*len(lastname)))
+self.body.append('%s ' % (' ' * len(lastname)))
production.walkabout(self)
self.body.append('\n')
self.body.append('</pre>\n')
@@ -614,7 +614,7 @@ class HTMLTranslator(BaseTranslator):
self.body.append(token)
else:
# protect runs of multiple spaces; the last one can wrap
-self.body.append('&#160;' * (len(token)-1) + ' ')
+self.body.append('&#160;' * (len(token) - 1) + ' ')
else:
if self.in_mailto and self.settings.cloak_email_addresses:
encoded = self.cloak_email(encoded)

@@ -1793,7 +1793,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
parindex = node.parent.index(node)
try:
try:
-next = node.parent[parindex+1]
+next = node.parent[parindex + 1]
except IndexError:
# last node in parent, look at next after parent
# (for section of equal level) if it exists
@@ -1856,14 +1856,14 @@ class LaTeXTranslator(nodes.NodeVisitor):
elif type == 'pair':
p1, p2 = [self.encode(x) for x in split_into(2, 'pair', string)]
self.body.append(r'\index{%s!%s%s}\index{%s!%s%s}' %
(p1, p2, m, p2, p1, m))
elif type == 'triple':
p1, p2, p3 = [self.encode(x)
for x in split_into(3, 'triple', string)]
self.body.append(
r'\index{%s!%s %s%s}\index{%s!%s, %s%s}'
r'\index{%s!%s %s%s}' %
(p1, p2, p3, m, p2, p3, p1, m, p3, p1, p2, m))
elif type == 'see':
p1, p2 = [self.encode(x) for x in split_into(2, 'see', string)]
self.body.append(r'\index{%s|see{%s}}' % (p1, p2))

@@ -263,7 +263,7 @@ class ManualPageTranslator(BaseTranslator):
self.body.append(self.defs['strong'][1])
self.body.append(' ::= ')
elif lastname is not None:
-self.body.append('%s ' % (' '*len(lastname)))
+self.body.append('%s ' % (' ' * len(lastname)))
production.walkabout(self)
self.body.append('\n')
self.body.append('\n.fi\n')

@@ -345,10 +345,10 @@ class TexinfoTranslator(nodes.NodeVisitor):
for i, id in enumerate(entries):
# First child's prev is empty
if i != 0:
-rellinks[id][1] = entries[i-1]
+rellinks[id][1] = entries[i - 1]
# Last child's next is empty
if i != len(entries) - 1:
-rellinks[id][0] = entries[i+1]
+rellinks[id][0] = entries[i + 1]
# top's next is its first child
try:
first = node_menus['Top'][0]
@@ -416,7 +416,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
s = '* %s: %s. ' % (name, node_name)
offset = max((24, (len(name) + 4) % 78))
wdesc = '\n'.join(' ' * offset + l for l in
-textwrap.wrap(desc, width=78-offset))
+textwrap.wrap(desc, width=78 - offset))
return s + wdesc.strip() + '\n'
def add_menu_entries(self, entries, reg=re.compile(r'\s+---?\s+')):
@@ -698,7 +698,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
parindex = node.parent.index(node)
try:
try:
-next = node.parent[parindex+1]
+next = node.parent[parindex + 1]
except IndexError:
# last node in parent, look at next after parent
# (for section of equal level)
@@ -1110,7 +1110,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return
self.body.append('\n\n@multitable ')
for i, n in enumerate(self.colwidths):
-self.body.append('{%s} ' % ('x' * (n+2)))
+self.body.append('{%s} ' % ('x' * (n + 2)))
def depart_colspec(self, node):
# type: (nodes.Node) -> None
@@ -1450,7 +1450,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.add_anchor(id, production)
s = production['tokenname'].ljust(maxlen) + ' ::='
else:
-s = '%s ' % (' '*maxlen)
+s = '%s ' % (' ' * maxlen)
self.body.append(self.escape(s))
self.body.append(self.escape(production.astext() + '\n'))
self.depart_literal_block(None)

@@ -98,7 +98,7 @@ class TextWrapper(textwrap.TextWrapper):
for i, c in enumerate(word):
total += column_width(c)
if total > space_left:
-return word[:i-1], word[i-1:]
+return word[:i - 1], word[i - 1:]
return word, ''
def _split(self, text):
@@ -213,7 +213,7 @@ class TextTranslator(nodes.NodeVisitor):
if not toformat:
return
if wrap:
-res = my_wrap(''.join(toformat), width=MAXWIDTH-maxindent)
+res = my_wrap(''.join(toformat), width=MAXWIDTH - maxindent)
else:
res = ''.join(toformat).splitlines()
if end:
@@ -246,7 +246,7 @@ class TextTranslator(nodes.NodeVisitor):
def depart_document(self, node):
# type: (nodes.Node) -> None
self.end_state()
-self.body = self.nl.join(line and (' '*indent + line)
+self.body = self.nl.join(line and (' ' * indent + line)
for indent, lines in self.states[0]
for line in lines)
# XXX header/footer?
@@ -304,7 +304,7 @@ class TextTranslator(nodes.NodeVisitor):
def visit_title(self, node):
# type: (nodes.Node) -> None
if isinstance(node.parent, nodes.Admonition):
-self.add_text(node.astext()+': ')
+self.add_text(node.astext() + ': ')
raise nodes.SkipNode
self.new_state(0)
@@ -468,7 +468,7 @@ class TextTranslator(nodes.NodeVisitor):
self.add_text(production['tokenname'].ljust(maxlen) + ' ::=')
lastname = production['tokenname']
elif lastname is not None:
-self.add_text('%s ' % (' '*len(lastname)))
+self.add_text('%s ' % (' ' * len(lastname)))
self.add_text(production.astext() + self.nl)
self.end_state(wrap=False)
raise nodes.SkipNode
@@ -657,7 +657,7 @@ class TextTranslator(nodes.NodeVisitor):
# type: (unicode) -> None
out = ['+'] # type: List[unicode]
for width in realwidths:
-out.append(char * (width+2))
+out.append(char * (width + 2))
out.append('+')
self.add_text(''.join(out) + self.nl)

@@ -34,7 +34,7 @@ def checker(*suffixes, **kwds):
# this one is a byte regex since it is applied before decoding
coding_re = re.compile(br'coding[:=]\s*([-\w.]+)')
uni_coding_re = re.compile(r'^#.*coding[:=]\s*([-\w.]+).*')
name_mail_re = r'[\w ]+(<.*?>)?'
@@ -44,9 +44,9 @@ copyright_re = re.compile(r'^ :copyright: Copyright 200\d(-20\d\d)? '
license_re = re.compile(r" :license: (.*?).\n")
copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re))
not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
noqa_re = re.compile(r'#\s+NOQA\s*$', re.I)
misspellings = ["developement", "adress", # ALLOW-MISSPELLING
"verificate", "informations"] # ALLOW-MISSPELLING
@@ -64,7 +64,7 @@ def decode_source(fn, lines):
decoded_lines.append(line.decode(encoding))
except UnicodeDecodeError as err:
raise UnicodeError("%s:%d: not decodable: %s\n Line: %r" %
-(fn, lno+1, err, line))
+(fn, lno + 1, err, line))
except LookupError as err:
raise LookupError("unknown encoding: %s" % encoding)
return decoded_lines
@@ -85,14 +85,14 @@ def check_style(fn, lines):
if noqa_re.search(line):
continue
if len(line.rstrip('\n')) > 95:
-yield lno+1, "line too long"
+yield lno + 1, "line too long"
if line.strip().startswith('#'):
continue
# m = not_ix_re.search(line)
# if m:
# yield lno+1, '"' + m.group() + '"'
if is_const_re.search(line):
-yield lno+1, 'using == None/True/False'
+yield lno + 1, 'using == None/True/False'
@checker('.py', only_pkg=True)
@@ -119,11 +119,11 @@ def check_fileheader(fn, lines):
if l == '"""\n':
# end of docstring
if lno <= 4:
-yield lno+c, "missing module name in docstring"
+yield lno + c, "missing module name in docstring"
break
if l != '\n' and l[:4] != ' ' and docopen:
-yield lno+c, "missing correct docstring indentation"
+yield lno + c, "missing correct docstring indentation"
if lno == 2:
# if not in package, don't check the module name
@@ -148,10 +148,10 @@ def check_fileheader(fn, lines):
yield 0, "no correct license info"
ci = -3
-copyright = llist[ci:ci+1]
+copyright = llist[ci:ci + 1]
while copyright and copyright_2_re.match(copyright[0]):
ci -= 1
-copyright = llist[ci:ci+1]
+copyright = llist[ci:ci + 1]
if not copyright or not copyright_re.match(copyright[0]):
yield 0, "no correct copyright info"
@@ -160,12 +160,12 @@ def check_fileheader(fn, lines):
def check_whitespace_and_spelling(fn, lines):
for lno, line in enumerate(lines):
if '\t' in line:
-yield lno+1, "OMG TABS!!!1 "
+yield lno + 1, "OMG TABS!!!1 "
if line[:-1].rstrip(' \t') != line[:-1]:
-yield lno+1, "trailing whitespace"
+yield lno + 1, "trailing whitespace"
for word in misspellings:
if word in line and 'ALLOW-MISSPELLING' not in line:
-yield lno+1, '"%s" used' % word
+yield lno + 1, '"%s" used' % word
bad_tags = ['<u>', '<s>', '<strike>', '<center>', '<font']
@@ -176,7 +176,7 @@ def check_xhtml(fn, lines):
for lno, line in enumerate(lines):
for bad_tag in bad_tags:
if bad_tag in line:
-yield lno+1, "used " + bad_tag
+yield lno + 1, "used " + bad_tag
def main(argv):

@@ -35,8 +35,8 @@ def fold(jsonData, splitter):
lines.append(' ' + code)
break
index = code.index(splitter, 70)
-lines.append(' ' + code[:index+len(splitter)])
+lines.append(' ' + code[:index + len(splitter)])
-code = code[index+len(splitter):]
+code = code[index + len(splitter):]
lines[0] = lines[0][8:]
return '\n'.join(lines)

@@ -55,9 +55,9 @@ if sys.version_info >= (3, 0):
else:
tokens = tokenize.tokenize
verbose = 0
recurse = 0
dryrun = 0
makebackup = True
@@ -160,7 +160,7 @@ def _rstrip(line, JUNK='\n \t'):
"""
i = len(line)
-while i > 0 and line[i-1] in JUNK:
+while i > 0 and line[i - 1] in JUNK:
i -= 1
return line[:i]
@@ -204,9 +204,9 @@ class Reindenter:
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
-for i in range(len(stats)-1):
+for i in range(len(stats) - 1):
thisstmt, thislevel = stats[i]
-nextstmt = stats[i+1][0]
+nextstmt = stats[i + 1][0]
have = getlspace(lines[thisstmt])
want = thislevel * 4
if want < 0:
@@ -218,7 +218,7 @@ class Reindenter:
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
-for j in range(i+1, len(stats)-1):
+for j in range(i + 1, len(stats) - 1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == getlspace(lines[jline]):
@@ -228,10 +228,10 @@ class Reindenter:
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
-for j in range(i-1, -1, -1):
+for j in range(i - 1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
-want = (have + getlspace(after[jline-1]) -
+want = (have + getlspace(after[jline - 1]) -
getlspace(lines[jline]))
break
if want < 0: