Merge pull request #5787 from tk0miya/refactor_uprefix

refactor: Remove u-prefix from strings
Takeshi KOMIYA 2018-12-16 21:47:17 +09:00 committed by GitHub
commit 42140fae39
88 changed files with 1125 additions and 1145 deletions
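
The change is mechanical: PEP 414 re-allowed the u-prefix in Python 3 purely as a Python 2 compatibility shim, so once Python 2 support is dropped the prefix carries no information. A minimal check (not part of the commit) of why every hunk below is behavior-preserving:

    # In Python 3 a u-prefixed literal produces exactly the same str object
    # as a bare literal; the prefix exists only for Python 2 compatibility.
    assert u'epub-cover.xhtml' == 'epub-cover.xhtml'
    assert type(u'text') is type('text') is str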

View File

@@ -50,22 +50,22 @@ logger = logging.getLogger(__name__)
 # output but that may be customized by (re-)setting module attributes,
 # e.g. from conf.py.
-COVERPAGE_NAME = u'epub-cover.xhtml'
-TOCTREE_TEMPLATE = u'toctree-l%d'
-LINK_TARGET_TEMPLATE = u' [%(uri)s]'
-FOOTNOTE_LABEL_TEMPLATE = u'#%d'
-FOOTNOTES_RUBRIC_NAME = u'Footnotes'
-CSS_LINK_TARGET_CLASS = u'link-target'
+COVERPAGE_NAME = 'epub-cover.xhtml'
+TOCTREE_TEMPLATE = 'toctree-l%d'
+LINK_TARGET_TEMPLATE = ' [%(uri)s]'
+FOOTNOTE_LABEL_TEMPLATE = '#%d'
+FOOTNOTES_RUBRIC_NAME = 'Footnotes'
+CSS_LINK_TARGET_CLASS = 'link-target'
 # XXX These strings should be localized according to epub_language
 GUIDE_TITLES = {
-    'toc': u'Table of Contents',
-    'cover': u'Cover'
+    'toc': 'Table of Contents',
+    'cover': 'Cover'
 }
 MEDIA_TYPES = {
@@ -721,7 +721,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
         epub_filename = path.join(outdir, outname)
         with ZipFile(epub_filename, 'w', ZIP_DEFLATED) as epub:
             epub.write(path.join(outdir, 'mimetype'), 'mimetype', ZIP_STORED)
-            for filename in [u'META-INF/container.xml', u'content.opf', u'toc.ncx']:
+            for filename in ['META-INF/container.xml', 'content.opf', 'toc.ncx']:
                 epub.write(path.join(outdir, filename), filename, ZIP_DEFLATED)
             for filename in self.files:
                 epub.write(path.join(outdir, filename), filename, ZIP_DEFLATED)

View File

@@ -51,8 +51,8 @@ THEME_WRITING_MODES = {
 DOCTYPE = '''<!DOCTYPE html>'''
 HTML_TAG = (
-    u'<html xmlns="http://www.w3.org/1999/xhtml" '
-    u'xmlns:epub="http://www.idpf.org/2007/ops">'
+    '<html xmlns="http://www.w3.org/1999/xhtml" '
+    'xmlns:epub="http://www.idpf.org/2007/ops">'
 )

View File

@@ -193,8 +193,8 @@ class BuildInfo:
     def __init__(self, config=None, tags=None, config_categories=[]):
         # type: (Config, Tags, List[str]) -> None
-        self.config_hash = u''
-        self.tags_hash = u''
+        self.config_hash = ''
+        self.tags_hash = ''
         if config:
             values = dict((c.name, c.value) for c in config.filter(config_categories))
@@ -1309,7 +1309,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
         # There are related codes in inline_all_toctres() and
         # HTMLTranslter#add_fignumber().
         new_fignumbers = {}  # type: Dict[str, Dict[str, Tuple[int, ...]]]
-        # {u'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, u'bar': {'figure': {'id1': (3,)}}}
+        # {'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, 'bar': {'figure': {'id1': (3,)}}}
         for docname, fignumlist in self.env.toc_fignumbers.items():
             for figtype, fignums in fignumlist.items():
                 alias = "%s/%s" % (docname, figtype)
@@ -1631,7 +1631,7 @@ def setup(app):
     app.add_config_value('html_sidebars', {}, 'html')
    app.add_config_value('html_additional_pages', {}, 'html')
     app.add_config_value('html_domain_indices', True, 'html', [list])
-    app.add_config_value('html_add_permalinks', u'\u00B6', 'html')
+    app.add_config_value('html_add_permalinks', '\u00B6', 'html')
     app.add_config_value('html_use_index', True, 'html')
     app.add_config_value('html_split_index', False, 'html')
     app.add_config_value('html_copy_source', True, 'html')

View File

@@ -282,8 +282,8 @@ class LaTeXBuilder(Builder):
         # fresh document
         new_tree = new_document('<latex output>')
         new_sect = nodes.section()
-        new_sect += nodes.title(u'<Set title in conf.py>',
-                                u'<Set title in conf.py>')
+        new_sect += nodes.title('<Set title in conf.py>',
+                                '<Set title in conf.py>')
         new_tree += new_sect
         for node in tree.traverse(addnodes.toctree):
             new_sect += node

View File

@@ -119,7 +119,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
         for (key, group) in index:
             for title, (refs, subitems, key_) in group:
                 keywords.extend(self.build_keywords(title, refs, subitems))
-        keywords = u'\n'.join(keywords)  # type: ignore
+        keywords = '\n'.join(keywords)  # type: ignore
         # it seems that the "namespace" may not contain non-alphanumeric
         # characters, and more than one successive dot, or leading/trailing
@@ -192,7 +192,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
             link = node['refuri']
             title = html.escape(node.astext()).replace('"', '&quot;')
             item = section_template % {'title': title, 'ref': link}
-            item = u' ' * 4 * indentlevel + item
+            item = ' ' * 4 * indentlevel + item
             parts.append(item.encode('ascii', 'xmlcharrefreplace').decode())
         elif isinstance(node, nodes.bullet_list):
             for subnode in node:

View File

@@ -148,8 +148,8 @@ class TexinfoBuilder(Builder):
         # fresh document
         new_tree = new_document('<texinfo output>')
         new_sect = nodes.section()
-        new_sect += nodes.title(u'<Set title in conf.py>',
-                                u'<Set title in conf.py>')
+        new_sect += nodes.title('<Set title in conf.py>',
+                                '<Set title in conf.py>')
         new_tree += new_sect
         for node in tree.traverse(addnodes.toctree):
             new_sect += node

View File

@@ -457,13 +457,13 @@ def generate(d, overwrite=True, silent=False, templatedir=None):
         d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
         # use binary mode, to avoid writing \r\n on Windows
         write_file(path.join(d['path'], 'Makefile'),
-                   template.render(makefile_template, d), u'\n')
+                   template.render(makefile_template, d), '\n')
     if d['batchfile'] is True:
         d['rsrcdir'] = d['sep'] and 'source' or '.'
         d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
         write_file(path.join(d['path'], 'make.bat'),
-                   template.render(batchfile_template, d), u'\r\n')
+                   template.render(batchfile_template, d), '\r\n')
     if silent:
         return

View File

@@ -111,7 +111,7 @@ class Config:
         'language': (None, 'env', string_classes),
         'locale_dirs': (['locales'], 'env', []),
-        'figure_language_filename': (u'{root}.{language}{ext}', 'env', [str]),
+        'figure_language_filename': ('{root}.{language}{ext}', 'env', [str]),
         'master_doc': ('index', 'env', []),
         'source_suffix': ({'.rst': 'restructuredtext'}, 'env', Any),
@@ -497,7 +497,7 @@ def check_unicode(config):
         if isinstance(value, bytes) and nonascii_re.search(value):
             logger.warning(__('the config value %r is set to a string with non-ASCII '
                               'characters; this can lead to Unicode errors occurring. '
-                              'Please use Unicode strings, e.g. %r.'), name, u'Content')
+                              'Please use Unicode strings, e.g. %r.'), name, 'Content')
 def check_primary_domain(app, config):

View File

@@ -128,7 +128,7 @@ class CodeBlock(SphinxDirective):
     def run(self):
         # type: () -> List[nodes.Node]
         document = self.state.document
-        code = u'\n'.join(self.content)
+        code = '\n'.join(self.content)
         location = self.state_machine.get_source_and_line(self.lineno)
         linespec = self.options.get('emphasize-lines')

View File

@@ -174,7 +174,7 @@ class CObject(ObjectDescription):
                 ctype, argname = arg.rsplit(' ', 1)
                 self._parse_type(param, ctype)
                 # separate by non-breaking space in the output
-                param += nodes.emphasis(' ' + argname, u'\xa0' + argname)
+                param += nodes.emphasis(' ' + argname, '\xa0' + argname)
             except ValueError:
                 # no argument name given, only the type
                 self._parse_type(param, arg)

View File

@@ -739,7 +739,7 @@ class ASTParenAttribute(ASTBase):
 class ASTPointerLiteral(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u'nullptr'
+        return 'nullptr'
     def get_id(self, version):
         # type: (int) -> str
@@ -756,9 +756,9 @@ class ASTBooleanLiteral(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
         if self.value:
-            return u'true'
+            return 'true'
         else:
-            return u'false'
+            return 'false'
     def get_id(self, version):
         # type: (int) -> str
@@ -882,20 +882,20 @@ class ASTFoldExpr(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        res = [u'(']
+        res = ['(']
         if self.leftExpr:
             res.append(transform(self.leftExpr))
-            res.append(u' ')
+            res.append(' ')
             res.append(transform(self.op))
-            res.append(u' ')
-        res.append(u'...')
+            res.append(' ')
+        res.append('...')
         if self.rightExpr:
-            res.append(u' ')
+            res.append(' ')
             res.append(transform(self.op))
-            res.append(u' ')
+            res.append(' ')
             res.append(transform(self.rightExpr))
-        res.append(u')')
-        return u''.join(res)
+        res.append(')')
+        return ''.join(res)
     def get_id(self, version):
         # type: (int) -> str
@@ -937,7 +937,7 @@ class ASTBinOpExpr(ASTBase):
             res.append(self.ops[i - 1])
             res.append(' ')
             res.append(transform(self.exprs[i]))
-        return u''.join(res)
+        return ''.join(res)
     def get_id(self, version):
         # type: (int) -> str
@@ -947,7 +947,7 @@ class ASTBinOpExpr(ASTBase):
             res.append(_id_operator_v2[self.ops[i]])
             res.append(self.exprs[i].get_id(version))
         res.append(self.exprs[-1].get_id(version))
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         self.exprs[0].describe_signature(signode, mode, env, symbol)
@@ -974,7 +974,7 @@ class ASTAssignmentExpr(ASTBase):
             res.append(self.ops[i - 1])
             res.append(' ')
             res.append(transform(self.exprs[i]))
-        return u''.join(res)
+        return ''.join(res)
     def get_id(self, version):
         # type: (int) -> str
@@ -983,7 +983,7 @@ class ASTAssignmentExpr(ASTBase):
             res.append(_id_operator_v2[self.ops[i]])
             res.append(self.exprs[i].get_id(version))
         res.append(self.exprs[-1].get_id(version))
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         self.exprs[0].describe_signature(signode, mode, env, symbol)
@@ -1001,11 +1001,11 @@ class ASTCastExpr(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        res = [u'(']
+        res = ['(']
         res.append(transform(self.typ))
-        res.append(u')')
+        res.append(')')
         res.append(transform(self.expr))
-        return u''.join(res)
+        return ''.join(res)
     def get_id(self, version):
         # type: (int) -> str
@@ -1158,7 +1158,7 @@ class ASTNewExpr(ASTBase):
                 first = False
                 res.append(transform(e))
             res.append(self.initType)
-        return u''.join(res)
+        return ''.join(res)
     def get_id(self, version):
         # type: (int) -> str
@@ -1177,7 +1177,7 @@ class ASTNewExpr(ASTBase):
                 assert False
         else:
             res.append('E')
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         if self.rooted:
@@ -1217,7 +1217,7 @@ class ASTDeleteExpr(ASTBase):
         if self.array:
             res.append('[] ')
         res.append(transform(self.expr))
-        return u''.join(res)
+        return ''.join(res)
     def get_id(self, version):
         # type: (int) -> str
@@ -1251,7 +1251,7 @@ class ASTExplicitCast(ASTBase):
         res.append('>(')
         res.append(transform(self.expr))
         res.append(')')
-        return u''.join(res)
+        return ''.join(res)
     def get_id(self, version):
         # type: (int) -> str
@@ -1296,15 +1296,15 @@ class ASTPostfixCallExpr(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        res = [u'(']
+        res = ['(']
         first = True
         for e in self.exprs:
             if not first:
-                res.append(u', ')
+                res.append(', ')
             first = False
             res.append(transform(e))
-        res.append(u')')
-        return u''.join(res)
+        res.append(')')
+        return ''.join(res)
     def get_id(self, idPrefix, version):
         # type: (str, int) -> str
@@ -1312,7 +1312,7 @@ class ASTPostfixCallExpr(ASTBase):
         for e in self.exprs:
             res.append(e.get_id(version))
         res.append('E')
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         signode.append(nodes.Text('('))
@@ -1331,7 +1331,7 @@ class ASTPostfixArray(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u'[' + transform(self.expr) + ']'
+        return '[' + transform(self.expr) + ']'
     def get_id(self, idPrefix, version):
         # type: (str, int) -> str
@@ -1346,7 +1346,7 @@ class ASTPostfixArray(ASTBase):
 class ASTPostfixInc(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u'++'
+        return '++'
     def get_id(self, idPrefix, version):
         # type: (str, int) -> str
@@ -1359,7 +1359,7 @@ class ASTPostfixInc(ASTBase):
 class ASTPostfixDec(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u'--'
+        return '--'
     def get_id(self, idPrefix, version):
         # type: (str, int) -> str
@@ -1375,7 +1375,7 @@ class ASTPostfixMember(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u'.' + transform(self.name)
+        return '.' + transform(self.name)
     def get_id(self, idPrefix, version):
         # type: (str, int) -> str
@@ -1392,7 +1392,7 @@ class ASTPostfixMemberOfPointer(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u'->' + transform(self.name)
+        return '->' + transform(self.name)
     def get_id(self, idPrefix, version):
         # type: (str, int) -> str
@@ -1414,7 +1414,7 @@ class ASTPostfixExpr(ASTBase):
         res = [transform(self.prefix)]
         for p in self.postFixes:
             res.append(transform(p))
-        return u''.join(res)
+        return ''.join(res)
     def get_id(self, version):
         # type: (int) -> str
@@ -1493,7 +1493,7 @@ class ASTIdentifier(ASTBase):
                 return 'D0'
         else:
             if self.is_anon():
-                return u'Ut%d_%s' % (len(self.identifier) - 1, self.identifier[1:])
+                return 'Ut%d_%s' % (len(self.identifier) - 1, self.identifier[1:])
             else:
                 return text_type(len(self.identifier)) + self.identifier
@@ -1505,7 +1505,7 @@ class ASTIdentifier(ASTBase):
     def get_display_string(self):
         # type: () -> str
-        return u"[anonymous]" if self.is_anon() else self.identifier
+        return "[anonymous]" if self.is_anon() else self.identifier
     def describe_signature(self, signode, mode, env, prefix, templateArgs, symbol):
         # type: (Any, str, BuildEnvironment, str, str, Symbol) -> None
@@ -1789,9 +1789,9 @@ class ASTTemplateParams(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
         res = []
-        res.append(u"template<")
-        res.append(u", ".join(transform(a) for a in self.params))
-        res.append(u"> ")
+        res.append("template<")
+        res.append(", ".join(transform(a) for a in self.params))
+        res.append("> ")
         return ''.join(res)
     def describe_signature(self, parentNode, mode, env, symbol, lineSpec=None):
@@ -1860,7 +1860,7 @@ class ASTTemplateIntroductionParameter(ASTBase):
         # used for the implicit requires clause
         res = self.identifier.get_id(version)
         if self.parameterPack:
-            return u'sp' + res
+            return 'sp' + res
         else:
             return res
@@ -1944,14 +1944,14 @@ class ASTTemplateDeclarationPrefix(ASTBase):
         res = []
         for t in self.templates:
             res.append(t.get_id(version))
-        return u''.join(res)
+        return ''.join(res)
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
         res = []
         for t in self.templates:
             res.append(transform(t))
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol, lineSpec):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol, bool) -> None
@@ -2004,9 +2004,9 @@ class ASTOperatorBuildIn(ASTOperator):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
         if self.op in ('new', 'new[]', 'delete', 'delete[]'):
-            return u'operator ' + self.op
+            return 'operator ' + self.op
         else:
-            return u'operator' + self.op
+            return 'operator' + self.op
 class ASTOperatorType(ASTOperator):
@@ -2017,13 +2017,13 @@ class ASTOperatorType(ASTOperator):
     def get_id(self, version):
         # type: (int) -> str
         if version == 1:
-            return u'castto-%s-operator' % self.type.get_id(version)
+            return 'castto-%s-operator' % self.type.get_id(version)
         else:
-            return u'cv' + self.type.get_id(version)
+            return 'cv' + self.type.get_id(version)
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u''.join(['operator ', transform(self.type)])
+        return ''.join(['operator ', transform(self.type)])
     def get_name_no_template(self):
         # type: () -> str
@@ -2040,11 +2040,11 @@ class ASTOperatorLiteral(ASTOperator):
         if version == 1:
             raise NoOldIdError()
         else:
-            return u'li' + self.identifier.get_id(version)
+            return 'li' + self.identifier.get_id(version)
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u'operator""' + transform(self.identifier)
+        return 'operator""' + transform(self.identifier)
 ##############################################################################################
@@ -2062,10 +2062,10 @@ class ASTTemplateArgConstant(ASTBase):
     def get_id(self, version):
         # type: (int) -> str
         if version == 1:
-            return text_type(self).replace(u' ', u'-')
+            return text_type(self).replace(' ', '-')
         if version == 2:
-            return u'X' + text_type(self) + u'E'
-        return u'X' + self.value.get_id(version) + u'E'
+            return 'X' + text_type(self) + 'E'
+        return 'X' + self.value.get_id(version) + 'E'
     def describe_signature(self, signode, mode, env, symbol):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
@@ -2084,16 +2084,16 @@ class ASTTemplateArgs(ASTBase):
         if version == 1:
             res = []
             res.append(':')
-            res.append(u'.'.join(a.get_id(version) for a in self.args))
+            res.append('.'.join(a.get_id(version) for a in self.args))
             res.append(':')
-            return u''.join(res)
+            return ''.join(res)
         res = []
         res.append('I')
         for a in self.args:
             res.append(a.get_id(version))
         res.append('E')
-        return u''.join(res)
+        return ''.join(res)
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
@@ -2176,7 +2176,8 @@ class ASTNestedName(ASTBase):
             if tt in _id_shorthands_v1:
                 return _id_shorthands_v1[tt]
             else:
-                return u'::'.join(n.get_id(version) for n in self.names)
+                return '::'.join(n.get_id(version) for n in self.names)
         res = []
         if len(self.names) > 1 or len(modifiers) > 0:
             res.append('N')
@@ -2185,7 +2186,7 @@ class ASTNestedName(ASTBase):
             res.append(n.get_id(version))
         if len(self.names) > 1 or len(modifiers) > 0:
             res.append('E')
-        return u''.join(res)
+        return ''.join(res)
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
@@ -2221,7 +2222,7 @@ class ASTNestedName(ASTBase):
         if symbol.declaration.templatePrefix is not None:
             templateParams = symbol.declaration.templatePrefix.templates
         iTemplateParams = 0
-        templateParamsPrefix = u''
+        templateParamsPrefix = ''
         prefix = ''
         first = True
         names = self.names[:-1] if mode == 'lastIsName' else self.names
@@ -2278,7 +2279,7 @@ class ASTTrailingTypeSpecFundamental(ASTBase):
                     res.append(_id_fundamental_v1[a])
                 else:
                     res.append(a)
-            return u'-'.join(res)
+            return '-'.join(res)
         if self.name not in _id_fundamental_v2:
             raise Exception(
@@ -2314,7 +2315,7 @@ class ASTTrailingTypeSpecName(ASTBase):
             res.append(self.prefix)
             res.append(' ')
         res.append(transform(self.nestedName))
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
@@ -2327,7 +2328,7 @@ class ASTTrailingTypeSpecName(ASTBase):
 class ASTTrailingTypeSpecDecltypeAuto(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u'decltype(auto)'
+        return 'decltype(auto)'
     def get_id(self, version):
         # type: (int) -> str
@@ -2346,7 +2347,7 @@ class ASTTrailingTypeSpecDecltype(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u'decltype(' + transform(self.expr) + ')'
+        return 'decltype(' + transform(self.expr) + ')'
     def get_id(self, version):
         # type: (int) -> str
@@ -2427,7 +2428,7 @@ class ASTParametersQualifiers(ASTBase):
             res.append('O')
         elif self.refQual == '&':
             res.append('R')
-        return u''.join(res)
+        return ''.join(res)
     def get_param_id(self, version):
         # type: (int) -> str
@@ -2435,11 +2436,11 @@ class ASTParametersQualifiers(ASTBase):
             if len(self.args) == 0:
                 return ''
             else:
-                return u'__' + u'.'.join(a.get_id(version) for a in self.args)
+                return '__' + '.'.join(a.get_id(version) for a in self.args)
         if len(self.args) == 0:
             return 'v'
         else:
-            return u''.join(a.get_id(version) for a in self.args)
+            return ''.join(a.get_id(version) for a in self.args)
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
@@ -2469,7 +2470,7 @@ class ASTParametersQualifiers(ASTBase):
         if self.initializer:
             res.append(' = ')
             res.append(self.initializer)
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
@@ -2559,7 +2560,7 @@ class ASTDeclSpecsSimple(ASTBase):
             res.append('volatile')
         if self.const:
             res.append('const')
-        return u' '.join(res)
+        return ' '.join(res)
     def describe_signature(self, modifiers):
         # type: (List[nodes.Node]) -> None
@@ -2615,14 +2616,14 @@ class ASTDeclSpecs(ASTBase):
                 res.append('V')
             if self.allSpecs.const:
                 res.append('C')
-            return u''.join(res)
+            return ''.join(res)
         res = []
         if self.leftSpecs.volatile or self.rightSpecs.volatile:
             res.append('V')
         if self.leftSpecs.const or self.rightSpecs.volatile:
             res.append('K')
         res.append(self.trailingTypeSpec.get_id(version))
-        return u''.join(res)
+        return ''.join(res)
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
@@ -2677,23 +2678,23 @@ class ASTArray(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
         if self.size:
-            return u'[' + transform(self.size) + ']'
+            return '[' + transform(self.size) + ']'
         else:
-            return u'[]'
+            return '[]'
     def get_id(self, version):
         # type: (int) -> str
         if version == 1:
-            return u'A'
+            return 'A'
         if version == 2:
             if self.size:
-                return u'A' + text_type(self.size) + u'_'
+                return 'A' + text_type(self.size) + '_'
             else:
-                return u'A_'
+                return 'A_'
         if self.size:
-            return u'A' + self.size.get_id(version) + u'_'
+            return 'A' + self.size.get_id(version) + '_'
         else:
-            return u'A_'
+            return 'A_'
     def describe_signature(self, signode, mode, env, symbol):
         _verify_description_mode(mode)
@@ -2744,7 +2745,7 @@ class ASTDeclaratorPtr(ASTBase):
         if self.next.require_space_after_declSpecs:
             res.append(' ')
         res.append(transform(self.next))
-        return u''.join(res)
+        return ''.join(res)
     def get_modifiers_id(self, version):
         # type: (int) -> str
@@ -2763,7 +2764,7 @@ class ASTDeclaratorPtr(ASTBase):
             if self.const:
                 res.append('C')
             res.append(self.next.get_ptr_suffix_id(version))
-            return u''.join(res)
+            return ''.join(res)
         res = [self.next.get_ptr_suffix_id(version)]
         res.append('P')
@@ -2771,7 +2772,7 @@ class ASTDeclaratorPtr(ASTBase):
             res.append('V')
         if self.const:
             res.append('C')
-        return u''.join(res)
+        return ''.join(res)
     def get_type_id(self, version, returnTypeId):
         # type: (int, str) -> str
@@ -2782,7 +2783,7 @@ class ASTDeclaratorPtr(ASTBase):
         if self.const:
             res.append('C')
         res.append(returnTypeId)
-        return self.next.get_type_id(version, returnTypeId=u''.join(res))
+        return self.next.get_type_id(version, returnTypeId=''.join(res))
     def is_function_type(self):
         # type: () -> bool
@@ -2845,7 +2846,7 @@ class ASTDeclaratorRef(ASTBase):
         if len(self.attrs) > 0 and self.next.require_space_after_declSpecs:
             res.append(' ')
         res.append(transform(self.next))
-        return u''.join(res)
+        return ''.join(res)
     def get_modifiers_id(self, version):
         # type: (int) -> str
@@ -2858,15 +2859,15 @@ class ASTDeclaratorRef(ASTBase):
     def get_ptr_suffix_id(self, version):
         # type: (int) -> str
         if version == 1:
-            return u'R' + self.next.get_ptr_suffix_id(version)
+            return 'R' + self.next.get_ptr_suffix_id(version)
         else:
-            return self.next.get_ptr_suffix_id(version) + u'R'
+            return self.next.get_ptr_suffix_id(version) + 'R'
     def get_type_id(self, version, returnTypeId):
         # type: (int, str) -> str
         assert version >= 2
         # ReturnType &next, so we are part of the return type of 'next
-        return self.next.get_type_id(version, returnTypeId=u'R' + returnTypeId)
+        return self.next.get_type_id(version, returnTypeId='R' + returnTypeId)
     def is_function_type(self):
         # type: () -> bool
@@ -2923,13 +2924,13 @@ class ASTDeclaratorParamPack(ASTBase):
         if version == 1:
             return 'Dp' + self.next.get_ptr_suffix_id(version)
         else:
-            return self.next.get_ptr_suffix_id(version) + u'Dp'
+            return self.next.get_ptr_suffix_id(version) + 'Dp'
     def get_type_id(self, version, returnTypeId):
         # type: (int, str) -> str
         assert version >= 2
         # ReturnType... next, so we are part of the return type of 'next
-        return self.next.get_type_id(version, returnTypeId=u'Dp' + returnTypeId)
+        return self.next.get_type_id(version, returnTypeId='Dp' + returnTypeId)
     def is_function_type(self):
         # type: () -> bool
@@ -3002,7 +3003,7 @@ class ASTDeclaratorMemPtr(ASTBase):
             raise NoOldIdError()
         else:
             raise NotImplementedError()
-        return self.next.get_ptr_suffix_id(version) + u'Dp'
+        return self.next.get_ptr_suffix_id(version) + 'Dp'
     def get_type_id(self, version, returnTypeId):
         # type: (int, str) -> str
@@ -3150,7 +3151,7 @@ class ASTDeclaratorNameParamQual(ASTBase):
     def get_ptr_suffix_id(self, version):  # only the array specifiers
         # type: (int) -> str
-        return u''.join(a.get_id(version) for a in self.arrayOps)
+        return ''.join(a.get_id(version) for a in self.arrayOps)
     def get_type_id(self, version, returnTypeId):
         # type: (int, str) -> str
@@ -3166,7 +3167,7 @@ class ASTDeclaratorNameParamQual(ASTBase):
             res.append('E')
         else:
             res.append(returnTypeId)
-        return u''.join(res)
+        return ''.join(res)
     # ------------------------------------------------------------------------
@@ -3187,7 +3188,7 @@ class ASTDeclaratorNameParamQual(ASTBase):
             res.append(transform(op))
         if self.paramQual:
             res.append(transform(self.paramQual))
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
@@ -3206,7 +3207,7 @@ class ASTInitializer(ASTBase):
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
-        return u' = ' + transform(self.value)
+        return ' = ' + transform(self.value)
     def describe_signature(self, signode, mode, env, symbol):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
@@ -3262,7 +3263,7 @@ class ASTType(ASTBase):
             res.append(self.declSpecs.get_id(version))
             res.append(self.decl.get_ptr_suffix_id(version))
             res.append(self.decl.get_param_id(version))
-            return u''.join(res)
+            return ''.join(res)
         # other versions
         res = []
         if objectType:  # needs the name
@@ -3281,7 +3282,7 @@ class ASTType(ASTBase):
             returnTypeId = self.declSpecs.get_id(version)
             typeId = self.decl.get_type_id(version, returnTypeId)
             res.append(typeId)
-        return u''.join(res)
+        return ''.join(res)
     def _stringify(self, transform):
         # type: (Callable[[Any], str]) -> str
@@ -3289,9 +3290,9 @@ class ASTType(ASTBase):
         declSpecs = transform(self.declSpecs)
         res.append(declSpecs)
         if self.decl.require_space_after_declSpecs() and len(declSpecs) > 0:
-            res.append(u' ')
+            res.append(' ')
         res.append(transform(self.decl))
-        return u''.join(res)
+        return ''.join(res)
     def get_type_declaration_prefix(self):
         # type: () -> str
@@ -3335,8 +3336,8 @@ class ASTTypeWithInit(ASTBase):
         if objectType != 'member':
             return self.type.get_id(version, objectType)
         if version == 1:
-            return symbol.get_full_nested_name().get_id(version) + u'__' \
-                + self.type.get_id(version)
+            return (symbol.get_full_nested_name().get_id(version) + '__' +
+                    self.type.get_id(version))
         return symbol.get_full_nested_name().get_id(version)
     def _stringify(self, transform):
@@ -3345,7 +3346,7 @@ class ASTTypeWithInit(ASTBase):
         res = []
         res.append(transform(self.type))
         if self.init:
             res.append(transform(self.init))
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
@@ -3374,7 +3375,7 @@ class ASTTypeUsing(ASTBase):
         if self.type:
             res.append(' = ')
             res.append(transform(self.type))
-        return u''.join(res)
+        return ''.join(res)
     def get_type_declaration_prefix(self):
         # type: () -> str
@@ -3439,7 +3440,7 @@ class ASTBaseClass(ASTBase):
         res.append(transform(self.name))
         if self.pack:
             res.append('...')
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
@@ -3481,7 +3482,7 @@ class ASTClass(ASTBase):
                 res.append(', ')
             first = False
             res.append(transform(b))
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
@@ -3542,7 +3543,7 @@ class ASTEnum(ASTBase):
         if self.underlyingType:
             res.append(' : ')
             res.append(transform(self.underlyingType))
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
@@ -3573,7 +3574,7 @@ class ASTEnumerator(ASTBase):
         res.append(transform(self.name))
         if self.init:
             res.append(transform(self.init))
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, symbol):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
@@ -3635,7 +3636,7 @@ class ASTDeclaration(ASTBase):
         if self.templatePrefix:
             res.append(self.templatePrefix.get_id(version))
         res.append(self.declaration.get_id(version, self.objectType, self.symbol))
-        return u''.join(res)
+        return ''.join(res)
     def get_newest_id(self):
         # type: () -> str
@@ -3646,11 +3647,11 @@ class ASTDeclaration(ASTBase):
         res = []
         if self.visibility and self.visibility != "public":
             res.append(self.visibility)
-            res.append(u' ')
+            res.append(' ')
         if self.templatePrefix:
             res.append(transform(self.templatePrefix))
         res.append(transform(self.declaration))
-        return u''.join(res)
+        return ''.join(res)
     def describe_signature(self, signode, mode, env, options):
         # type: (addnodes.desc_signature, str, BuildEnvironment, Dict) -> None
@@ -5348,7 +5349,7 @@ class DefinitionParser:
        elif self.skip_word_and_ws('double'):
             elements.append('double')
         if len(elements) > 0:
-            return ASTTrailingTypeSpecFundamental(u' '.join(elements))
+            return ASTTrailingTypeSpecFundamental(' '.join(elements))
         # decltype
         self.skip_ws()
@@ -5462,7 +5463,7 @@ class DefinitionParser:
         if not initializer:
             self.fail(
                 'Expected "%s" in initializer-specifier.'
-                % u'" or "'.join(valid))
+                % '" or "'.join(valid))
         return ASTParametersQualifiers(
             args, volatile, const, refQual, exceptionSpec, override, final,

View File

@@ -102,7 +102,7 @@ class IndexEntries:
                 # using specified category key to sort
                 key = category_key
             lckey = unicodedata.normalize('NFD', key.lower())
-            if lckey.startswith(u'\N{RIGHT-TO-LEFT MARK}'):
+            if lckey.startswith('\N{RIGHT-TO-LEFT MARK}'):
                 lckey = lckey[1:]
             if lckey[0:1].isalpha() or lckey.startswith('_'):
                 lckey = chr(127) + lckey
@@ -149,7 +149,7 @@ class IndexEntries:
             v[1] = sorted((si, se) for (si, (se, void, void)) in v[1].items())
             if v[2] is None:
                 # now calculate the key
-                if k.startswith(u'\N{RIGHT-TO-LEFT MARK}'):
+                if k.startswith('\N{RIGHT-TO-LEFT MARK}'):
                     k = k[1:]
                 letter = unicodedata.normalize('NFD', k[0])[0].upper()
                 if letter.isalpha() or letter == '_':

View File

@@ -220,7 +220,7 @@ class Documenter:
     #: generated directive name
     objtype = 'object'
     #: indentation by which to indent the directive content
-    content_indent = u'   '
+    content_indent = '   '
     #: priority if multiple documenters return True from can_document_member
     priority = 0
     #: order if autodoc_member_order is set to 'groupwise'
@@ -241,7 +241,7 @@ class Documenter:
         """Called to see if a member can be documented by this documenter."""
         raise NotImplementedError('must be implemented in subclasses')
-    def __init__(self, directive, name, indent=u''):
+    def __init__(self, directive, name, indent=''):
         # type: (DocumenterBridge, str, str) -> None
         self.directive = directive
         self.env = directive.env  # type: BuildEnvironment
@@ -426,14 +426,14 @@ class Documenter:
         directive = getattr(self, 'directivetype', self.objtype)
         name = self.format_name()
         sourcename = self.get_sourcename()
-        self.add_line(u'.. %s:%s:: %s%s' % (domain, directive, name, sig),
+        self.add_line('.. %s:%s:: %s%s' % (domain, directive, name, sig),
                       sourcename)
         if self.options.noindex:
-            self.add_line(u'   :noindex:', sourcename)
+            self.add_line('   :noindex:', sourcename)
         if self.objpath:
             # Be explicit about the module, this is necessary since .. class::
             # etc. don't support a prepended module name
-            self.add_line(u'   :module: %s' % self.modname, sourcename)
+            self.add_line('   :module: %s' % self.modname, sourcename)
     def get_doc(self, encoding=None, ignore=1):
         # type: (str, int) -> List[List[str]]
@@ -468,8 +468,8 @@ class Documenter:
                 sys.getfilesystemencoding(), 'replace')
             else:
                 filename = self.analyzer.srcname
-            return u'%s:docstring of %s' % (filename, self.fullname)
-        return u'docstring of %s' % self.fullname
+            return '%s:docstring of %s' % (filename, self.fullname)
+        return 'docstring of %s' % self.fullname
     def add_content(self, more_content, no_docstring=False):
         # type: (Any, bool) -> None
@@ -743,14 +743,14 @@ class Documenter:
         # make sure that the result starts with an empty line. This is
         # necessary for some situations where another directive preprocesses
         # reST and no starting newline is present
-        self.add_line(u'', sourcename)
+        self.add_line('', sourcename)
         # format the object's signature, if any
         sig = self.format_signature()
         # generate the directive header and options, if applicable
        self.add_directive_header(sig)
-        self.add_line(u'', sourcename)
+        self.add_line('', sourcename)
         # e.g. the module directive doesn't have content
         self.indent += self.content_indent
@@ -767,7 +767,7 @@ class ModuleDocumenter(Documenter):
     Specialized Documenter subclass for modules.
     """
     objtype = 'module'
-    content_indent = u''
+    content_indent = ''
     titles_allowed = True
     option_spec = {
@@ -815,13 +815,11 @@ class ModuleDocumenter(Documenter):
         # add some module-specific options
         if self.options.synopsis:
-            self.add_line(
-                u'   :synopsis: ' + self.options.synopsis, sourcename)
+            self.add_line('   :synopsis: ' + self.options.synopsis, sourcename)
         if self.options.platform:
-            self.add_line(
-                u'   :platform: ' + self.options.platform, sourcename)
+            self.add_line('   :platform: ' + self.options.platform, sourcename)
         if self.options.deprecated:
-            self.add_line(u'   :deprecated:', sourcename)
+            self.add_line('   :deprecated:', sourcename)
     def get_object_members(self, want_all):
         # type: (bool) -> Tuple[bool, List[Tuple[str, object]]]
@@ -1112,13 +1110,13 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):  # type:
         # add inheritance info, if wanted
         if not self.doc_as_attr and self.options.show_inheritance:
             sourcename = self.get_sourcename()
-            self.add_line(u'', sourcename)
+            self.add_line('', sourcename)
             if hasattr(self.object, '__bases__') and len(self.object.__bases__):
                 bases = [b.__module__ in ('__builtin__', 'builtins') and
-                         u':class:`%s`' % b.__name__ or
-                         u':class:`%s.%s`' % (b.__module__, b.__name__)
+                         ':class:`%s`' % b.__name__ or
+                         ':class:`%s.%s`' % (b.__module__, b.__name__)
                          for b in self.object.__bases__]
-                self.add_line(u'   ' + _(u'Bases: %s') % ', '.join(bases),
+                self.add_line('   ' + _('Bases: %s') % ', '.join(bases),
                               sourcename)
     def get_doc(self, encoding=None, ignore=1):
@@ -1174,7 +1172,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):  # type:
             module = safe_getattr(self.object, '__module__', None)
             parentmodule = safe_getattr(self.parent, '__module__', None)
             if module and module != parentmodule:
-                classname = str(module) + u'.' + str(classname)
+                classname = str(module) + '.' + str(classname)
             content = StringList([_('alias of :class:`%s`') % classname], source='')
             super().add_content(content, no_docstring=True)
         else:
@@ -1240,11 +1238,11 @@ class DataDocumenter(ModuleLevelDocumenter):
             except ValueError:
                 pass
             else:
-                self.add_line(u'   :annotation: = ' + objrepr, sourcename)
+                self.add_line('   :annotation: = ' + objrepr, sourcename)
         elif self.options.annotation is SUPPRESS:
             pass
         else:
-            self.add_line(u'   :annotation: %s' % self.options.annotation,
+            self.add_line('   :annotation: %s' % self.options.annotation,
                           sourcename)
     def document_members(self, all_members=False):
@@ -1378,12 +1376,11 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
             except ValueError:
                 pass
             else:
-                self.add_line(u'   :annotation: = ' + objrepr, sourcename)
+                self.add_line('   :annotation: = ' + objrepr, sourcename)
         elif self.options.annotation is SUPPRESS:
             pass
         else:
-            self.add_line(u'   :annotation: %s' % self.options.annotation,
-                          sourcename)
+            self.add_line('   :annotation: %s' % self.options.annotation, sourcename)
     def add_content(self, more_content, no_docstring=False):
         # type: (Any, bool) -> None

View File

@@ -162,7 +162,7 @@ def autosummary_table_visit_html(self, node):
             for j, subnode in enumerate(list(par)):
                 if isinstance(subnode, nodes.Text):
                     new_text = text_type(subnode.astext())
-                    new_text = new_text.replace(u" ", u"\u00a0")
+                    new_text = new_text.replace(" ", "\u00a0")
                     par[j] = nodes.Text(new_text)
         except IndexError:
             pass
@@ -475,7 +475,7 @@ def mangle_signature(sig, max_chars=30):
         sig += "[, %s]" % limited_join(", ", opts,
                                        max_chars=max_chars - len(sig) - 4 - 2)
-    return u"(%s)" % sig
+    return "(%s)" % sig
 def extract_summary(doc, document):

View File

@ -340,13 +340,13 @@ class GoogleDocstring(UnicodeMixin):
def _fix_field_desc(self, desc): def _fix_field_desc(self, desc):
# type: (List[str]) -> List[str] # type: (List[str]) -> List[str]
if self._is_list(desc): if self._is_list(desc):
desc = [u''] + desc desc = [''] + desc
elif desc[0].endswith('::'): elif desc[0].endswith('::'):
desc_block = desc[1:] desc_block = desc[1:]
indent = self._get_indent(desc[0]) indent = self._get_indent(desc[0])
block_indent = self._get_initial_indent(desc_block) block_indent = self._get_initial_indent(desc_block)
if block_indent > indent: if block_indent > indent:
desc = [u''] + desc desc = [''] + desc
else: else:
desc = ['', desc[0]] + self._indent(desc_block, 4) desc = ['', desc[0]] + self._indent(desc_block, 4)
return desc return desc
@ -358,9 +358,9 @@ class GoogleDocstring(UnicodeMixin):
return ['.. %s:: %s' % (admonition, lines[0].strip()), ''] return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
elif lines: elif lines:
lines = self._indent(self._dedent(lines), 3) lines = self._indent(self._dedent(lines), 3)
return [u'.. %s::' % admonition, u''] + lines + [u''] return ['.. %s::' % admonition, ''] + lines + ['']
else: else:
return [u'.. %s::' % admonition, u''] return ['.. %s::' % admonition, '']
def _format_block(self, prefix, lines, padding=None): def _format_block(self, prefix, lines, padding=None):
# type: (str, List[str], str) -> List[str] # type: (str, List[str], str) -> List[str]
@ -676,7 +676,7 @@ class GoogleDocstring(UnicodeMixin):
for _name, _type, _desc in self._consume_fields(parse_type=False): for _name, _type, _desc in self._consume_fields(parse_type=False):
lines.append('.. method:: %s' % _name) lines.append('.. method:: %s' % _name)
if _desc: if _desc:
lines.extend([u''] + self._indent(_desc, 3)) lines.extend([''] + self._indent(_desc, 3))
lines.append('') lines.append('')
return lines return lines
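Both hunks shape Google-style docstring sections into reST blocks. As a simplified sketch of the admonition branch above (the real code runs inside GoogleDocstring and uses its indent/dedent helpers):

    def parse_admonition(admonition, lines):
        # One-line body goes inline; a longer body becomes an indented block.
        if len(lines) == 1:
            return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
        elif lines:
            return ['.. %s::' % admonition, ''] + ['   ' + line for line in lines] + ['']
        else:
            return ['.. %s::' % admonition, '']

    print('\n'.join(parse_admonition('note', ['Requires Python 3.'])))
    # .. note:: Requires Python 3.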

View File

@ -228,12 +228,12 @@ def depart_todo_node(self, node):
def latex_visit_todo_node(self, node): def latex_visit_todo_node(self, node):
# type: (LaTeXTranslator, todo_node) -> None # type: (LaTeXTranslator, todo_node) -> None
self.body.append(u'\n\\begin{sphinxadmonition}{note}{') self.body.append('\n\\begin{sphinxadmonition}{note}{')
# If this is the original todo node, emit a label that will be referenced by # If this is the original todo node, emit a label that will be referenced by
# a hyperref in the todolist. # a hyperref in the todolist.
target = node.get('targetref') target = node.get('targetref')
if target is not None: if target is not None:
self.body.append(u'\\label{%s}' % target) self.body.append('\\label{%s}' % target)
title_node = cast(nodes.title, node[0]) title_node = cast(nodes.title, node[0])
self.body.append('%s:}' % title_node.astext().translate(tex_escape_map)) self.body.append('%s:}' % title_node.astext().translate(tex_escape_map))
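Assembled, the appends above produce the admonition opener, roughly as follows (the target value here is hypothetical):

    body = []
    body.append('\n\\begin{sphinxadmonition}{note}{')
    target = 'index:todo-0'          # hypothetical 'targetref' value
    if target is not None:
        body.append('\\label{%s}' % target)
    body.append('%s:}' % 'Todo')
    print(''.join(body))
    # \begin{sphinxadmonition}{note}{\label{index:todo-0}Todo:}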

View File

@ -51,9 +51,9 @@ for _lexer in lexers.values():
_lexer.add_filter('raiseonerror') _lexer.add_filter('raiseonerror')
escape_hl_chars = {ord(u'\\'): u'\\PYGZbs{}', escape_hl_chars = {ord('\\'): '\\PYGZbs{}',
ord(u'{'): u'\\PYGZob{}', ord('{'): '\\PYGZob{}',
ord(u'}'): u'\\PYGZcb{}'} ord('}'): '\\PYGZcb{}'}
# used if Pygments is available # used if Pygments is available
# use textcomp quote to get a true single quote # use textcomp quote to get a true single quote
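The escape_hl_chars mapping above feeds str.translate, turning LaTeX-special characters in highlighted code into Pygments escape macros:

    escape_hl_chars = {ord('\\'): '\\PYGZbs{}',
                       ord('{'): '\\PYGZob{}',
                       ord('}'): '\\PYGZcb{}'}

    print('d = {}'.translate(escape_hl_chars))  # d = \PYGZob{}\PYGZcb{}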

View File

@ -23,9 +23,9 @@ if False:
# For type annotation # For type annotation
from typing import Any, Dict, IO, List, Tuple # NOQA from typing import Any, Dict, IO, List, Tuple # NOQA
comment_re = re.compile(u'^\\s*#: ?(.*)\r?\n?$') comment_re = re.compile('^\\s*#: ?(.*)\r?\n?$')
indent_re = re.compile(u'^\\s*$') indent_re = re.compile('^\\s*$')
emptyline_re = re.compile(u'^\\s*(#.*)?$') emptyline_re = re.compile('^\\s*(#.*)?$')
if sys.version_info >= (3, 6): if sys.version_info >= (3, 6):
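These regexes recognize the "#:" doc-comments attached to attributes; a quick check of what comment_re captures:

    import re

    comment_re = re.compile('^\\s*#: ?(.*)\r?\n?$')

    print(comment_re.match('    #: number of retries\n').group(1))  # number of retries
    print(comment_re.match('    # ordinary comment') is None)       # True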

View File

@ -257,7 +257,7 @@ def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text) text = utils.unescape(text)
if typ == 'menuselection': if typ == 'menuselection':
text = text.replace('-->', u'\N{TRIANGULAR BULLET}') text = text.replace('-->', '\N{TRIANGULAR BULLET}')
spans = _amp_re.split(text) spans = _amp_re.split(text)
node = nodes.inline(rawtext=rawtext) node = nodes.inline(rawtext=rawtext)
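So a :menuselection: role body written with the conventional "-->" arrow renders with the triangular bullet separator:

    text = 'File --> Open --> Recent'
    print(text.replace('-->', '\N{TRIANGULAR BULLET}'))  # File ‣ Open ‣ Recent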

View File

@ -291,7 +291,7 @@ class IndexBuilder:
with open(scoring, 'rb') as fp: with open(scoring, 'rb') as fp:
self.js_scorer_code = fp.read().decode() self.js_scorer_code = fp.read().decode()
else: else:
self.js_scorer_code = u'' self.js_scorer_code = ''
self.js_splitter_code = splitter_code self.js_splitter_code = splitter_code
def load(self, stream, format): def load(self, stream, format):

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -16,7 +16,7 @@ if False:
# For type annotation # For type annotation
from typing import Dict # NOQA from typing import Dict # NOQA
english_stopwords = set(u""" english_stopwords = set("""
a and are as at a and are as at
be but by be but by
for for

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -151,279 +151,279 @@ class JanomeSplitter(BaseSplitter):
def split(self, input): def split(self, input):
# type: (str) -> List[str] # type: (str) -> List[str]
result = u' '.join(token.surface for token in self.tokenizer.tokenize(input)) result = ' '.join(token.surface for token in self.tokenizer.tokenize(input))
return result.split(u' ') return result.split(' ')
class DefaultSplitter(BaseSplitter): class DefaultSplitter(BaseSplitter):
patterns_ = dict([(re.compile(pattern), value) for pattern, value in { patterns_ = dict([(re.compile(pattern), value) for pattern, value in {
u'[一二三四五六七八九十百千万億兆]': u'M', '[一二三四五六七八九十百千万億兆]': 'M',
u'[一-龠々〆ヵヶ]': u'H', '[一-龠々〆ヵヶ]': 'H',
u'[ぁ-ん]': u'I', '[ぁ-ん]': 'I',
u'[ァ-ヴーｱ-ﾝﾞｰ]': u'K', '[ァ-ヴーｱ-ﾝﾞｰ]': 'K',
u'[a-zA-Zａ-ｚＡ-Ｚ]': u'A', '[a-zA-Zａ-ｚＡ-Ｚ]': 'A',
u'[0-9０-９]': u'N', '[0-9０-９]': 'N',
}.items()]) }.items()])
BIAS__ = -332 BIAS__ = -332
BC1__ = {u'HH': 6, u'II': 2461, u'KH': 406, u'OH': -1378} BC1__ = {'HH': 6, 'II': 2461, 'KH': 406, 'OH': -1378}
BC2__ = {u'AA': -3267, u'AI': 2744, u'AN': -878, u'HH': -4070, u'HM': -1711, BC2__ = {'AA': -3267, 'AI': 2744, 'AN': -878, 'HH': -4070, 'HM': -1711,
u'HN': 4012, u'HO': 3761, u'IA': 1327, u'IH': -1184, u'II': -1332, 'HN': 4012, 'HO': 3761, 'IA': 1327, 'IH': -1184, 'II': -1332,
u'IK': 1721, u'IO': 5492, u'KI': 3831, u'KK': -8741, u'MH': -3132, 'IK': 1721, 'IO': 5492, 'KI': 3831, 'KK': -8741, 'MH': -3132,
u'MK': 3334, u'OO': -2920} 'MK': 3334, 'OO': -2920}
BC3__ = {u'HH': 996, u'HI': 626, u'HK': -721, u'HN': -1307, u'HO': -836, u'IH': -301, BC3__ = {'HH': 996, 'HI': 626, 'HK': -721, 'HN': -1307, 'HO': -836, 'IH': -301,
u'KK': 2762, u'MK': 1079, u'MM': 4034, u'OA': -1652, u'OH': 266} 'KK': 2762, 'MK': 1079, 'MM': 4034, 'OA': -1652, 'OH': 266}
BP1__ = {u'BB': 295, u'OB': 304, u'OO': -125, u'UB': 352} BP1__ = {'BB': 295, 'OB': 304, 'OO': -125, 'UB': 352}
BP2__ = {u'BO': 60, u'OO': -1762} BP2__ = {'BO': 60, 'OO': -1762}
BQ1__ = {u'BHH': 1150, u'BHM': 1521, u'BII': -1158, u'BIM': 886, u'BMH': 1208, BQ1__ = {'BHH': 1150, 'BHM': 1521, 'BII': -1158, 'BIM': 886, 'BMH': 1208,
u'BNH': 449, u'BOH': -91, u'BOO': -2597, u'OHI': 451, u'OIH': -296, 'BNH': 449, 'BOH': -91, 'BOO': -2597, 'OHI': 451, 'OIH': -296,
u'OKA': 1851, u'OKH': -1020, u'OKK': 904, u'OOO': 2965} 'OKA': 1851, 'OKH': -1020, 'OKK': 904, 'OOO': 2965}
BQ2__ = {u'BHH': 118, u'BHI': -1159, u'BHM': 466, u'BIH': -919, u'BKK': -1720, BQ2__ = {'BHH': 118, 'BHI': -1159, 'BHM': 466, 'BIH': -919, 'BKK': -1720,
u'BKO': 864, u'OHH': -1139, u'OHM': -181, u'OIH': 153, u'UHI': -1146} 'BKO': 864, 'OHH': -1139, 'OHM': -181, 'OIH': 153, 'UHI': -1146}
BQ3__ = {u'BHH': -792, u'BHI': 2664, u'BII': -299, u'BKI': 419, u'BMH': 937, BQ3__ = {'BHH': -792, 'BHI': 2664, 'BII': -299, 'BKI': 419, 'BMH': 937,
u'BMM': 8335, u'BNN': 998, u'BOH': 775, u'OHH': 2174, u'OHM': 439, u'OII': 280, 'BMM': 8335, 'BNN': 998, 'BOH': 775, 'OHH': 2174, 'OHM': 439, 'OII': 280,
u'OKH': 1798, u'OKI': -793, u'OKO': -2242, u'OMH': -2402, u'OOO': 11699} 'OKH': 1798, 'OKI': -793, 'OKO': -2242, 'OMH': -2402, 'OOO': 11699}
BQ4__ = {u'BHH': -3895, u'BIH': 3761, u'BII': -4654, u'BIK': 1348, u'BKK': -1806, BQ4__ = {'BHH': -3895, 'BIH': 3761, 'BII': -4654, 'BIK': 1348, 'BKK': -1806,
u'BMI': -3385, u'BOO': -12396, u'OAH': 926, u'OHH': 266, u'OHK': -2036, 'BMI': -3385, 'BOO': -12396, 'OAH': 926, 'OHH': 266, 'OHK': -2036,
u'ONN': -973} 'ONN': -973}
BW1__ = {u',と': 660, u',同': 727, u'B1あ': 1404, u'B1同': 542, u'、と': 660, BW1__ = {',と': 660, ',同': 727, 'B1あ': 1404, 'B1同': 542, '、と': 660,
u'、同': 727, u'」と': 1682, u'あっ': 1505, u'いう': 1743, u'いっ': -2055, '、同': 727, '」と': 1682, 'あっ': 1505, 'いう': 1743, 'いっ': -2055,
u'いる': 672, u'うし': -4817, u'うん': 665, u'から': 3472, u'がら': 600, 'いる': 672, 'うし': -4817, 'うん': 665, 'から': 3472, 'がら': 600,
u'こう': -790, u'こと': 2083, u'こん': -1262, u'さら': -4143, u'さん': 4573, 'こう': -790, 'こと': 2083, 'こん': -1262, 'さら': -4143, 'さん': 4573,
u'した': 2641, u'して': 1104, u'すで': -3399, u'そこ': 1977, u'それ': -871, 'した': 2641, 'して': 1104, 'すで': -3399, 'そこ': 1977, 'それ': -871,
u'たち': 1122, u'ため': 601, u'った': 3463, u'つい': -802, u'てい': 805, 'たち': 1122, 'ため': 601, 'った': 3463, 'つい': -802, 'てい': 805,
u'てき': 1249, u'でき': 1127, u'です': 3445, u'では': 844, u'とい': -4915, 'てき': 1249, 'でき': 1127, 'です': 3445, 'では': 844, 'とい': -4915,
u'とみ': 1922, u'どこ': 3887, u'ない': 5713, u'なっ': 3015, u'など': 7379, 'とみ': 1922, 'どこ': 3887, 'ない': 5713, 'なっ': 3015, 'など': 7379,
u'なん': -1113, u'にし': 2468, u'には': 1498, u'にも': 1671, u'に対': -912, 'なん': -1113, 'にし': 2468, 'には': 1498, 'にも': 1671, 'に対': -912,
u'の一': -501, u'の中': 741, u'ませ': 2448, u'まで': 1711, u'まま': 2600, 'の一': -501, 'の中': 741, 'ませ': 2448, 'まで': 1711, 'まま': 2600,
u'まる': -2155, u'やむ': -1947, u'よっ': -2565, u'れた': 2369, u'れで': -913, 'まる': -2155, 'やむ': -1947, 'よっ': -2565, 'れた': 2369, 'れで': -913,
u'をし': 1860, u'を見': 731, u'亡く': -1886, u'京都': 2558, u'取り': -2784, 'をし': 1860, 'を見': 731, '亡く': -1886, '京都': 2558, '取り': -2784,
u'大き': -2604, u'大阪': 1497, u'平方': -2314, u'引き': -1336, u'日本': -195, '大き': -2604, '大阪': 1497, '平方': -2314, '引き': -1336, '日本': -195,
u'本当': -2423, u'毎日': -2113, u'目指': -724, u'B1あ': 1404, u'B1同': 542, '本当': -2423, '毎日': -2113, '目指': -724, 'B1あ': 1404, 'B1同': 542,
u'」と': 1682} '」と': 1682}
BW2__ = {u'..': -11822, u'11': -669, u'――': -5730, u'': -13175, u'いう': -1609, BW2__ = {'..': -11822, '11': -669, '――': -5730, '': -13175, 'いう': -1609,
u'うか': 2490, u'かし': -1350, u'かも': -602, u'から': -7194, u'かれ': 4612, 'うか': 2490, 'かし': -1350, 'かも': -602, 'から': -7194, 'かれ': 4612,
u'がい': 853, u'がら': -3198, u'きた': 1941, u'くな': -1597, u'こと': -8392, 'がい': 853, 'がら': -3198, 'きた': 1941, 'くな': -1597, 'こと': -8392,
u'この': -4193, u'させ': 4533, u'され': 13168, u'さん': -3977, u'しい': -1819, 'この': -4193, 'させ': 4533, 'され': 13168, 'さん': -3977, 'しい': -1819,
u'しか': -545, u'した': 5078, u'して': 972, u'しな': 939, u'その': -3744, 'しか': -545, 'した': 5078, 'して': 972, 'しな': 939, 'その': -3744,
u'たい': -1253, u'たた': -662, u'ただ': -3857, u'たち': -786, u'たと': 1224, 'たい': -1253, 'たた': -662, 'ただ': -3857, 'たち': -786, 'たと': 1224,
u'たは': -939, u'った': 4589, u'って': 1647, u'っと': -2094, u'てい': 6144, 'たは': -939, 'った': 4589, 'って': 1647, 'っと': -2094, 'てい': 6144,
u'てき': 3640, u'てく': 2551, u'ては': -3110, u'ても': -3065, u'でい': 2666, 'てき': 3640, 'てく': 2551, 'ては': -3110, 'ても': -3065, 'でい': 2666,
u'でき': -1528, u'でし': -3828, u'です': -4761, u'でも': -4203, u'とい': 1890, 'でき': -1528, 'でし': -3828, 'です': -4761, 'でも': -4203, 'とい': 1890,
u'とこ': -1746, u'とと': -2279, u'との': 720, u'とみ': 5168, u'とも': -3941, 'とこ': -1746, 'とと': -2279, 'との': 720, 'とみ': 5168, 'とも': -3941,
u'ない': -2488, u'なが': -1313, u'など': -6509, u'なの': 2614, u'なん': 3099, 'ない': -2488, 'なが': -1313, 'など': -6509, 'なの': 2614, 'なん': 3099,
u'にお': -1615, u'にし': 2748, u'にな': 2454, u'によ': -7236, u'に対': -14943, 'にお': -1615, 'にし': 2748, 'にな': 2454, 'によ': -7236, 'に対': -14943,
u'に従': -4688, u'に関': -11388, u'のか': 2093, u'ので': -7059, u'のに': -6041, 'に従': -4688, 'に関': -11388, 'のか': 2093, 'ので': -7059, 'のに': -6041,
u'のの': -6125, u'はい': 1073, u'はが': -1033, u'はず': -2532, u'ばれ': 1813, 'のの': -6125, 'はい': 1073, 'はが': -1033, 'はず': -2532, 'ばれ': 1813,
u'まし': -1316, u'まで': -6621, u'まれ': 5409, u'めて': -3153, u'もい': 2230, 'まし': -1316, 'まで': -6621, 'まれ': 5409, 'めて': -3153, 'もい': 2230,
u'もの': -10713, u'らか': -944, u'らし': -1611, u'らに': -1897, u'りし': 651, 'もの': -10713, 'らか': -944, 'らし': -1611, 'らに': -1897, 'りし': 651,
u'りま': 1620, u'れた': 4270, u'れて': 849, u'れば': 4114, u'ろう': 6067, 'りま': 1620, 'れた': 4270, 'れて': 849, 'れば': 4114, 'ろう': 6067,
u'われ': 7901, u'を通': -11877, u'んだ': 728, u'んな': -4115, u'一人': 602, 'われ': 7901, 'を通': -11877, 'んだ': 728, 'んな': -4115, '一人': 602,
u'一方': -1375, u'一日': 970, u'一部': -1051, u'上が': -4479, u'会社': -1116, '一方': -1375, '一日': 970, '一部': -1051, '上が': -4479, '会社': -1116,
u'出て': 2163, u'分の': -7758, u'同党': 970, u'同日': -913, u'大阪': -2471, '出て': 2163, '分の': -7758, '同党': 970, '同日': -913, '大阪': -2471,
u'委員': -1250, u'少な': -1050, u'年度': -8669, u'年間': -1626, u'府県': -2363, '委員': -1250, '少な': -1050, '年度': -8669, '年間': -1626, '府県': -2363,
u'手権': -1982, u'新聞': -4066, u'日新': -722, u'日本': -7068, u'日米': 3372, '手権': -1982, '新聞': -4066, '日新': -722, '日本': -7068, '日米': 3372,
u'曜日': -601, u'朝鮮': -2355, u'本人': -2697, u'東京': -1543, u'然と': -1384, '曜日': -601, '朝鮮': -2355, '本人': -2697, '東京': -1543, '然と': -1384,
u'社会': -1276, u'立て': -990, u'第に': -1612, u'米国': -4268, u'': -669} '社会': -1276, '立て': -990, '第に': -1612, '米国': -4268, '': -669}
BW3__ = {u'あた': -2194, u'あり': 719, u'ある': 3846, u'い.': -1185, u'い。': -1185, BW3__ = {'あた': -2194, 'あり': 719, 'ある': 3846, 'い.': -1185, 'い。': -1185,
u'いい': 5308, u'いえ': 2079, u'いく': 3029, u'いた': 2056, u'いっ': 1883, 'いい': 5308, 'いえ': 2079, 'いく': 3029, 'いた': 2056, 'いっ': 1883,
u'いる': 5600, u'いわ': 1527, u'うち': 1117, u'うと': 4798, u'えと': 1454, 'いる': 5600, 'いわ': 1527, 'うち': 1117, 'うと': 4798, 'えと': 1454,
u'か.': 2857, u'か。': 2857, u'かけ': -743, u'かっ': -4098, u'かに': -669, 'か.': 2857, 'か。': 2857, 'かけ': -743, 'かっ': -4098, 'かに': -669,
u'から': 6520, u'かり': -2670, u'が,': 1816, u'が、': 1816, u'がき': -4855, 'から': 6520, 'かり': -2670, 'が,': 1816, 'が、': 1816, 'がき': -4855,
u'がけ': -1127, u'がっ': -913, u'がら': -4977, u'がり': -2064, u'きた': 1645, 'がけ': -1127, 'がっ': -913, 'がら': -4977, 'がり': -2064, 'きた': 1645,
u'けど': 1374, u'こと': 7397, u'この': 1542, u'ころ': -2757, u'さい': -714, 'けど': 1374, 'こと': 7397, 'この': 1542, 'ころ': -2757, 'さい': -714,
u'さを': 976, u'し,': 1557, u'し、': 1557, u'しい': -3714, u'した': 3562, 'さを': 976, 'し,': 1557, 'し、': 1557, 'しい': -3714, 'した': 3562,
u'して': 1449, u'しな': 2608, u'しま': 1200, u'す.': -1310, u'す。': -1310, 'して': 1449, 'しな': 2608, 'しま': 1200, 'す.': -1310, 'す。': -1310,
u'する': 6521, u'ず,': 3426, u'ず、': 3426, u'ずに': 841, u'そう': 428, 'する': 6521, 'ず,': 3426, 'ず、': 3426, 'ずに': 841, 'そう': 428,
u'た.': 8875, u'た。': 8875, u'たい': -594, u'たの': 812, u'たり': -1183, 'た.': 8875, 'た。': 8875, 'たい': -594, 'たの': 812, 'たり': -1183,
u'たる': -853, u'だ.': 4098, u'だ。': 4098, u'だっ': 1004, u'った': -4748, 'たる': -853, 'だ.': 4098, 'だ。': 4098, 'だっ': 1004, 'った': -4748,
u'って': 300, u'てい': 6240, u'てお': 855, u'ても': 302, u'です': 1437, 'って': 300, 'てい': 6240, 'てお': 855, 'ても': 302, 'です': 1437,
u'でに': -1482, u'では': 2295, u'とう': -1387, u'とし': 2266, u'との': 541, 'でに': -1482, 'では': 2295, 'とう': -1387, 'とし': 2266, 'との': 541,
u'とも': -3543, u'どう': 4664, u'ない': 1796, u'なく': -903, u'など': 2135, 'とも': -3543, 'どう': 4664, 'ない': 1796, 'なく': -903, 'など': 2135,
u'に,': -1021, u'に、': -1021, u'にし': 1771, u'にな': 1906, u'には': 2644, 'に,': -1021, 'に、': -1021, 'にし': 1771, 'にな': 1906, 'には': 2644,
u'の,': -724, u'の、': -724, u'の子': -1000, u'は,': 1337, u'は、': 1337, 'の,': -724, 'の、': -724, 'の子': -1000, 'は,': 1337, 'は、': 1337,
u'べき': 2181, u'まし': 1113, u'ます': 6943, u'まっ': -1549, u'まで': 6154, 'べき': 2181, 'まし': 1113, 'ます': 6943, 'まっ': -1549, 'まで': 6154,
u'まれ': -793, u'らし': 1479, u'られ': 6820, u'るる': 3818, u'れ,': 854, 'まれ': -793, 'らし': 1479, 'られ': 6820, 'るる': 3818, 'れ,': 854,
u'れ、': 854, u'れた': 1850, u'れて': 1375, u'れば': -3246, u'れる': 1091, 'れ、': 854, 'れた': 1850, 'れて': 1375, 'れば': -3246, 'れる': 1091,
u'われ': -605, u'んだ': 606, u'んで': 798, u'カ月': 990, u'会議': 860, 'われ': -605, 'んだ': 606, 'んで': 798, 'カ月': 990, '会議': 860,
u'入り': 1232, u'大会': 2217, u'始め': 1681, u'': 965, u'新聞': -5055, '入り': 1232, '大会': 2217, '始め': 1681, '': 965, '新聞': -5055,
u'日,': 974, u'日、': 974, u'社会': 2024, u'カ月': 990} '日,': 974, '日、': 974, '社会': 2024, 'カ月': 990}
TC1__ = {u'AAA': 1093, u'HHH': 1029, u'HHM': 580, u'HII': 998, u'HOH': -390, TC1__ = {'AAA': 1093, 'HHH': 1029, 'HHM': 580, 'HII': 998, 'HOH': -390,
u'HOM': -331, u'IHI': 1169, u'IOH': -142, u'IOI': -1015, u'IOM': 467, 'HOM': -331, 'IHI': 1169, 'IOH': -142, 'IOI': -1015, 'IOM': 467,
u'MMH': 187, u'OOI': -1832} 'MMH': 187, 'OOI': -1832}
TC2__ = {u'HHO': 2088, u'HII': -1023, u'HMM': -1154, u'IHI': -1965, TC2__ = {'HHO': 2088, 'HII': -1023, 'HMM': -1154, 'IHI': -1965,
u'KKH': 703, u'OII': -2649} 'KKH': 703, 'OII': -2649}
TC3__ = {u'AAA': -294, u'HHH': 346, u'HHI': -341, u'HII': -1088, u'HIK': 731, TC3__ = {'AAA': -294, 'HHH': 346, 'HHI': -341, 'HII': -1088, 'HIK': 731,
u'HOH': -1486, u'IHH': 128, u'IHI': -3041, u'IHO': -1935, u'IIH': -825, 'HOH': -1486, 'IHH': 128, 'IHI': -3041, 'IHO': -1935, 'IIH': -825,
u'IIM': -1035, u'IOI': -542, u'KHH': -1216, u'KKA': 491, u'KKH': -1217, 'IIM': -1035, 'IOI': -542, 'KHH': -1216, 'KKA': 491, 'KKH': -1217,
u'KOK': -1009, u'MHH': -2694, u'MHM': -457, u'MHO': 123, u'MMH': -471, 'KOK': -1009, 'MHH': -2694, 'MHM': -457, 'MHO': 123, 'MMH': -471,
u'NNH': -1689, u'NNO': 662, u'OHO': -3393} 'NNH': -1689, 'NNO': 662, 'OHO': -3393}
TC4__ = {u'HHH': -203, u'HHI': 1344, u'HHK': 365, u'HHM': -122, u'HHN': 182, TC4__ = {'HHH': -203, 'HHI': 1344, 'HHK': 365, 'HHM': -122, 'HHN': 182,
u'HHO': 669, u'HIH': 804, u'HII': 679, u'HOH': 446, u'IHH': 695, 'HHO': 669, 'HIH': 804, 'HII': 679, 'HOH': 446, 'IHH': 695,
u'IHO': -2324, u'IIH': 321, u'III': 1497, u'IIO': 656, u'IOO': 54, 'IHO': -2324, 'IIH': 321, 'III': 1497, 'IIO': 656, 'IOO': 54,
u'KAK': 4845, u'KKA': 3386, u'KKK': 3065, u'MHH': -405, u'MHI': 201, 'KAK': 4845, 'KKA': 3386, 'KKK': 3065, 'MHH': -405, 'MHI': 201,
u'MMH': -241, u'MMM': 661, u'MOM': 841} 'MMH': -241, 'MMM': 661, 'MOM': 841}
TQ1__ = {u'BHHH': -227, u'BHHI': 316, u'BHIH': -132, u'BIHH': 60, u'BIII': 1595, TQ1__ = {'BHHH': -227, 'BHHI': 316, 'BHIH': -132, 'BIHH': 60, 'BIII': 1595,
u'BNHH': -744, u'BOHH': 225, u'BOOO': -908, u'OAKK': 482, u'OHHH': 281, 'BNHH': -744, 'BOHH': 225, 'BOOO': -908, 'OAKK': 482, 'OHHH': 281,
u'OHIH': 249, u'OIHI': 200, u'OIIH': -68} 'OHIH': 249, 'OIHI': 200, 'OIIH': -68}
TQ2__ = {u'BIHH': -1401, u'BIII': -1033, u'BKAK': -543, u'BOOO': -5591} TQ2__ = {'BIHH': -1401, 'BIII': -1033, 'BKAK': -543, 'BOOO': -5591}
TQ3__ = {u'BHHH': 478, u'BHHM': -1073, u'BHIH': 222, u'BHII': -504, u'BIIH': -116, TQ3__ = {'BHHH': 478, 'BHHM': -1073, 'BHIH': 222, 'BHII': -504, 'BIIH': -116,
u'BIII': -105, u'BMHI': -863, u'BMHM': -464, u'BOMH': 620, u'OHHH': 346, 'BIII': -105, 'BMHI': -863, 'BMHM': -464, 'BOMH': 620, 'OHHH': 346,
u'OHHI': 1729, u'OHII': 997, u'OHMH': 481, u'OIHH': 623, u'OIIH': 1344, 'OHHI': 1729, 'OHII': 997, 'OHMH': 481, 'OIHH': 623, 'OIIH': 1344,
u'OKAK': 2792, u'OKHH': 587, u'OKKA': 679, u'OOHH': 110, u'OOII': -685} 'OKAK': 2792, 'OKHH': 587, 'OKKA': 679, 'OOHH': 110, 'OOII': -685}
TQ4__ = {u'BHHH': -721, u'BHHM': -3604, u'BHII': -966, u'BIIH': -607, u'BIII': -2181, TQ4__ = {'BHHH': -721, 'BHHM': -3604, 'BHII': -966, 'BIIH': -607, 'BIII': -2181,
u'OAAA': -2763, u'OAKK': 180, u'OHHH': -294, u'OHHI': 2446, u'OHHO': 480, 'OAAA': -2763, 'OAKK': 180, 'OHHH': -294, 'OHHI': 2446, 'OHHO': 480,
u'OHIH': -1573, u'OIHH': 1935, u'OIHI': -493, u'OIIH': 626, u'OIII': -4007, 'OHIH': -1573, 'OIHH': 1935, 'OIHI': -493, 'OIIH': 626, 'OIII': -4007,
u'OKAK': -8156} 'OKAK': -8156}
TW1__ = {u'につい': -4681, u'東京都': 2026} TW1__ = {'につい': -4681, '東京都': 2026}
TW2__ = {u'ある程': -2049, u'いった': -1256, u'ころが': -2434, u'しょう': 3873, TW2__ = {'ある程': -2049, 'いった': -1256, 'ころが': -2434, 'しょう': 3873,
u'その後': -4430, u'だって': -1049, u'ていた': 1833, u'として': -4657, 'その後': -4430, 'だって': -1049, 'ていた': 1833, 'として': -4657,
u'ともに': -4517, u'もので': 1882, u'一気に': -792, u'初めて': -1512, 'ともに': -4517, 'もので': 1882, '一気に': -792, '初めて': -1512,
u'同時に': -8097, u'大きな': -1255, u'対して': -2721, u'社会党': -3216} '同時に': -8097, '大きな': -1255, '対して': -2721, '社会党': -3216}
TW3__ = {u'いただ': -1734, u'してい': 1314, u'として': -4314, u'につい': -5483, TW3__ = {'いただ': -1734, 'してい': 1314, 'として': -4314, 'につい': -5483,
u'にとっ': -5989, u'に当た': -6247, u'ので,': -727, u'ので、': -727, 'にとっ': -5989, 'に当た': -6247, 'ので,': -727, 'ので、': -727,
u'のもの': -600, u'れから': -3752, u'十二月': -2287} 'のもの': -600, 'れから': -3752, '十二月': -2287}
TW4__ = {u'いう.': 8576, u'いう。': 8576, u'からな': -2348, u'してい': 2958, TW4__ = {'いう.': 8576, 'いう。': 8576, 'からな': -2348, 'してい': 2958,
u'たが,': 1516, u'たが、': 1516, u'ている': 1538, u'という': 1349, 'たが,': 1516, 'たが、': 1516, 'ている': 1538, 'という': 1349,
u'ました': 5543, u'ません': 1097, u'ようと': -4258, u'よると': 5865} 'ました': 5543, 'ません': 1097, 'ようと': -4258, 'よると': 5865}
UC1__ = {u'A': 484, u'K': 93, u'M': 645, u'O': -505} UC1__ = {'A': 484, 'K': 93, 'M': 645, 'O': -505}
UC2__ = {u'A': 819, u'H': 1059, u'I': 409, u'M': 3987, u'N': 5775, u'O': 646} UC2__ = {'A': 819, 'H': 1059, 'I': 409, 'M': 3987, 'N': 5775, 'O': 646}
UC3__ = {u'A': -1370, u'I': 2311} UC3__ = {'A': -1370, 'I': 2311}
UC4__ = {u'A': -2643, u'H': 1809, u'I': -1032, u'K': -3450, u'M': 3565, UC4__ = {'A': -2643, 'H': 1809, 'I': -1032, 'K': -3450, 'M': 3565,
u'N': 3876, u'O': 6646} 'N': 3876, 'O': 6646}
UC5__ = {u'H': 313, u'I': -1238, u'K': -799, u'M': 539, u'O': -831} UC5__ = {'H': 313, 'I': -1238, 'K': -799, 'M': 539, 'O': -831}
UC6__ = {u'H': -506, u'I': -253, u'K': 87, u'M': 247, u'O': -387} UC6__ = {'H': -506, 'I': -253, 'K': 87, 'M': 247, 'O': -387}
UP1__ = {u'O': -214} UP1__ = {'O': -214}
UP2__ = {u'B': 69, u'O': 935} UP2__ = {'B': 69, 'O': 935}
UP3__ = {u'B': 189} UP3__ = {'B': 189}
UQ1__ = {u'BH': 21, u'BI': -12, u'BK': -99, u'BN': 142, u'BO': -56, u'OH': -95, UQ1__ = {'BH': 21, 'BI': -12, 'BK': -99, 'BN': 142, 'BO': -56, 'OH': -95,
u'OI': 477, u'OK': 410, u'OO': -2422} 'OI': 477, 'OK': 410, 'OO': -2422}
UQ2__ = {u'BH': 216, u'BI': 113, u'OK': 1759} UQ2__ = {'BH': 216, 'BI': 113, 'OK': 1759}
UQ3__ = {u'BA': -479, u'BH': 42, u'BI': 1913, u'BK': -7198, u'BM': 3160, UQ3__ = {'BA': -479, 'BH': 42, 'BI': 1913, 'BK': -7198, 'BM': 3160,
u'BN': 6427, u'BO': 14761, u'OI': -827, u'ON': -3212} 'BN': 6427, 'BO': 14761, 'OI': -827, 'ON': -3212}
UW1__ = {u',': 156, u'': 156, u'': -463, u'': -941, u'': -127, u'': -553, UW1__ = {',': 156, '': 156, '': -463, '': -941, '': -127, '': -553,
u'': 121, u'': 505, u'': -201, u'': -547, u'': -123, u'': -789, '': 121, '': 505, '': -201, '': -547, '': -123, '': -789,
u'': -185, u'': -847, u'': -466, u'': -470, u'': 182, u'': -292, '': -185, '': -847, '': -466, '': -470, '': 182, '': -292,
u'': 208, u'': 169, u'': -446, u'': -137, u'': -135, u'': -402, '': 208, '': 169, '': -446, '': -137, '': -135, '': -402,
u'': -268, u'': -912, u'': 871, u'': -460, u'': 561, u'': 729, '': -268, '': -912, '': 871, '': -460, '': 561, '': 729,
u'': -411, u'': -141, u'': 361, u'': -408, u'': -386, u'': -718, '': -411, '': -141, '': 361, '': -408, '': -386, '': -718,
u'': -463, u'': -135} '': -463, '': -135}
UW2__ = {u',': -829, u'': -829, u'': 892, u'': -645, u'': 3145, u'': -538, UW2__ = {',': -829, '': -829, '': 892, '': -645, '': 3145, '': -538,
u'': 505, u'': 134, u'': -502, u'': 1454, u'': -856, u'': -412, '': 505, '': 134, '': -502, '': 1454, '': -856, '': -412,
u'': 1141, u'': 878, u'': 540, u'': 1529, u'': -675, u'': 300, '': 1141, '': 878, '': 540, '': 1529, '': -675, '': 300,
u'': -1011, u'': 188, u'': 1837, u'': -949, u'': -291, u'': -268, '': -1011, '': 188, '': 1837, '': -949, '': -291, '': -268,
u'': -981, u'': 1273, u'': 1063, u'': -1764, u'': 130, u'': -409, '': -981, '': 1273, '': 1063, '': -1764, '': 130, '': -409,
u'': -1273, u'': 1261, u'': 600, u'': -1263, u'': -402, u'': 1639, '': -1273, '': 1261, '': 600, '': -1263, '': -402, '': 1639,
u'': -579, u'': -694, u'': 571, u'': -2516, u'': 2095, u'': -587, '': -579, '': -694, '': 571, '': -2516, '': 2095, '': -587,
u'': 306, u'': 568, u'': 831, u'': -758, u'': -2150, u'': -302, '': 306, '': 568, '': 831, '': -758, '': -2150, '': -302,
u'': -968, u'': -861, u'': 492, u'': -123, u'': 978, u'': 362, '': -968, '': -861, '': 492, '': -123, '': 978, '': 362,
u'': 548, u'': -3025, u'': -1566, u'': -3414, u'': -422, u'': -1769, '': 548, '': -3025, '': -1566, '': -3414, '': -422, '': -1769,
u'': -865, u'': -483, u'': -1519, u'': 760, u'': 1023, u'': -2009, '': -865, '': -483, '': -1519, '': 760, '': 1023, '': -2009,
u'': -813, u'': -1060, u'': 1067, u'': -1519, u'': -1033, u'': 1522, '': -813, '': -1060, '': 1067, '': -1519, '': -1033, '': 1522,
u'': -1355, u'': -1682, u'': -1815, u'': -1462, u'': -630, u'': -1843, '': -1355, '': -1682, '': -1815, '': -1462, '': -630, '': -1843,
u'': -1650, u'': -931, u'': -665, u'': -2378, u'': -180, u'': -1740, '': -1650, '': -931, '': -665, '': -2378, '': -180, '': -1740,
u'': 752, u'': 529, u'': -1584, u'': -242, u'': -1165, u'': -763, '': 752, '': 529, '': -1584, '': -242, '': -1165, '': -763,
u'': 810, u'': 509, u'': -1353, u'': 838, u'西': -744, u'': -3874, '': 810, '': 509, '': -1353, '': 838, '西': -744, '': -3874,
u'調': 1010, u'': 1198, u'': 3041, u'': 1758, u'': -1257, u'': -645, '調': 1010, '': 1198, '': 3041, '': 1758, '': -1257, '': -645,
u'': 3145, u'': 831, u'': -587, u'': 306, u'': 568} '': 3145, '': 831, '': -587, '': 306, '': 568}
UW3__ = {u',': 4889, u'1': -800, u'': -1723, u'': 4889, u'': -2311, u'': 5827, UW3__ = {',': 4889, '1': -800, '': -1723, '': 4889, '': -2311, '': 5827,
u'': 2670, u'': -3573, u'': -2696, u'': 1006, u'': 2342, u'': 1983, '': 2670, '': -3573, '': -2696, '': 1006, '': 2342, '': 1983,
u'': -4864, u'': -1163, u'': 3271, u'': 1004, u'': 388, u'': 401, '': -4864, '': -1163, '': 3271, '': 1004, '': 388, '': 401,
u'': -3552, u'': -3116, u'': -1058, u'': -395, u'': 584, u'': 3685, '': -3552, '': -3116, '': -1058, '': -395, '': 584, '': 3685,
u'': -5228, u'': 842, u'': -521, u'': -1444, u'': -1081, u'': 6167, '': -5228, '': 842, '': -521, '': -1444, '': -1081, '': 6167,
u'': 2318, u'': 1691, u'': -899, u'': -2788, u'': 2745, u'': 4056, '': 2318, '': 1691, '': -899, '': -2788, '': 2745, '': 4056,
u'': 4555, u'': -2171, u'': -1798, u'': 1199, u'': -5516, u'': -4384, '': 4555, '': -2171, '': -1798, '': 1199, '': -5516, '': -4384,
u'': -120, u'': 1205, u'': 2323, u'': -788, u'': -202, u'': 727, '': -120, '': 1205, '': 2323, '': -788, '': -202, '': 727,
u'': 649, u'': 5905, u'': 2773, u'': -1207, u'': 6620, u'': -518, '': 649, '': 5905, '': 2773, '': -1207, '': 6620, '': -518,
u'': 551, u'': 1319, u'': 874, u'': -1350, u'': 521, u'': 1109, '': 551, '': 1319, '': 874, '': -1350, '': 521, '': 1109,
u'': 1591, u'': 2201, u'': 278, u'': -3794, u'': -1619, u'': -1759, '': 1591, '': 2201, '': 278, '': -3794, '': -1619, '': -1759,
u'': -2087, u'': 3815, u'': 653, u'': -758, u'': -1193, u'': 974, '': -2087, '': 3815, '': 653, '': -758, '': -1193, '': 974,
u'': 2742, u'': 792, u'': 1889, u'': -1368, u'': 811, u'': 4265, '': 2742, '': 792, '': 1889, '': -1368, '': 811, '': 4265,
u'': -361, u'': -2439, u'': 4858, u'': 3593, u'': 1574, u'': -3030, '': -361, '': -2439, '': 4858, '': 3593, '': 1574, '': -3030,
u'': 755, u'': -1880, u'': 5807, u'': 3095, u'': 457, u'': 2475, '': 755, '': -1880, '': 5807, '': 3095, '': 457, '': 2475,
u'': 1129, u'': 2286, u'': 4437, u'': 365, u'': -949, u'': -1872, '': 1129, '': 2286, '': 4437, '': 365, '': -949, '': -1872,
u'': 1327, u'': -1038, u'': 4646, u'': -2309, u'': -783, u'': -1006, '': 1327, '': -1038, '': 4646, '': -2309, '': -783, '': -1006,
u'': 483, u'': 1233, u'': 3588, u'': -241, u'': 3906, u'': -837, '': 483, '': 1233, '': 3588, '': -241, '': 3906, '': -837,
u'': 4513, u'': 642, u'': 1389, u'': 1219, u'': -241, u'': 2016, '': 4513, '': 642, '': 1389, '': 1219, '': -241, '': 2016,
u'': -1356, u'': -423, u'': -1008, u'': 1078, u'': -513, u'': -3102, '': -1356, '': -423, '': -1008, '': 1078, '': -513, '': -3102,
u'': 1155, u'': 3197, u'': -1804, u'': 2416, u'': -1030, u'': 1605, '': 1155, '': 3197, '': -1804, '': 2416, '': -1030, '': 1605,
u'': 1452, u'': -2352, u'': -3885, u'': 1905, u'': -1291, u'': 1822, '': 1452, '': -2352, '': -3885, '': 1905, '': -1291, '': 1822,
u'': -488, u'': -3973, u'': -2013, u'': -1479, u'': 3222, u'': -1489, '': -488, '': -3973, '': -2013, '': -1479, '': 3222, '': -1489,
u'': 1764, u'': 2099, u'': 5792, u'': -661, u'': -1248, u'': -951, '': 1764, '': 2099, '': 5792, '': -661, '': -1248, '': -951,
u'': -937, u'': 4125, u'': 360, u'': 3094, u'': 364, u'': -805, '': -937, '': 4125, '': 360, '': 3094, '': 364, '': -805,
u'': 5156, u'': 2438, u'': 484, u'': 2613, u'': -1694, u'': -1073, '': 5156, '': 2438, '': 484, '': 2613, '': -1694, '': -1073,
u'': 1868, u'': -495, u'': 979, u'': 461, u'': -3850, u'': -273, '': 1868, '': -495, '': 979, '': 461, '': -3850, '': -273,
u'': 914, u'': 1215, u'': 7313, u'': -1835, u'': 792, u'': 6293, '': 914, '': 1215, '': 7313, '': -1835, '': 792, '': 6293,
u'': -1528, u'': 4231, u'': 401, u'': -960, u'': 1201, u'': 7767, '': -1528, '': 4231, '': 401, '': -960, '': 1201, '': 7767,
u'': 3066, u'': 3663, u'': 1384, u'': -4229, u'': 1163, u'': 1255, '': 3066, '': 3663, '': 1384, '': -4229, '': 1163, '': 1255,
u'': 6457, u'': 725, u'': -2869, u'': 785, u'': 1044, u'調': -562, '': 6457, '': 725, '': -2869, '': 785, '': 1044, '調': -562,
u'': -733, u'': 1777, u'': 1835, u'': 1375, u'': -1504, u'': -1136, '': -733, '': 1777, '': 1835, '': 1375, '': -1504, '': -1136,
u'': -681, u'': 1026, u'': 4404, u'': 1200, u'': 2163, u'': 421, '': -681, '': 1026, '': 4404, '': 1200, '': 2163, '': 421,
u'': -1432, u'': 1302, u'': -1282, u'': 2009, u'': -1045, u'': 2066, '': -1432, '': 1302, '': -1282, '': 2009, '': -1045, '': 2066,
u'': 1620, u'': -800, u'': 2670, u'': -3794, u'': -1350, u'': 551, '': 1620, '': -800, '': 2670, '': -3794, '': -1350, '': 551,
u'グ': 1319, u'': 874, u'': 521, u'': 1109, u'': 1591, u'': 2201, u'': 278} 'グ': 1319, '': 874, '': 521, '': 1109, '': 1591, '': 2201, '': 278}
UW4__ = {u',': 3930, u'.': 3508, u'': -4841, u'': 3930, u'': 3508, u'': 4999, UW4__ = {',': 3930, '.': 3508, '': -4841, '': 3930, '': 3508, '': 4999,
u'': 1895, u'': 3798, u'': -5156, u'': 4752, u'': -3435, u'': -640, '': 1895, '': 3798, '': -5156, '': 4752, '': -3435, '': -640,
u'': -2514, u'': 2405, u'': 530, u'': 6006, u'': -4482, u'': -3821, '': -2514, '': 2405, '': 530, '': 6006, '': -4482, '': -3821,
u'': -3788, u'': -4376, u'': -4734, u'': 2255, u'': 1979, u'': 2864, '': -3788, '': -4376, '': -4734, '': 2255, '': 1979, '': 2864,
u'': -843, u'': -2506, u'': -731, u'': 1251, u'': 181, u'': 4091, '': -843, '': -2506, '': -731, '': 1251, '': 181, '': 4091,
u'': 5034, u'': 5408, u'': -3654, u'': -5882, u'': -1659, u'': 3994, '': 5034, '': 5408, '': -3654, '': -5882, '': -1659, '': 3994,
u'': 7410, u'': 4547, u'': 5433, u'': 6499, u'': 1853, u'': 1413, '': 7410, '': 4547, '': 5433, '': 6499, '': 1853, '': 1413,
u'': 7396, u'': 8578, u'': 1940, u'': 4249, u'': -4134, u'': 1345, '': 7396, '': 8578, '': 1940, '': 4249, '': -4134, '': 1345,
u'': 6665, u'': -744, u'': 1464, u'': 1051, u'': -2082, u'': -882, '': 6665, '': -744, '': 1464, '': 1051, '': -2082, '': -882,
u'': -5046, u'': 4169, u'': -2666, u'': 2795, u'': -1544, u'': 3351, '': -5046, '': 4169, '': -2666, '': 2795, '': -1544, '': 3351,
u'': -2922, u'': -9726, u'': -14896, u'': -2613, u'': -4570, '': -2922, '': -9726, '': -14896, '': -2613, '': -4570,
u'': -1783, u'': 13150, u'': -2352, u'': 2145, u'': 1789, u'': 1287, '': -1783, '': 13150, '': -2352, '': 2145, '': 1789, '': 1287,
u'': -724, u'': -403, u'': -1635, u'': -881, u'': -541, u'': -856, '': -724, '': -403, '': -1635, '': -881, '': -541, '': -856,
u'': -3637, u'': -4371, u'': -11870, u'': -2069, u'': 2210, u'': 782, '': -3637, '': -4371, '': -11870, '': -2069, '': 2210, '': 782,
u'': -190, u'': -1768, u'': 1036, u'': 544, u'': 950, u'': -1286, '': -190, '': -1768, '': 1036, '': 544, '': 950, '': -1286,
u'': 530, u'': 4292, u'': 601, u'': -2006, u'': -1212, u'': 584, '': 530, '': 4292, '': 601, '': -2006, '': -1212, '': 584,
u'': 788, u'': 1347, u'': 1623, u'': 3879, u'': -302, u'': -740, '': 788, '': 1347, '': 1623, '': 3879, '': -302, '': -740,
u'': -2715, u'': 776, u'': 4517, u'': 1013, u'': 1555, u'': -1834, '': -2715, '': 776, '': 4517, '': 1013, '': 1555, '': -1834,
u'': -681, u'': -910, u'': -851, u'': 1500, u'': -619, u'': -1200, '': -681, '': -910, '': -851, '': 1500, '': -619, '': -1200,
u'': 866, u'': -1410, u'': -2094, u'': -1413, u'': 1067, u'': 571, '': 866, '': -1410, '': -2094, '': -1413, '': 1067, '': 571,
u'': -4802, u'': -1397, u'': -1057, u'': -809, u'': 1910, u'': -1328, '': -4802, '': -1397, '': -1057, '': -809, '': 1910, '': -1328,
u'': -1500, u'': -2056, u'': -2667, u'': 2771, u'': 374, u'': -4556, '': -1500, '': -2056, '': -2667, '': 2771, '': 374, '': -4556,
u'': 456, u'': 553, u'': 916, u'': -1566, u'': 856, u'': 787, '': 456, '': 553, '': 916, '': -1566, '': 856, '': 787,
u'': 2182, u'': 704, u'': 522, u'': -856, u'': 1798, u'': 1829, '': 2182, '': 704, '': 522, '': -856, '': 1798, '': 1829,
u'': 845, u'': -9066, u'': -485, u'': -442, u'': -360, u'': -1043, '': 845, '': -9066, '': -485, '': -442, '': -360, '': -1043,
u'': 5388, u'': -2716, u'': -910, u'': -939, u'': -543, u'': -735, '': 5388, '': -2716, '': -910, '': -939, '': -543, '': -735,
u'': 672, u'': -1267, u'': -1286, u'': -1101, u'': -2900, u'': 1826, '': 672, '': -1267, '': -1286, '': -1101, '': -2900, '': 1826,
u'': 2586, u'': 922, u'': -3485, u'': 2997, u'': -867, u'': -2112, '': 2586, '': 922, '': -3485, '': 2997, '': -867, '': -2112,
u'': 788, u'': 2937, u'': 786, u'': 2171, u'': 1146, u'': -1169, '': 788, '': 2937, '': 786, '': 2171, '': 1146, '': -1169,
u'': 940, u'': -994, u'': 749, u'': 2145, u'': -730, u'': -852, '': 940, '': -994, '': 749, '': 2145, '': -730, '': -852,
u'': -792, u'': 792, u'': -1184, u'': -244, u'': -1000, u'': 730, '': -792, '': 792, '': -1184, '': -244, '': -1000, '': 730,
u'': -1481, u'': 1158, u'': -1433, u'': -3370, u'': 929, u'': -1291, '': -1481, '': 1158, '': -1433, '': -3370, '': 929, '': -1291,
u'': 2596, u'': -4866, u'': 1192, u'': -1100, u'': -2213, u'': 357, '': 2596, '': -4866, '': 1192, '': -1100, '': -2213, '': 357,
u'': -2344, u'': -2297, u'': -2604, u'': -878, u'': -1659, u'': -792, '': -2344, '': -2297, '': -2604, '': -878, '': -1659, '': -792,
u'': -1984, u'': 1749, u'': 2120, u'': 1895, u'': 3798, u'': -4371, '': -1984, '': 1749, '': 2120, '': 1895, '': 3798, '': -4371,
u'': -724, u'': -11870, u'': 2145, u'': 1789, u'': 1287, u'': -403, '': -724, '': -11870, '': 2145, '': 1789, '': 1287, '': -403,
u'': -1635, u'': -881, u'': -541, u'': -856, u'': -3637} '': -1635, '': -881, '': -541, '': -856, '': -3637}
UW5__ = {u',': 465, u'.': -299, u'1': -514, u'E2': -32768, u']': -2762, u'': 465, UW5__ = {',': 465, '.': -299, '1': -514, 'E2': -32768, ']': -2762, '': 465,
u'': -299, u'': 363, u'': 1655, u'': 331, u'': -503, u'': 1199, '': -299, '': 363, '': 1655, '': 331, '': -503, '': 1199,
u'': 527, u'': 647, u'': -421, u'': 1624, u'': 1971, u'': 312, '': 527, '': 647, '': -421, '': 1624, '': 1971, '': 312,
u'': -983, u'': -1537, u'': -1371, u'': -852, u'': -1186, u'': 1093, '': -983, '': -1537, '': -1371, '': -852, '': -1186, '': 1093,
u'': 52, u'': 921, u'': -18, u'': -850, u'': -127, u'': 1682, '': 52, '': 921, '': -18, '': -850, '': -127, '': 1682,
u'': -787, u'': -1224, u'': -635, u'': -578, u'': 1001, u'': 502, '': -787, '': -1224, '': -635, '': -578, '': 1001, '': 502,
u'': 865, u'': 3350, u'': 854, u'': -208, u'': 429, u'': 504, '': 865, '': 3350, '': 854, '': -208, '': 429, '': 504,
u'': 419, u'': -1264, u'': 327, u'': 241, u'': 451, u'': -343, '': 419, '': -1264, '': 327, '': 241, '': 451, '': -343,
u'': -871, u'': 722, u'': -1153, u'': -654, u'': 3519, u'': -901, '': -871, '': 722, '': -1153, '': -654, '': 3519, '': -901,
u'': 848, u'': 2104, u'': -1296, u'': -548, u'': 1785, u'': -1304, '': 848, '': 2104, '': -1296, '': -548, '': 1785, '': -1304,
u'': -2991, u'': 921, u'': 1763, u'': 872, u'': -814, u'': 1618, '': -2991, '': 921, '': 1763, '': 872, '': -814, '': 1618,
u'': -1682, u'': 218, u'': -4353, u'': 932, u'': 1356, u'': -1508, '': -1682, '': 218, '': -4353, '': 932, '': 1356, '': -1508,
u'': -1347, u'': 240, u'': -3912, u'': -3149, u'': 1319, u'': -1052, '': -1347, '': 240, '': -3912, '': -3149, '': 1319, '': -1052,
u'': -4003, u'': -997, u'': -278, u'': -813, u'': 1955, u'': -2233, '': -4003, '': -997, '': -278, '': -813, '': 1955, '': -2233,
u'': 663, u'': -1073, u'': 1219, u'': -1018, u'': -368, u'': 786, '': 663, '': -1073, '': 1219, '': -1018, '': -368, '': 786,
u'': 1191, u'': 2368, u'': -689, u'': -514, u'': -32768, u'': 363, '': 1191, '': 2368, '': -689, '': -514, '': -32768, '': 363,
u'': 241, u'': 451, u'': -343} '': 241, '': 451, '': -343}
UW6__ = {u',': 227, u'.': 808, u'1': -270, u'E1': 306, u'': 227, u'': 808, UW6__ = {',': 227, '.': 808, '1': -270, 'E1': 306, '': 227, '': 808,
u'': -307, u'': 189, u'': 241, u'': -73, u'': -121, u'': -200, '': -307, '': 189, '': 241, '': -73, '': -121, '': -200,
u'': 1782, u'': 383, u'': -428, u'': 573, u'': -1014, u'': 101, '': 1782, '': 383, '': -428, '': 573, '': -1014, '': 101,
u'': -105, u'': -253, u'': -149, u'': -417, u'': -236, u'': -206, '': -105, '': -253, '': -149, '': -417, '': -236, '': -206,
u'': 187, u'': -135, u'': 195, u'': -673, u'': -496, u'': -277, '': 187, '': -135, '': 195, '': -673, '': -496, '': -277,
u'': 201, u'': -800, u'': 624, u'': 302, u'': 1792, u'': -1212, '': 201, '': -800, '': 624, '': 302, '': 1792, '': -1212,
u'': 798, u'': -960, u'': 887, u'': -695, u'': 535, u'': -697, '': 798, '': -960, '': 887, '': -695, '': 535, '': -697,
u'': 753, u'': -507, u'': 974, u'': -822, u'': 1811, u'': 463, '': 753, '': -507, '': 974, '': -822, '': 1811, '': 463,
u'': 1082, u'': -270, u'': 306, u'': -673, u'': -496} '': 1082, '': -270, '': 306, '': -673, '': -496}
# ctype_ # ctype_
def ctype_(self, char): def ctype_(self, char):
@ -431,7 +431,7 @@ class DefaultSplitter(BaseSplitter):
for pattern, value in self.patterns_.items(): for pattern, value in self.patterns_.items():
if pattern.match(char): if pattern.match(char):
return value return value
return u'O' return 'O'
# ts_ # ts_
def ts_(self, dict, key): def ts_(self, dict, key):
@ -447,21 +447,21 @@ class DefaultSplitter(BaseSplitter):
return [] return []
result = [] result = []
seg = [u'B3', u'B2', u'B1'] seg = ['B3', 'B2', 'B1']
ctype = [u'O', u'O', u'O'] ctype = ['O', 'O', 'O']
for t in input: for t in input:
seg.append(t) seg.append(t)
ctype.append(self.ctype_(t)) ctype.append(self.ctype_(t))
seg.append(u'E1') seg.append('E1')
seg.append(u'E2') seg.append('E2')
seg.append(u'E3') seg.append('E3')
ctype.append(u'O') ctype.append('O')
ctype.append(u'O') ctype.append('O')
ctype.append(u'O') ctype.append('O')
word = seg[3] word = seg[3]
p1 = u'U' p1 = 'U'
p2 = u'U' p2 = 'U'
p3 = u'U' p3 = 'U'
for i in range(4, len(seg) - 3): for i in range(4, len(seg) - 3):
score = self.BIAS__ score = self.BIAS__
@ -520,11 +520,11 @@ class DefaultSplitter(BaseSplitter):
score += self.ts_(self.TQ2__, p2 + c2 + c3 + c4) score += self.ts_(self.TQ2__, p2 + c2 + c3 + c4)
score += self.ts_(self.TQ3__, p3 + c1 + c2 + c3) score += self.ts_(self.TQ3__, p3 + c1 + c2 + c3)
score += self.ts_(self.TQ4__, p3 + c2 + c3 + c4) score += self.ts_(self.TQ4__, p3 + c2 + c3 + c4)
p = u'O' p = 'O'
if score > 0: if score > 0:
result.append(word.strip()) result.append(word.strip())
word = u'' word = ''
p = u'B' p = 'B'
p1 = p2 p1 = p2
p2 = p3 p2 = p3
p3 = p p3 = p
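For orientation (a note on this file, not from the commit itself): DefaultSplitter is a port of TinySegmenter. Each character is bucketed by ctype_, the stream is padded with B1..B3/E1..E3 sentinels, and the loop sums context weights from the B*/T*/U* tables, inserting a word break whenever the score turns positive. The classification step in isolation, using a trimmed subset of the patterns above:

    import re

    # Subset of patterns_ above: kanji numerals, hiragana, ASCII digits.
    patterns = {re.compile('[一二三四五六七八九十百千万億兆]'): 'M',
                re.compile('[ぁ-ん]'): 'I',
                re.compile('[0-9]'): 'N'}

    def ctype(char):
        for pattern, value in patterns.items():
            if pattern.match(char):
                return value
        return 'O'  # fallback "other" class, as in ctype_ above

    print([ctype(c) for c in '五つ3x'])  # ['M', 'I', 'N', 'O']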

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -25,7 +25,7 @@ if False:
# For type annotation # For type annotation
from typing import Dict, List # NOQA from typing import Dict, List # NOQA
english_stopwords = set(u""" english_stopwords = set("""
a and are as at a and are as at
be but by be but by
for for
@ -233,7 +233,7 @@ class SearchChinese(SearchLanguage):
language_name = 'Chinese' language_name = 'Chinese'
js_stemmer_code = js_porter_stemmer js_stemmer_code = js_porter_stemmer
stopwords = english_stopwords stopwords = english_stopwords
latin1_letters = re.compile(u'(?u)\\w+[\u0000-\u00ff]') latin1_letters = re.compile('(?u)\\w+[\u0000-\u00ff]')
def init(self, options): def init(self, options):
# type: (Dict) -> None # type: (Dict) -> None
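latin1_letters grabs runs of word characters that end in a Latin-1 code point, presumably so Latin words embedded in Chinese text are indexed whole; for example:

    import re

    latin1_letters = re.compile('(?u)\\w+[\u0000-\u00ff]')

    print(latin1_letters.findall('Python是一种编程语言'))  # ['Python']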

View File

@ -15,12 +15,12 @@
{% if append_syspath -%} {% if append_syspath -%}
import os import os
import sys import sys
sys.path.insert(0, u'{{ module_path }}') sys.path.insert(0, '{{ module_path }}')
{% else -%} {% else -%}
# import os # import os
# import sys # import sys
{% if module_path -%} {% if module_path -%}
# sys.path.insert(0, u'{{ module_path }}') # sys.path.insert(0, '{{ module_path }}')
{% else -%} {% else -%}
# sys.path.insert(0, os.path.abspath('.')) # sys.path.insert(0, os.path.abspath('.'))
{% endif -%} {% endif -%}
@ -28,14 +28,14 @@ sys.path.insert(0, u'{{ module_path }}')
# -- Project information ----------------------------------------------------- # -- Project information -----------------------------------------------------
project = u'{{ project_str }}' project = '{{ project_str }}'
copyright = u'{{ copyright_str }}' copyright = '{{ copyright_str }}'
author = u'{{ author_str }}' author = '{{ author_str }}'
# The short X.Y version # The short X.Y version
version = u'{{ version_str }}' version = '{{ version_str }}'
# The full version, including alpha/beta/rc tags # The full version, including alpha/beta/rc tags
release = u'{{ release_str }}' release = '{{ release_str }}'
# -- General configuration --------------------------------------------------- # -- General configuration ---------------------------------------------------
@ -129,8 +129,8 @@ latex_elements = {
# (source start file, target name, title, # (source start file, target name, title,
# author, documentclass [howto, manual, or own class]). # author, documentclass [howto, manual, or own class]).
latex_documents = [ latex_documents = [
(master_doc, '{{ project_fn }}.tex', u'{{ project_doc_texescaped_str }}', (master_doc, '{{ project_fn }}.tex', '{{ project_doc_texescaped_str }}',
u'{{ author_texescaped_str }}', 'manual'), '{{ author_texescaped_str }}', 'manual'),
] ]
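These are Jinja2 templates; sphinx-quickstart fills in the *_str placeholders when it writes conf.py. Rendering one line by hand (illustrative values only):

    from jinja2 import Template

    print(Template("project = '{{ project_str }}'").render(project_str='MyDocs'))
    # project = 'MyDocs'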

View File

@ -89,7 +89,7 @@ class Locale(SphinxTransform):
def apply(self, **kwargs): def apply(self, **kwargs):
# type: (Any) -> None # type: (Any) -> None
settings, source = self.document.settings, self.document['source'] settings, source = self.document.settings, self.document['source']
msgstr = u'' msgstr = ''
# XXX check if this is reliable # XXX check if this is reliable
assert source.startswith(self.env.srcdir) assert source.startswith(self.env.srcdir)

View File

@ -77,8 +77,8 @@ class ImageDownloader(BaseImageConverter):
filename, ext = os.path.splitext(node['uri']) filename, ext = os.path.splitext(node['uri'])
basename = sha1(filename.encode()).hexdigest() + ext basename = sha1(filename.encode()).hexdigest() + ext
dirname = node['uri'].replace('://', '/').translate({ord("?"): u"/", dirname = node['uri'].replace('://', '/').translate({ord("?"): "/",
ord("&"): u"/"}) ord("&"): "/"})
if len(dirname) > MAX_FILENAME_LEN: if len(dirname) > MAX_FILENAME_LEN:
dirname = sha1(dirname.encode()).hexdigest() dirname = sha1(dirname.encode()).hexdigest()
ensuredir(os.path.join(self.imagedir, dirname)) ensuredir(os.path.join(self.imagedir, dirname))

View File

@ -421,7 +421,7 @@ class UnicodeDecodeErrorHandler:
error.object[error.start:error.end] + b'<<<' + error.object[error.start:error.end] + b'<<<' +
error.object[error.end:lineend]), error.object[error.end:lineend]),
location=(self.docname, lineno)) location=(self.docname, lineno))
return (u'?', error.end) return ('?', error.end)
# Low-level utility functions and classes. # Low-level utility functions and classes.
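The ('?', error.end) return above follows the standard codecs error-handler contract: a replacement string plus the offset at which to resume decoding. A self-contained sketch of the same idea (the handler name here is made up):

    import codecs

    def question_mark_handler(error):
        # Substitute '?' for the undecodable bytes and skip past them.
        assert isinstance(error, UnicodeDecodeError)
        return ('?', error.end)

    codecs.register_error('sketch-replace', question_mark_handler)

    print(b'caf\xe9'.decode('utf-8', errors='sketch-replace'))  # caf?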
@ -700,30 +700,30 @@ def xmlname_checker():
# https://www.w3.org/TR/REC-xml/#NT-Name # https://www.w3.org/TR/REC-xml/#NT-Name
# Only Python 3.3 or newer support character code in regular expression # Only Python 3.3 or newer support character code in regular expression
name_start_chars = [ name_start_chars = [
u':', [u'A', u'Z'], u'_', [u'a', u'z'], [u'\u00C0', u'\u00D6'], ':', ['A', 'Z'], '_', ['a', 'z'], ['\u00C0', '\u00D6'],
[u'\u00D8', u'\u00F6'], [u'\u00F8', u'\u02FF'], [u'\u0370', u'\u037D'], ['\u00D8', '\u00F6'], ['\u00F8', '\u02FF'], ['\u0370', '\u037D'],
[u'\u037F', u'\u1FFF'], [u'\u200C', u'\u200D'], [u'\u2070', u'\u218F'], ['\u037F', '\u1FFF'], ['\u200C', '\u200D'], ['\u2070', '\u218F'],
[u'\u2C00', u'\u2FEF'], [u'\u3001', u'\uD7FF'], [u'\uF900', u'\uFDCF'], ['\u2C00', '\u2FEF'], ['\u3001', '\uD7FF'], ['\uF900', '\uFDCF'],
[u'\uFDF0', u'\uFFFD']] ['\uFDF0', '\uFFFD']]
name_start_chars.append([u'\U00010000', u'\U000EFFFF']) name_start_chars.append(['\U00010000', '\U000EFFFF'])
name_chars = [ name_chars = [
u"\\-", u"\\.", [u'0', u'9'], u'\u00B7', [u'\u0300', u'\u036F'], "\\-", "\\.", ['0', '9'], '\u00B7', ['\u0300', '\u036F'],
[u'\u203F', u'\u2040'] ['\u203F', '\u2040']
] ]
def convert(entries, splitter=u'|'): def convert(entries, splitter='|'):
# type: (Any, str) -> str # type: (Any, str) -> str
results = [] results = []
for entry in entries: for entry in entries:
if isinstance(entry, list): if isinstance(entry, list):
results.append(u'[%s]' % convert(entry, u'-')) results.append('[%s]' % convert(entry, '-'))
else: else:
results.append(entry) results.append(entry)
return splitter.join(results) return splitter.join(results)
start_chars_regex = convert(name_start_chars) start_chars_regex = convert(name_start_chars)
name_chars_regex = convert(name_chars) name_chars_regex = convert(name_chars)
return re.compile(u'(%s)(%s|%s)*' % ( return re.compile('(%s)(%s|%s)*' % (
start_chars_regex, start_chars_regex, name_chars_regex)) start_chars_regex, start_chars_regex, name_chars_regex))
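convert flattens the nested character lists into one alternation, so the compiled pattern has the shape (start)(start|other)*. A condensed, ASCII-only version of the same construction:

    import re

    start = '[:A-Z_a-z]'      # ASCII slice of the start-char ranges above
    extra = '[-.0-9]'         # continuation-only characters
    xmlname = re.compile('(%s)(%s|%s)*' % (start, start, extra))

    print(bool(xmlname.match('valid-name')))  # True
    print(bool(xmlname.match('1invalid')))    # False: a digit cannot start a name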

View File

@ -109,8 +109,8 @@ def parse_data_uri(uri):
return None return None
# data:[<MIME-type>][;charset=<encoding>][;base64],<data> # data:[<MIME-type>][;charset=<encoding>][;base64],<data>
mimetype = u'text/plain' mimetype = 'text/plain'
charset = u'US-ASCII' charset = 'US-ASCII'
properties, data = uri[5:].split(',', 1) properties, data = uri[5:].split(',', 1)
for prop in properties.split(';'): for prop in properties.split(';'):
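In a data: URI both fields are optional, hence the defaults above. Walking through a minimal URI by hand:

    import base64

    uri = 'data:;base64,aGVsbG8='      # no MIME type, no charset given
    properties, data = uri[5:].split(',', 1)
    print(properties.split(';'))       # ['', 'base64']
    # mimetype stays 'text/plain', charset stays 'US-ASCII'
    print(base64.b64decode(data))      # b'hello'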

View File

@ -146,7 +146,7 @@ class InventoryFile:
# for Python modules, and the first # for Python modules, and the first
# one is correct # one is correct
continue continue
if location.endswith(u'$'): if location.endswith('$'):
location = location[:-1] + name location = location[:-1] + name
location = join(uri, location) location = join(uri, location)
invdata.setdefault(type, {})[name] = (projname, version, invdata.setdefault(type, {})[name] = (projname, version,
@ -162,10 +162,10 @@ class InventoryFile:
with open(os.path.join(filename), 'wb') as f: with open(os.path.join(filename), 'wb') as f:
# header # header
f.write((u'# Sphinx inventory version 2\n' f.write(('# Sphinx inventory version 2\n'
u'# Project: %s\n' '# Project: %s\n'
u'# Version: %s\n' '# Version: %s\n'
u'# The remainder of this file is compressed using zlib.\n' % '# The remainder of this file is compressed using zlib.\n' %
(escape(env.config.project), (escape(env.config.project),
escape(env.config.version))).encode()) escape(env.config.version))).encode())
@ -181,8 +181,8 @@ class InventoryFile:
if anchor: if anchor:
uri += '#' + anchor uri += '#' + anchor
if dispname == name: if dispname == name:
dispname = u'-' dispname = '-'
entry = (u'%s %s:%s %s %s %s\n' % entry = ('%s %s:%s %s %s %s\n' %
(name, domainname, typ, prio, uri, dispname)) (name, domainname, typ, prio, uri, dispname))
f.write(compressor.compress(entry.encode())) f.write(compressor.compress(entry.encode()))
f.write(compressor.flush()) f.write(compressor.flush())
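So a dumped objects.inv is four plain-text '#' header lines followed by one zlib stream of "name domain:type priority uri dispname" records. A sketch of reading one back (the path is hypothetical):

    import zlib

    with open('objects.inv', 'rb') as f:
        for _ in range(4):                      # the four header lines
            print(f.readline().decode().rstrip())
        entries = zlib.decompress(f.read()).decode()

    for line in entries.splitlines()[:3]:       # first few records
        name, rest = line.split(' ', 1)
        print(name, '->', rest)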

View File

@ -21,7 +21,7 @@ def get_node_equation_number(writer, node):
if writer.builder.config.math_numfig and writer.builder.config.numfig: if writer.builder.config.math_numfig and writer.builder.config.numfig:
figtype = 'displaymath' figtype = 'displaymath'
if writer.builder.name == 'singlehtml': if writer.builder.name == 'singlehtml':
key = u"%s/%s" % (writer.docnames[-1], figtype) key = "%s/%s" % (writer.docnames[-1], figtype)
else: else:
key = figtype key = figtype

View File

@ -37,93 +37,93 @@ if False: # For type annotation
from typing import Generator, Iterable, Tuple # NOQA from typing import Generator, Iterable, Tuple # NOQA
langquotes = {'af': u'“”‘’', langquotes = {'af': '“”‘’',
'af-x-altquot': u'„”‚’', 'af-x-altquot': '„”‚’',
'bg': u'„“‚‘', # Bulgarian, https://bg.wikipedia.org/wiki/Кавички 'bg': '„“‚‘', # Bulgarian, https://bg.wikipedia.org/wiki/Кавички
'ca': u'«»“”', 'ca': '«»“”',
'ca-x-altquot': u'“”‘’', 'ca-x-altquot': '“”‘’',
'cs': u'„“‚‘', 'cs': '„“‚‘',
'cs-x-altquot': u'»«›‹', 'cs-x-altquot': '»«›‹',
'da': u'»«›‹', 'da': '»«›‹',
'da-x-altquot': u'„“‚‘', 'da-x-altquot': '„“‚‘',
# 'da-x-altquot2': u'””’’', # 'da-x-altquot2': '””’’',
'de': u'„“‚‘', 'de': '„“‚‘',
'de-x-altquot': u'»«›‹', 'de-x-altquot': '»«›‹',
'de-ch': u'«»‹›', 'de-ch': '«»‹›',
'el': u'«»“”', 'el': '«»“”',
'en': u'“”‘’', 'en': '“”‘’',
'en-uk-x-altquot': u'‘’“”', # Attention: " → ‘ and ' → “ ! 'en-uk-x-altquot': '‘’“”', # Attention: " → ‘ and ' → “ !
'eo': u'“”‘’', 'eo': '“”‘’',
'es': u'«»“”', 'es': '«»“”',
'es-x-altquot': u'“”‘’', 'es-x-altquot': '“”‘’',
'et': u'„“‚‘', # no secondary quote listed in 'et': '„“‚‘', # no secondary quote listed in
'et-x-altquot': u'«»‹›', # the sources above (wikipedia.org) 'et-x-altquot': '«»‹›', # the sources above (wikipedia.org)
'eu': u'«»‹›', 'eu': '«»‹›',
'fi': u'””’’', 'fi': '””’’',
'fi-x-altquot': u'»»››', 'fi-x-altquot': '»»››',
'fr': (u'« ', u' »', u'', u''), # full no-break space 'fr': ('« ', ' »', '', ''), # full no-break space
'fr-x-altquot': (u'« ', u' »', u'', u''), # narrow no-break space 'fr-x-altquot': ('« ', ' »', '', ''), # narrow no-break space
'fr-ch': u'«»‹›', 'fr-ch': '«»‹›',
'fr-ch-x-altquot': (u'« ', u' »', u'', u''), # narrow no-break space 'fr-ch-x-altquot': ('« ', ' »', '', ''), # narrow no-break space
# http://typoguide.ch/ # http://typoguide.ch/
'gl': u'«»“”', 'gl': '«»“”',
'he': u'”“»«', # Hebrew is RTL, test position: 'he': '”“»«', # Hebrew is RTL, test position:
'he-x-altquot': u'„”‚’', # low quotation marks are opening. 'he-x-altquot': '„”‚’', # low quotation marks are opening.
# 'he-x-altquot': u'“„‘‚', # RTL: low quotation marks opening # 'he-x-altquot': '“„‘‚', # RTL: low quotation marks opening
'hr': u'„”‘’', # https://hrvatska-tipografija.com/polunavodnici/ 'hr': '„”‘’', # https://hrvatska-tipografija.com/polunavodnici/
'hr-x-altquot': u'»«›‹', 'hr-x-altquot': '»«›‹',
'hsb': u'„“‚‘', 'hsb': '„“‚‘',
'hsb-x-altquot': u'»«›‹', 'hsb-x-altquot': '»«›‹',
'hu': u'„”«»', 'hu': '„”«»',
'is': u'„“‚‘', 'is': '„“‚‘',
'it': u'«»“”', 'it': '«»“”',
'it-ch': u'«»‹›', 'it-ch': '«»‹›',
'it-x-altquot': u'“”‘’', 'it-x-altquot': '“”‘’',
# 'it-x-altquot2': u'“„‘‚', # [7] in headlines # 'it-x-altquot2': '“„‘‚', # [7] in headlines
'ja': u'「」『』', 'ja': '「」『』',
'lt': u'„“‚‘', 'lt': '„“‚‘',
'lv': u'„“‚‘', 'lv': '„“‚‘',
'mk': u'„“‚‘', # Macedonian, 'mk': '„“‚‘', # Macedonian,
# https://mk.wikipedia.org/wiki/Правопис_и_правоговор_на_македонскиот_јазик # https://mk.wikipedia.org/wiki/Правопис_и_правоговор_на_македонскиот_јазик
'nl': u'“”‘’', 'nl': '“”‘’',
'nl-x-altquot': u'„”‚’', 'nl-x-altquot': '„”‚’',
# 'nl-x-altquot2': u'””’’', # 'nl-x-altquot2': '””’’',
'nb': u'«»’’', # Norsk bokmål (canonical form 'no') 'nb': '«»’’', # Norsk bokmål (canonical form 'no')
'nn': u'«»’’', # Nynorsk [10] 'nn': '«»’’', # Nynorsk [10]
'nn-x-altquot': u'«»‘’', # [8], [10] 'nn-x-altquot': '«»‘’', # [8], [10]
# 'nn-x-altquot2': u'«»«»', # [9], [10] # 'nn-x-altquot2': '«»«»', # [9], [10]
# 'nn-x-altquot3': u'„“‚‘', # [10] # 'nn-x-altquot3': '„“‚‘', # [10]
'no': u'«»’’', # Norsk bokmål [10] 'no': '«»’’', # Norsk bokmål [10]
'no-x-altquot': u'«»‘’', # [8], [10] 'no-x-altquot': '«»‘’', # [8], [10]
# 'no-x-altquot2': u'«»«»', # [9], [10] # 'no-x-altquot2': '«»«»', # [9], [10]
# 'no-x-altquot3': u'„“‚‘', # [10] # 'no-x-altquot3': '„“‚‘', # [10]
'pl': u'„”«»', 'pl': '„”«»',
'pl-x-altquot': u'«»‚’', 'pl-x-altquot': '«»‚’',
# 'pl-x-altquot2': u'„”‚’', # 'pl-x-altquot2': '„”‚’',
# https://pl.wikipedia.org/wiki/Cudzys%C5%82%C3%B3w # https://pl.wikipedia.org/wiki/Cudzys%C5%82%C3%B3w
'pt': u'«»“”', 'pt': '«»“”',
'pt-br': u'“”‘’', 'pt-br': '“”‘’',
'ro': u'„”«»', 'ro': '„”«»',
'ru': u'«»„“', 'ru': '«»„“',
'sh': u'„”‚’', # Serbo-Croatian 'sh': '„”‚’', # Serbo-Croatian
'sh-x-altquot': u'»«›‹', 'sh-x-altquot': '»«›‹',
'sk': u'„“‚‘', # Slovak 'sk': '„“‚‘', # Slovak
'sk-x-altquot': u'»«›‹', 'sk-x-altquot': '»«›‹',
'sl': u'„“‚‘', # Slovenian 'sl': '„“‚‘', # Slovenian
'sl-x-altquot': u'»«›‹', 'sl-x-altquot': '»«›‹',
'sq': u'«»‹›', # Albanian 'sq': '«»‹›', # Albanian
'sq-x-altquot': u'“„‘‚', 'sq-x-altquot': '“„‘‚',
'sr': u'„”’’', 'sr': '„”’’',
'sr-x-altquot': u'»«›‹', 'sr-x-altquot': '»«›‹',
'sv': u'””’’', 'sv': '””’’',
'sv-x-altquot': u'»»››', 'sv-x-altquot': '»»››',
'tr': u'“”‘’', 'tr': '“”‘’',
'tr-x-altquot': u'«»‹›', 'tr-x-altquot': '«»‹›',
# 'tr-x-altquot2': u'“„‘‚', # [7] antiquated? # 'tr-x-altquot2': '“„‘‚', # [7] antiquated?
'uk': u'«»„“', 'uk': '«»„“',
'uk-x-altquot': u'„“‚‘', 'uk-x-altquot': '„“‚‘',
'zh-cn': u'“”‘’', 'zh-cn': '“”‘’',
'zh-tw': u'「」『』', 'zh-tw': '「」『』',
} }
@ -142,7 +142,7 @@ def educateQuotes(text, language='en'):
try: try:
apostrophe = smart.apostrophe apostrophe = smart.apostrophe
except Exception: except Exception:
apostrophe = u'’' apostrophe = '’'
# oldtext = text # oldtext = text
punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]""" punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""

View File

@ -84,7 +84,7 @@ class HTMLTranslator(SphinxTranslator, BaseTranslator):
self.permalink_text = self.config.html_add_permalinks self.permalink_text = self.config.html_add_permalinks
# support backwards-compatible setting to a bool # support backwards-compatible setting to a bool
if not isinstance(self.permalink_text, str): if not isinstance(self.permalink_text, str):
self.permalink_text = self.permalink_text and u'\u00B6' or '' self.permalink_text = self.permalink_text and '\u00B6' or ''
self.permalink_text = self.encode(self.permalink_text) self.permalink_text = self.encode(self.permalink_text)
self.secnumber_suffix = self.config.html_secnumber_suffix self.secnumber_suffix = self.config.html_secnumber_suffix
self.param_separator = '' self.param_separator = ''
@ -326,7 +326,7 @@ class HTMLTranslator(SphinxTranslator, BaseTranslator):
def append_fignumber(figtype, figure_id): def append_fignumber(figtype, figure_id):
# type: (str, str) -> None # type: (str, str) -> None
if self.builder.name == 'singlehtml': if self.builder.name == 'singlehtml':
key = u"%s/%s" % (self.docnames[-1], figtype) key = "%s/%s" % (self.docnames[-1], figtype)
else: else:
key = figtype key = figtype
@ -352,7 +352,7 @@ class HTMLTranslator(SphinxTranslator, BaseTranslator):
def add_permalink_ref(self, node, title): def add_permalink_ref(self, node, title):
# type: (nodes.Element, str) -> None # type: (nodes.Element, str) -> None
if node['ids'] and self.permalink_text and self.builder.add_permalinks: if node['ids'] and self.permalink_text and self.builder.add_permalinks:
format = u'<a class="headerlink" href="#%s" title="%s">%s</a>' format = '<a class="headerlink" href="#%s" title="%s">%s</a>'
self.body.append(format % (node['ids'][0], title, self.permalink_text)) self.body.append(format % (node['ids'][0], title, self.permalink_text))
def generate_targets_for_listing(self, node): def generate_targets_for_listing(self, node):
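Filled in, the format string above yields the headerlink anchor appended to each headline (values here are hypothetical):

    format = '<a class="headerlink" href="#%s" title="%s">%s</a>'
    print(format % ('intro', 'Permalink to this headline', '\u00B6'))
    # <a class="headerlink" href="#intro" title="Permalink to this headline">¶</a>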
@ -403,9 +403,9 @@ class HTMLTranslator(SphinxTranslator, BaseTranslator):
if close_tag.startswith('</h'): if close_tag.startswith('</h'):
self.add_permalink_ref(node.parent, _('Permalink to this headline')) self.add_permalink_ref(node.parent, _('Permalink to this headline'))
elif close_tag.startswith('</a></h'): elif close_tag.startswith('</a></h'):
self.body.append(u'</a><a class="headerlink" href="#%s" ' % self.body.append('</a><a class="headerlink" href="#%s" ' %
node.parent['ids'][0] + node.parent['ids'][0] +
u'title="%s">%s' % ( 'title="%s">%s' % (
_('Permalink to this headline'), _('Permalink to this headline'),
self.permalink_text)) self.permalink_text))
elif isinstance(node.parent, nodes.table): elif isinstance(node.parent, nodes.table):

View File

@ -54,7 +54,7 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
self.permalink_text = self.config.html_add_permalinks self.permalink_text = self.config.html_add_permalinks
# support backwards-compatible setting to a bool # support backwards-compatible setting to a bool
if not isinstance(self.permalink_text, str): if not isinstance(self.permalink_text, str):
self.permalink_text = self.permalink_text and u'\u00B6' or '' self.permalink_text = self.permalink_text and '\u00B6' or ''
self.permalink_text = self.encode(self.permalink_text) self.permalink_text = self.encode(self.permalink_text)
self.secnumber_suffix = self.config.html_secnumber_suffix self.secnumber_suffix = self.config.html_secnumber_suffix
self.param_separator = '' self.param_separator = ''
@ -294,7 +294,7 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
def append_fignumber(figtype, figure_id): def append_fignumber(figtype, figure_id):
# type: (str, str) -> None # type: (str, str) -> None
if self.builder.name == 'singlehtml': if self.builder.name == 'singlehtml':
key = u"%s/%s" % (self.docnames[-1], figtype) key = "%s/%s" % (self.docnames[-1], figtype)
else: else:
key = figtype key = figtype
@ -320,7 +320,7 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
def add_permalink_ref(self, node, title): def add_permalink_ref(self, node, title):
# type: (nodes.Element, str) -> None # type: (nodes.Element, str) -> None
if node['ids'] and self.permalink_text and self.builder.add_permalinks: if node['ids'] and self.permalink_text and self.builder.add_permalinks:
format = u'<a class="headerlink" href="#%s" title="%s">%s</a>' format = '<a class="headerlink" href="#%s" title="%s">%s</a>'
self.body.append(format % (node['ids'][0], title, self.permalink_text)) self.body.append(format % (node['ids'][0], title, self.permalink_text))
# overwritten # overwritten
@ -349,9 +349,9 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
if close_tag.startswith('</h'): if close_tag.startswith('</h'):
self.add_permalink_ref(node.parent, _('Permalink to this headline')) self.add_permalink_ref(node.parent, _('Permalink to this headline'))
elif close_tag.startswith('</a></h'): elif close_tag.startswith('</a></h'):
self.body.append(u'</a><a class="headerlink" href="#%s" ' % self.body.append('</a><a class="headerlink" href="#%s" ' %
node.parent['ids'][0] + node.parent['ids'][0] +
u'title="%s">%s' % ( 'title="%s">%s' % (
_('Permalink to this headline'), _('Permalink to this headline'),
self.permalink_text)) self.permalink_text))
elif isinstance(node.parent, nodes.table): elif isinstance(node.parent, nodes.table):
@ -775,7 +775,7 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
self._table_row_index = 0 self._table_row_index = 0
classes = [cls.strip(u' \t\n') for cls in self.settings.table_style.split(',')] classes = [cls.strip(' \t\n') for cls in self.settings.table_style.split(',')]
classes.insert(0, "docutils") # compat classes.insert(0, "docutils") # compat
if 'align' in node: if 'align' in node:
classes.append('align-%s' % node['align']) classes.append('align-%s' % node['align'])

View File

@ -754,7 +754,7 @@ class LaTeXTranslator(SphinxTranslator):
def astext(self): def astext(self):
# type: () -> str # type: () -> str
self.elements.update({ self.elements.update({
'body': u''.join(self.body), 'body': ''.join(self.body),
'indices': self.generate_indices() 'indices': self.generate_indices()
}) })
return self.render('latex.tex_t', self.elements) return self.render('latex.tex_t', self.elements)
@ -876,7 +876,7 @@ class LaTeXTranslator(SphinxTranslator):
self.builder.docnames) self.builder.docnames)
if not content: if not content:
continue continue
ret.append(u'\\renewcommand{\\indexname}{%s}\n' % ret.append('\\renewcommand{\\indexname}{%s}\n' %
indexcls.localname) indexcls.localname)
generate(content, collapsed) generate(content, collapsed)
@ -900,7 +900,7 @@ class LaTeXTranslator(SphinxTranslator):
self.first_document = 0 self.first_document = 0
elif self.first_document == 0: elif self.first_document == 0:
# ... and all others are the appendices # ... and all others are the appendices
self.body.append(u'\n\\appendix\n') self.body.append('\n\\appendix\n')
self.first_document = -1 self.first_document = -1
if 'docname' in node: if 'docname' in node:
self.body.append(self.hypertarget(':doc')) self.body.append(self.hypertarget(':doc'))
@ -1032,7 +1032,7 @@ class LaTeXTranslator(SphinxTranslator):
short = '' short = ''
if node.traverse(nodes.image): if node.traverse(nodes.image):
short = ('[%s]' % short = ('[%s]' %
u' '.join(clean_astext(node).split()).translate(tex_escape_map)) ' '.join(clean_astext(node).split()).translate(tex_escape_map))
try: try:
self.body.append(r'\%s%s{' % (self.sectionnames[self.sectionlevel], short)) self.body.append(r'\%s%s{' % (self.sectionnames[self.sectionlevel], short))
@ -1221,7 +1221,7 @@ class LaTeXTranslator(SphinxTranslator):
def visit_seealso(self, node): def visit_seealso(self, node):
# type: (nodes.Element) -> None # type: (nodes.Element) -> None
self.body.append(u'\n\n\\sphinxstrong{%s:}\n\n' % admonitionlabels['seealso']) self.body.append('\n\n\\sphinxstrong{%s:}\n\n' % admonitionlabels['seealso'])
def depart_seealso(self, node): def depart_seealso(self, node):
# type: (nodes.Element) -> None # type: (nodes.Element) -> None
@ -1813,7 +1813,7 @@ class LaTeXTranslator(SphinxTranslator):
def _visit_named_admonition(self, node): def _visit_named_admonition(self, node):
# type: (nodes.Element) -> None # type: (nodes.Element) -> None
label = admonitionlabels[node.tagname] label = admonitionlabels[node.tagname]
self.body.append(u'\n\\begin{sphinxadmonition}{%s}{%s:}' % self.body.append('\n\\begin{sphinxadmonition}{%s}{%s:}' %
(node.tagname, label)) (node.tagname, label))
def _depart_named_admonition(self, node): def _depart_named_admonition(self, node):
@ -2155,18 +2155,18 @@ class LaTeXTranslator(SphinxTranslator):
# adjust max width of citation labels not to break the layout # adjust max width of citation labels not to break the layout
longest_label = longest_label[:MAX_CITATION_LABEL_LENGTH] longest_label = longest_label[:MAX_CITATION_LABEL_LENGTH]
self.body.append(u'\n\\begin{sphinxthebibliography}{%s}\n' % self.body.append('\n\\begin{sphinxthebibliography}{%s}\n' %
self.encode(longest_label)) self.encode(longest_label))
def depart_thebibliography(self, node): def depart_thebibliography(self, node):
# type: (thebibliography) -> None # type: (thebibliography) -> None
self.body.append(u'\\end{sphinxthebibliography}\n') self.body.append('\\end{sphinxthebibliography}\n')
def visit_citation(self, node): def visit_citation(self, node):
# type: (nodes.Element) -> None # type: (nodes.Element) -> None
label = cast(nodes.label, node[0]) label = cast(nodes.label, node[0])
self.body.append(u'\\bibitem[%s]{%s:%s}' % (self.encode(label.astext()), self.body.append('\\bibitem[%s]{%s:%s}' % (self.encode(label.astext()),
node['docname'], node['ids'][0])) node['docname'], node['ids'][0]))
def depart_citation(self, node): def depart_citation(self, node):
# type: (nodes.Element) -> None # type: (nodes.Element) -> None
@ -2256,7 +2256,7 @@ class LaTeXTranslator(SphinxTranslator):
location=(self.curfilestack[-1], node.line), **highlight_args location=(self.curfilestack[-1], node.line), **highlight_args
) )
# workaround for Unicode issue # workaround for Unicode issue
hlcode = hlcode.replace(u'€', u'@texteuro[]') hlcode = hlcode.replace('€', '@texteuro[]')
if self.in_footnote: if self.in_footnote:
self.body.append('\n\\sphinxSetupCodeBlockInFootnote') self.body.append('\n\\sphinxSetupCodeBlockInFootnote')
hlcode = hlcode.replace('\\begin{Verbatim}', hlcode = hlcode.replace('\\begin{Verbatim}',
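The Euro sign does not survive the LaTeX verbatim path Sphinx sets up, so it is swapped for an @texteuro marker before the highlighted block is written out. Roughly, with a made-up Pygments output string:

    hlcode = '\\begin{Verbatim}\nprice = "10\u20ac"\n\\end{Verbatim}'  # hypothetical highlighted code
    hlcode = hlcode.replace('\u20ac', '@texteuro[]')  # the substitution shown above
    assert '\u20ac' not in hlcode and '@texteuro[]' in hlcode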
@ -2498,10 +2498,10 @@ class LaTeXTranslator(SphinxTranslator):
if self.literal_whitespace: if self.literal_whitespace:
# Insert a blank before the newline, to avoid # Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end. # ! LaTeX Error: There's no line here to end.
text = text.replace(u'\n', u'~\\\\\n').replace(u' ', u'~') text = text.replace('\n', '~\\\\\n').replace(' ', '~')
if self.no_contractions: if self.no_contractions:
text = text.replace('--', u'-{-}') text = text.replace('--', '-{-}')
text = text.replace("''", u"'{'}") text = text.replace("''", "'{'}")
return text return text
def encode_uri(self, text): def encode_uri(self, text):
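The order of the two replacements in encode() matters: newlines become '~\\\\\n' first, then every remaining space becomes a non-breaking '~', so nothing collapses before the forced line break. A worked example:

    text = 'a b\nc'
    text = text.replace('\n', '~\\\\\n').replace(' ', '~')
    assert text == 'a~b~\\\\\nc'  # '~' ties the words; '\\\\' forces the break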
@ -2670,7 +2670,7 @@ class LaTeXTranslator(SphinxTranslator):
def visit_admonition(self, node): def visit_admonition(self, node):
# type: (nodes.Element) -> None # type: (nodes.Element) -> None
self.body.append(u'\n\\begin{sphinxadmonition}{%s}{%s:}' % self.body.append('\n\\begin{sphinxadmonition}{%s}{%s:}' %
(name, admonitionlabels[name])) (name, admonitionlabels[name]))
return visit_admonition return visit_admonition

View File

@ -1206,12 +1206,12 @@ class TexinfoTranslator(SphinxTranslator):
if not name: if not name:
title = cast(nodes.title, node[0]) title = cast(nodes.title, node[0])
name = self.escape(title.astext()) name = self.escape(title.astext())
self.body.append(u'\n@cartouche\n@quotation %s ' % name) self.body.append('\n@cartouche\n@quotation %s ' % name)
def _visit_named_admonition(self, node): def _visit_named_admonition(self, node):
# type: (nodes.Element) -> None # type: (nodes.Element) -> None
label = admonitionlabels[node.tagname] label = admonitionlabels[node.tagname]
self.body.append(u'\n@cartouche\n@quotation %s ' % label) self.body.append('\n@cartouche\n@quotation %s ' % label)
def depart_admonition(self, node): def depart_admonition(self, node):
# type: (nodes.Element) -> None # type: (nodes.Element) -> None
@ -1514,7 +1514,7 @@ class TexinfoTranslator(SphinxTranslator):
def visit_seealso(self, node): def visit_seealso(self, node):
# type: (nodes.Element) -> None # type: (nodes.Element) -> None
self.body.append(u'\n\n@subsubheading %s\n\n' % self.body.append('\n\n@subsubheading %s\n\n' %
admonitionlabels['seealso']) admonitionlabels['seealso'])
def depart_seealso(self, node): def depart_seealso(self, node):
@ -1631,7 +1631,7 @@ class TexinfoTranslator(SphinxTranslator):
self.first_param = 0 self.first_param = 0
text = self.escape(node.astext()) text = self.escape(node.astext())
# replace no-break spaces with normal ones # replace no-break spaces with normal ones
text = text.replace(u' ', '@w{ }') text = text.replace(' ', '@w{ }')
self.body.append(text) self.body.append(text)
raise nodes.SkipNode raise nodes.SkipNode

View File

@ -1,2 +1,2 @@
copyright = u'2006-2009, Author' copyright = '2006-2009, Author'

View File

@ -111,7 +111,7 @@ class Class(Base):
"""should likewise be documented -- süß""" """should likewise be documented -- süß"""
udocattr = 'quux' udocattr = 'quux'
u"""should be documented as well - süß""" """should be documented as well - süß"""
# initialized to any class imported from another module # initialized to any class imported from another module
mdocattr = StringIO() mdocattr = StringIO()

View File

@ -101,7 +101,7 @@ class Class(Base):
"""should likewise be documented -- süß""" """should likewise be documented -- süß"""
udocattr = 'quux' udocattr = 'quux'
u"""should be documented as well - süß""" """should be documented as well - süß"""
# initialized to any class imported from another module # initialized to any class imported from another module
mdocattr = StringIO() mdocattr = StringIO()

View File

@ -42,7 +42,7 @@ def test_events(app, status, warning):
def test_emit_with_nonascii_name_node(app, status, warning): def test_emit_with_nonascii_name_node(app, status, warning):
node = nodes.section(names=[u'\u65e5\u672c\u8a9e']) node = nodes.section(names=['\u65e5\u672c\u8a9e'])
app.emit('my_event', node) app.emit('my_event', node)

View File

@ -325,12 +325,12 @@ def test_get_doc():
# charset guessing (this module is encoded in utf-8) # charset guessing (this module is encoded in utf-8)
def f(): def f():
"""Döcstring""" """Döcstring"""
assert getdocl('function', f) == [u'Döcstring'] assert getdocl('function', f) == ['Döcstring']
# already-unicode docstrings must be taken literally # already-unicode docstrings must be taken literally
def f(): def f():
u"""Döcstring""" """Döcstring"""
assert getdocl('function', f) == [u'Döcstring'] assert getdocl('function', f) == ['Döcstring']
# class docstring: depends on config value which one is taken # class docstring: depends on config value which one is taken
class C: class C:
@ -1031,13 +1031,13 @@ def test_autodoc_module_scope(app):
app.env.temp_data['autodoc:module'] = 'target' app.env.temp_data['autodoc:module'] = 'target'
actual = do_autodoc(app, 'attribute', 'Class.mdocattr') actual = do_autodoc(app, 'attribute', 'Class.mdocattr')
assert list(actual) == [ assert list(actual) == [
u'', '',
u'.. py:attribute:: Class.mdocattr', '.. py:attribute:: Class.mdocattr',
u' :module: target', ' :module: target',
u' :annotation: = <_io.StringIO object>', ' :annotation: = <_io.StringIO object>',
u'', '',
u' should be documented as well - süß', ' should be documented as well - süß',
u' ' ' '
] ]
@ -1047,13 +1047,13 @@ def test_autodoc_class_scope(app):
app.env.temp_data['autodoc:class'] = 'Class' app.env.temp_data['autodoc:class'] = 'Class'
actual = do_autodoc(app, 'attribute', 'mdocattr') actual = do_autodoc(app, 'attribute', 'mdocattr')
assert list(actual) == [ assert list(actual) == [
u'', '',
u'.. py:attribute:: Class.mdocattr', '.. py:attribute:: Class.mdocattr',
u' :module: target', ' :module: target',
u' :annotation: = <_io.StringIO object>', ' :annotation: = <_io.StringIO object>',
u'', '',
u' should be documented as well - süß', ' should be documented as well - süß',
u' ' ' '
] ]
@ -1102,43 +1102,43 @@ def test_autodoc_docstring_signature(app):
app.config.autodoc_docstring_signature = False app.config.autodoc_docstring_signature = False
actual = do_autodoc(app, 'class', 'target.DocstringSig', options) actual = do_autodoc(app, 'class', 'target.DocstringSig', options)
assert list(actual) == [ assert list(actual) == [
u'', '',
u'.. py:class:: DocstringSig', '.. py:class:: DocstringSig',
u' :module: target', ' :module: target',
u'', '',
u' ', ' ',
u' .. py:method:: DocstringSig.meth()', ' .. py:method:: DocstringSig.meth()',
u' :module: target', ' :module: target',
u' ', ' ',
u' meth(FOO, BAR=1) -> BAZ', ' meth(FOO, BAR=1) -> BAZ',
u' First line of docstring', ' First line of docstring',
u' ', ' ',
u' rest of docstring', ' rest of docstring',
u' ', ' ',
u' ', ' ',
u' ', ' ',
u' .. py:method:: DocstringSig.meth2()', ' .. py:method:: DocstringSig.meth2()',
u' :module: target', ' :module: target',
u' ', ' ',
u' First line, no signature', ' First line, no signature',
u' Second line followed by indentation::', ' Second line followed by indentation::',
u' ', ' ',
u' indented line', ' indented line',
u' ', ' ',
u' ', ' ',
u' .. py:attribute:: DocstringSig.prop1', ' .. py:attribute:: DocstringSig.prop1',
u' :module: target', ' :module: target',
u' ', ' ',
u' DocstringSig.prop1(self)', ' DocstringSig.prop1(self)',
u' First line of docstring', ' First line of docstring',
u' ', ' ',
u' ', ' ',
u' .. py:attribute:: DocstringSig.prop2', ' .. py:attribute:: DocstringSig.prop2',
u' :module: target', ' :module: target',
u' ', ' ',
u' First line of docstring', ' First line of docstring',
u' Second line of docstring', ' Second line of docstring',
u' ' ' '
] ]

View File

@ -31,7 +31,7 @@ def request_session_head(url, **kwargs):
@pytest.fixture @pytest.fixture
def nonascii_srcdir(request, rootdir, sphinx_test_tempdir): def nonascii_srcdir(request, rootdir, sphinx_test_tempdir):
# If supported, build in a non-ASCII source dir # If supported, build in a non-ASCII source dir
test_name = u'\u65e5\u672c\u8a9e' test_name = '\u65e5\u672c\u8a9e'
basedir = sphinx_test_tempdir / request.node.originalname basedir = sphinx_test_tempdir / request.node.originalname
try: try:
srcdir = basedir / test_name srcdir = basedir / test_name
@ -47,7 +47,7 @@ def nonascii_srcdir(request, rootdir, sphinx_test_tempdir):
""")) """))
master_doc = srcdir / 'index.txt' master_doc = srcdir / 'index.txt'
master_doc.write_text(master_doc.text() + dedent(u""" master_doc.write_text(master_doc.text() + dedent("""
.. toctree:: .. toctree::
%(test_name)s/%(test_name)s %(test_name)s/%(test_name)s
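The try/except around this fixture exists because not every filesystem encoding can represent the Japanese directory name. A sketch of the probe (temp dir and fallback name are illustrative, not the fixture's actual values):

    import tempfile
    from pathlib import Path

    test_name = '\u65e5\u672c\u8a9e'  # 日本語
    base = Path(tempfile.mkdtemp())
    try:
        (base / test_name).mkdir()  # may raise UnicodeEncodeError on restrictive filesystems
    except UnicodeEncodeError:
        test_name = 'all'  # fall back to an ASCII name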

View File

@ -189,7 +189,7 @@ def test_nested_toc(app):
navpoints = toc.findall("./ncx:navMap/ncx:navPoint") navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
assert len(navpoints) == 4 assert len(navpoints) == 4
assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml', assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml',
u"Welcome to Sphinx Testss documentation!") "Welcome to Sphinx Testss documentation!")
assert navpoints[0].findall("./ncx:navPoint") == [] assert navpoints[0].findall("./ncx:navPoint") == []
# toc.ncx / nested navPoints # toc.ncx / nested navPoints
@ -210,7 +210,7 @@ def test_nested_toc(app):
toc = nav.findall("./xhtml:body/xhtml:nav/xhtml:ol/xhtml:li") toc = nav.findall("./xhtml:body/xhtml:nav/xhtml:ol/xhtml:li")
assert len(toc) == 4 assert len(toc) == 4
assert navinfo(toc[0]) == ('index.xhtml', assert navinfo(toc[0]) == ('index.xhtml',
u"Welcome to Sphinx Testss documentation!") "Welcome to Sphinx Testss documentation!")
assert toc[0].findall("./xhtml:ol") == [] assert toc[0].findall("./xhtml:ol") == []
# nav.xhtml / nested toc # nav.xhtml / nested toc
@ -245,7 +245,7 @@ def test_escaped_toc(app):
navpoints = toc.findall("./ncx:navMap/ncx:navPoint") navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
assert len(navpoints) == 4 assert len(navpoints) == 4
assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml', assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml',
u"Welcome to Sphinx Tests's documentation!") "Welcome to Sphinx Tests's documentation!")
assert navpoints[0].findall("./ncx:navPoint") == [] assert navpoints[0].findall("./ncx:navPoint") == []
# toc.ncx / nested navPoints # toc.ncx / nested navPoints
@ -254,7 +254,7 @@ def test_escaped_toc(app):
assert len(navchildren) == 4 assert len(navchildren) == 4
assert navinfo(navchildren[0]) == ('navPoint3', '2', 'foo.xhtml', '<foo>') assert navinfo(navchildren[0]) == ('navPoint3', '2', 'foo.xhtml', '<foo>')
assert navinfo(navchildren[1]) == ('navPoint4', '3', 'quux.xhtml', 'quux') assert navinfo(navchildren[1]) == ('navPoint4', '3', 'quux.xhtml', 'quux')
assert navinfo(navchildren[2]) == ('navPoint5', '4', 'foo.xhtml#foo-1', u'foo “1”') assert navinfo(navchildren[2]) == ('navPoint5', '4', 'foo.xhtml#foo-1', 'foo “1”')
assert navinfo(navchildren[3]) == ('navPoint8', '6', 'foo.xhtml#foo-2', 'foo.2') assert navinfo(navchildren[3]) == ('navPoint8', '6', 'foo.xhtml#foo-2', 'foo.2')
# nav.xhtml / nav # nav.xhtml / nav
@ -274,7 +274,7 @@ def test_escaped_toc(app):
tocchildren = toc[1].findall("./xhtml:ol/xhtml:li") tocchildren = toc[1].findall("./xhtml:ol/xhtml:li")
assert len(tocchildren) == 3 assert len(tocchildren) == 3
assert navinfo(tocchildren[0]) == ('quux.xhtml', 'quux') assert navinfo(tocchildren[0]) == ('quux.xhtml', 'quux')
assert navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', u'foo “1”') assert navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', 'foo “1”')
assert navinfo(tocchildren[2]) == ('foo.xhtml#foo-2', 'foo.2') assert navinfo(tocchildren[2]) == ('foo.xhtml#foo-2', 'foo.2')
grandchild = tocchildren[1].findall("./xhtml:ol/xhtml:li") grandchild = tocchildren[1].findall("./xhtml:ol/xhtml:li")

View File

@ -71,7 +71,7 @@ def test_msgfmt(app):
assert mo.isfile(), 'msgfmt failed' assert mo.isfile(), 'msgfmt failed'
_ = gettext.translation('test_root', app.outdir, languages=['en']).gettext _ = gettext.translation('test_root', app.outdir, languages=['en']).gettext
assert _("Testing various markup") == u"Testing various markup" assert _("Testing various markup") == "Testing various markup"
@pytest.mark.sphinx( @pytest.mark.sphinx(

View File

@ -153,11 +153,11 @@ def test_html_warnings(app, warning):
(".//pre/span", 'line 2'), (".//pre/span", 'line 2'),
], ],
'includes.html': [ 'includes.html': [
(".//pre", u'Max Strauß'), (".//pre", 'Max Strauß'),
(".//a[@class='reference download internal']", ''), (".//a[@class='reference download internal']", ''),
(".//pre/span", u'"quotes"'), (".//pre/span", '"quotes"'),
(".//pre/span", u"'included'"), (".//pre/span", "'included'"),
(".//pre/span[@class='s2']", u'üöä'), (".//pre/span[@class='s2']", 'üöä'),
(".//div[@class='inc-pyobj1 highlight-text notranslate']//pre", (".//div[@class='inc-pyobj1 highlight-text notranslate']//pre",
r'^class Foo:\n pass\n\s*$'), r'^class Foo:\n pass\n\s*$'),
(".//div[@class='inc-pyobj2 highlight-text notranslate']//pre", (".//div[@class='inc-pyobj2 highlight-text notranslate']//pre",
@ -165,7 +165,7 @@ def test_html_warnings(app, warning):
(".//div[@class='inc-lines highlight-text notranslate']//pre", (".//div[@class='inc-lines highlight-text notranslate']//pre",
r'^class Foo:\n pass\nclass Bar:\n$'), r'^class Foo:\n pass\nclass Bar:\n$'),
(".//div[@class='inc-startend highlight-text notranslate']//pre", (".//div[@class='inc-startend highlight-text notranslate']//pre",
u'^foo = "Including Unicode characters: üöä"\\n$'), '^foo = "Including Unicode characters: üöä"\\n$'),
(".//div[@class='inc-preappend highlight-text notranslate']//pre", (".//div[@class='inc-preappend highlight-text notranslate']//pre",
r'(?m)^START CODE$'), r'(?m)^START CODE$'),
(".//div[@class='inc-pyobj-dedent highlight-python notranslate']//span", (".//div[@class='inc-pyobj-dedent highlight-python notranslate']//span",
@ -205,7 +205,7 @@ def test_html_warnings(app, warning):
(".//li/strong", r'^program\\n$'), (".//li/strong", r'^program\\n$'),
(".//li/em", r'^dfn\\n$'), (".//li/em", r'^dfn\\n$'),
(".//li/kbd", r'^kbd\\n$'), (".//li/kbd", r'^kbd\\n$'),
(".//li/span", u'File \N{TRIANGULAR BULLET} Close'), (".//li/span", 'File \N{TRIANGULAR BULLET} Close'),
(".//li/code/span[@class='pre']", '^a/$'), (".//li/code/span[@class='pre']", '^a/$'),
(".//li/code/em/span[@class='pre']", '^varpart$'), (".//li/code/em/span[@class='pre']", '^varpart$'),
(".//li/code/em/span[@class='pre']", '^i$'), (".//li/code/em/span[@class='pre']", '^i$'),
@ -266,12 +266,12 @@ def test_html_warnings(app, warning):
# tests for numeric labels # tests for numeric labels
(".//a[@href='#id1'][@class='reference internal']/span", 'Testing various markup'), (".//a[@href='#id1'][@class='reference internal']/span", 'Testing various markup'),
# tests for smartypants # tests for smartypants
(".//li", u'Smart “quotes” in English text.'), (".//li", 'Smart “quotes” in English text.'),
(".//li", u'Smart — long and short dashes.'), (".//li", 'Smart — long and short dashes.'),
(".//li", u'Ellipsis…'), (".//li", 'Ellipsis…'),
(".//li//code//span[@class='pre']", 'foo--"bar"...'), (".//li//code//span[@class='pre']", 'foo--"bar"...'),
(".//p", u'Этот «абзац» должен использовать „русские“ кавычки.'), (".//p", 'Этот «абзац» должен использовать „русские“ кавычки.'),
(".//p", u'Il dit : « Cest “super” ! »'), (".//p", 'Il dit : « Cest “super” ! »'),
], ],
'objects.html': [ 'objects.html': [
(".//dt[@id='mod.Cls.meth1']", ''), (".//dt[@id='mod.Cls.meth1']", ''),

View File

@ -67,11 +67,11 @@ def cached_etree_parse():
(".//pre/span", 'line 2'), (".//pre/span", 'line 2'),
], ],
'includes.html': [ 'includes.html': [
(".//pre", u'Max Strauß'), (".//pre", 'Max Strauß'),
(".//a[@class='reference download internal']", ''), (".//a[@class='reference download internal']", ''),
(".//pre/span", u'"quotes"'), (".//pre/span", '"quotes"'),
(".//pre/span", u"'included'"), (".//pre/span", "'included'"),
(".//pre/span[@class='s2']", u'üöä'), (".//pre/span[@class='s2']", 'üöä'),
(".//div[@class='inc-pyobj1 highlight-text notranslate']//pre", (".//div[@class='inc-pyobj1 highlight-text notranslate']//pre",
r'^class Foo:\n pass\n\s*$'), r'^class Foo:\n pass\n\s*$'),
(".//div[@class='inc-pyobj2 highlight-text notranslate']//pre", (".//div[@class='inc-pyobj2 highlight-text notranslate']//pre",
@ -79,7 +79,7 @@ def cached_etree_parse():
(".//div[@class='inc-lines highlight-text notranslate']//pre", (".//div[@class='inc-lines highlight-text notranslate']//pre",
r'^class Foo:\n pass\nclass Bar:\n$'), r'^class Foo:\n pass\nclass Bar:\n$'),
(".//div[@class='inc-startend highlight-text notranslate']//pre", (".//div[@class='inc-startend highlight-text notranslate']//pre",
u'^foo = "Including Unicode characters: üöä"\\n$'), '^foo = "Including Unicode characters: üöä"\\n$'),
(".//div[@class='inc-preappend highlight-text notranslate']//pre", (".//div[@class='inc-preappend highlight-text notranslate']//pre",
r'(?m)^START CODE$'), r'(?m)^START CODE$'),
(".//div[@class='inc-pyobj-dedent highlight-python notranslate']//span", (".//div[@class='inc-pyobj-dedent highlight-python notranslate']//span",
@ -119,7 +119,7 @@ def cached_etree_parse():
(".//li/p/strong", r'^program\\n$'), (".//li/p/strong", r'^program\\n$'),
(".//li/p/em", r'^dfn\\n$'), (".//li/p/em", r'^dfn\\n$'),
(".//li/p/kbd", r'^kbd\\n$'), (".//li/p/kbd", r'^kbd\\n$'),
(".//li/p/span", u'File \N{TRIANGULAR BULLET} Close'), (".//li/p/span", 'File \N{TRIANGULAR BULLET} Close'),
(".//li/p/code/span[@class='pre']", '^a/$'), (".//li/p/code/span[@class='pre']", '^a/$'),
(".//li/p/code/em/span[@class='pre']", '^varpart$'), (".//li/p/code/em/span[@class='pre']", '^varpart$'),
(".//li/p/code/em/span[@class='pre']", '^i$'), (".//li/p/code/em/span[@class='pre']", '^i$'),

View File

@ -27,6 +27,6 @@ def test_htmlhelp_file_suffix(app, warning):
def test_default_htmlhelp_basename(): def test_default_htmlhelp_basename():
config = Config({'project': u'Sphinx Documentation'}) config = Config({'project': 'Sphinx Documentation'})
config.init_values() config.init_values()
assert default_htmlhelp_basename(config) == 'sphinxdoc' assert default_htmlhelp_basename(config) == 'sphinxdoc'

View File

@ -321,25 +321,25 @@ def test_numref_with_language_ja(app, status, warning):
print(result) print(result)
print(status.getvalue()) print(status.getvalue())
print(warning.getvalue()) print(warning.getvalue())
assert u'\\renewcommand{\\figurename}{\u56f3}' in result # 図 assert '\\renewcommand{\\figurename}{\u56f3}' in result # 図
assert u'\\renewcommand{\\tablename}{\u8868}' in result # 表 assert '\\renewcommand{\\tablename}{\u8868}' in result # 表
assert u'\\renewcommand{\\literalblockname}{\u30ea\u30b9\u30c8}' in result # リスト assert '\\renewcommand{\\literalblockname}{\u30ea\u30b9\u30c8}' in result # リスト
assert (u'\\hyperref[\\detokenize{index:fig1}]' assert ('\\hyperref[\\detokenize{index:fig1}]'
u'{\u56f3 \\ref{\\detokenize{index:fig1}}}') in result '{\u56f3 \\ref{\\detokenize{index:fig1}}}') in result
assert ('\\hyperref[\\detokenize{baz:fig22}]' assert ('\\hyperref[\\detokenize{baz:fig22}]'
'{Figure\\ref{\\detokenize{baz:fig22}}}') in result '{Figure\\ref{\\detokenize{baz:fig22}}}') in result
assert (u'\\hyperref[\\detokenize{index:table-1}]' assert ('\\hyperref[\\detokenize{index:table-1}]'
u'{\u8868 \\ref{\\detokenize{index:table-1}}}') in result '{\u8868 \\ref{\\detokenize{index:table-1}}}') in result
assert ('\\hyperref[\\detokenize{baz:table22}]' assert ('\\hyperref[\\detokenize{baz:table22}]'
'{Table:\\ref{\\detokenize{baz:table22}}}') in result '{Table:\\ref{\\detokenize{baz:table22}}}') in result
assert (u'\\hyperref[\\detokenize{index:code-1}]' assert ('\\hyperref[\\detokenize{index:code-1}]'
u'{\u30ea\u30b9\u30c8 \\ref{\\detokenize{index:code-1}}}') in result '{\u30ea\u30b9\u30c8 \\ref{\\detokenize{index:code-1}}}') in result
assert ('\\hyperref[\\detokenize{baz:code22}]' assert ('\\hyperref[\\detokenize{baz:code22}]'
'{Code-\\ref{\\detokenize{baz:code22}}}') in result '{Code-\\ref{\\detokenize{baz:code22}}}') in result
assert (u'\\hyperref[\\detokenize{foo:foo}]' assert ('\\hyperref[\\detokenize{foo:foo}]'
u'{\\ref{\\detokenize{foo:foo}} \u7ae0}') in result '{\\ref{\\detokenize{foo:foo}} \u7ae0}') in result
assert (u'\\hyperref[\\detokenize{bar:bar-a}]' assert ('\\hyperref[\\detokenize{bar:bar-a}]'
u'{\\ref{\\detokenize{bar:bar-a}} \u7ae0}') in result '{\\ref{\\detokenize{bar:bar-a}} \u7ae0}') in result
assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} ' assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} '
'\\nameref{\\detokenize{index:fig1}}}') in result '\\nameref{\\detokenize{index:fig1}}}') in result
assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} ' assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} '
@ -1302,7 +1302,7 @@ def test_latex_glossary(app, status, warning):
app.builder.build_all() app.builder.build_all()
result = (app.outdir / 'test.tex').text(encoding='utf8') result = (app.outdir / 'test.tex').text(encoding='utf8')
assert (u'\\item[{änhlich\\index{änhlich@\\spxentry{änhlich}|spxpagem}' assert ('\\item[{änhlich\\index{änhlich@\\spxentry{änhlich}|spxpagem}'
r'\phantomsection' r'\phantomsection'
r'\label{\detokenize{index:term-anhlich}}}] \leavevmode' in result) r'\label{\detokenize{index:term-anhlich}}}] \leavevmode' in result)
assert (r'\item[{boson\index{boson@\spxentry{boson}|spxpagem}\phantomsection' assert (r'\item[{boson\index{boson@\spxentry{boson}|spxpagem}\phantomsection'
@ -1317,7 +1317,7 @@ def test_latex_glossary(app, status, warning):
r'\label{\detokenize{index:term-myon}}}] \leavevmode' r'\label{\detokenize{index:term-myon}}}] \leavevmode'
r'\item[{electron\index{electron@\spxentry{electron}|spxpagem}\phantomsection' r'\item[{electron\index{electron@\spxentry{electron}|spxpagem}\phantomsection'
r'\label{\detokenize{index:term-electron}}}] \leavevmode' in result) r'\label{\detokenize{index:term-electron}}}] \leavevmode' in result)
assert (u'\\item[{über\\index{über@\\spxentry{über}|spxpagem}\\phantomsection' assert ('\\item[{über\\index{über@\\spxentry{über}|spxpagem}\\phantomsection'
r'\label{\detokenize{index:term-uber}}}] \leavevmode' in result) r'\label{\detokenize{index:term-uber}}}] \leavevmode' in result)

View File

@ -33,10 +33,10 @@ def test_all(app, status, warning):
def test_default_man_pages(): def test_default_man_pages():
config = Config({'master_doc': 'index', config = Config({'master_doc': 'index',
'project': u'STASI™ Documentation', 'project': 'STASI™ Documentation',
'author': u"Wolfgang Schäuble & G'Beckstein", 'author': "Wolfgang Schäuble & G'Beckstein",
'release': '1.0'}) 'release': '1.0'})
config.init_values() config.init_values()
expected = [('index', 'stasi', u'STASI™ Documentation 1.0', expected = [('index', 'stasi', 'STASI™ Documentation 1.0',
[u"Wolfgang Schäuble & G'Beckstein"], 1)] ["Wolfgang Schäuble & G'Beckstein"], 1)]
assert default_man_pages(config) == expected assert default_man_pages(config) == expected

View File

@ -95,10 +95,10 @@ def test_texinfo_citation(app, status, warning):
def test_default_texinfo_documents(): def test_default_texinfo_documents():
config = Config({'master_doc': 'index', config = Config({'master_doc': 'index',
'project': u'STASI™ Documentation', 'project': 'STASI™ Documentation',
'author': u"Wolfgang Schäuble & G'Beckstein"}) 'author': "Wolfgang Schäuble & G'Beckstein"})
config.init_values() config.init_values()
expected = [('index', 'stasi', u'STASI™ Documentation', expected = [('index', 'stasi', 'STASI™ Documentation',
u"Wolfgang Schäuble & G'Beckstein", 'stasi', "Wolfgang Schäuble & G'Beckstein", 'stasi',
'One line description of project', 'Miscellaneous')] 'One line description of project', 'Miscellaneous')]
assert default_texinfo_documents(config) == expected assert default_texinfo_documents(config) == expected

View File

@ -50,12 +50,12 @@ def test_lineblock(app, status, warning):
app.builder.build_update() app.builder.build_update()
result = (app.outdir / 'lineblock.txt').text(encoding='utf-8') result = (app.outdir / 'lineblock.txt').text(encoding='utf-8')
expect = ( expect = (
u"* one\n" "* one\n"
u"\n" "\n"
u" line-block 1\n" " line-block 1\n"
u" line-block 2\n" " line-block 2\n"
u"\n" "\n"
u"followed paragraph.\n" "followed paragraph.\n"
) )
assert result == expect assert result == expect

View File

@ -121,24 +121,24 @@ def test_overrides():
@mock.patch("sphinx.config.logger") @mock.patch("sphinx.config.logger")
def test_errors_warnings(logger, tempdir): def test_errors_warnings(logger, tempdir):
# test the error for syntax errors in the config file # test the error for syntax errors in the config file
(tempdir / 'conf.py').write_text(u'project = \n', encoding='ascii') (tempdir / 'conf.py').write_text('project = \n', encoding='ascii')
with pytest.raises(ConfigError) as excinfo: with pytest.raises(ConfigError) as excinfo:
Config.read(tempdir, {}, None) Config.read(tempdir, {}, None)
assert 'conf.py' in str(excinfo.value) assert 'conf.py' in str(excinfo.value)
# test the automatic conversion of 2.x only code in configs # test the automatic conversion of 2.x only code in configs
(tempdir / 'conf.py').write_text( (tempdir / 'conf.py').write_text(
u'# -*- coding: utf-8\n\nproject = u"Jägermeister"\n', '# -*- coding: utf-8\n\nproject = u"Jägermeister"\n',
encoding='utf-8') encoding='utf-8')
cfg = Config.read(tempdir, {}, None) cfg = Config.read(tempdir, {}, None)
cfg.init_values() cfg.init_values()
assert cfg.project == u'Jägermeister' assert cfg.project == 'Jägermeister'
assert logger.called is False assert logger.called is False
def test_errors_if_setup_is_not_callable(tempdir, make_app): def test_errors_if_setup_is_not_callable(tempdir, make_app):
# test the error to call setup() in the config file # test the error to call setup() in the config file
(tempdir / 'conf.py').write_text(u'setup = 1') (tempdir / 'conf.py').write_text('setup = 1')
with pytest.raises(ConfigError) as excinfo: with pytest.raises(ConfigError) as excinfo:
make_app(srcdir=tempdir) make_app(srcdir=tempdir)
assert 'callable' in str(excinfo.value) assert 'callable' in str(excinfo.value)
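The u-prefixed assignment written into that conf.py is still valid Python 3 syntax (PEP 414 re-allowed the prefix in 3.3), so the config executes unchanged. A minimal sketch of what the assertion relies on:

    source = '# -*- coding: utf-8\n\nproject = u"Jägermeister"\n'
    namespace = {}
    exec(compile(source, 'conf.py', 'exec'), namespace)  # the u-prefix parses fine on Python 3
    assert namespace['project'] == 'Jägermeister'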
@ -184,7 +184,7 @@ def test_config_eol(logger, tempdir):
configfile.write_bytes(b'project = "spam"' + eol) configfile.write_bytes(b'project = "spam"' + eol)
cfg = Config.read(tempdir, {}, None) cfg = Config.read(tempdir, {}, None)
cfg.init_values() cfg.init_values()
assert cfg.project == u'spam' assert cfg.project == 'spam'
assert logger.called is False assert logger.called is False
@ -228,12 +228,12 @@ TYPECHECK_WARNINGS = [
('value8', B(), None, C(), False), # sibling type ('value8', B(), None, C(), False), # sibling type
('value9', None, None, 'foo', False), # no default or no annotations ('value9', None, None, 'foo', False), # no default or no annotations
('value10', None, None, 123, False), # no default or no annotations ('value10', None, None, 123, False), # no default or no annotations
('value11', None, [str], u'bar', False), # str vs unicode ('value11', None, [str], 'bar', False), # str vs unicode
('value12', 'string', None, u'bar', False), # str vs unicode ('value12', 'string', None, 'bar', False), # str vs unicode
('value13', None, string_classes, 'bar', False), # string_classes ('value13', None, string_classes, 'bar', False), # string_classes
('value14', None, string_classes, u'bar', False), # string_classes ('value14', None, string_classes, 'bar', False), # string_classes
('value15', u'unicode', None, 'bar', False), # str vs unicode ('value15', 'unicode', None, 'bar', False), # str vs unicode
('value16', u'unicode', None, u'bar', False), # str vs unicode ('value16', 'unicode', None, 'bar', False), # str vs unicode
] ]

View File

@ -95,10 +95,10 @@ def test_LiteralIncludeReader_lines1(literal_inc_path):
options = {'lines': '1-4'} options = {'lines': '1-4'}
reader = LiteralIncludeReader(literal_inc_path, options, DUMMY_CONFIG) reader = LiteralIncludeReader(literal_inc_path, options, DUMMY_CONFIG)
content, lines = reader.read() content, lines = reader.read()
assert content == (u"# Literally included file using Python highlighting\n" assert content == ("# Literally included file using Python highlighting\n"
u"# -*- coding: utf-8 -*-\n" "# -*- coding: utf-8 -*-\n"
u"\n" "\n"
u"foo = \"Including Unicode characters: üöä\"\n") "foo = \"Including Unicode characters: üöä\"\n")
@pytest.mark.xfail(os.name != 'posix', reason="Not working on windows") @pytest.mark.xfail(os.name != 'posix', reason="Not working on windows")
@ -106,9 +106,9 @@ def test_LiteralIncludeReader_lines2(literal_inc_path):
options = {'lines': '1,4,6'} options = {'lines': '1,4,6'}
reader = LiteralIncludeReader(literal_inc_path, options, DUMMY_CONFIG) reader = LiteralIncludeReader(literal_inc_path, options, DUMMY_CONFIG)
content, lines = reader.read() content, lines = reader.read()
assert content == (u"# Literally included file using Python highlighting\n" assert content == ("# Literally included file using Python highlighting\n"
u"foo = \"Including Unicode characters: üöä\"\n" "foo = \"Including Unicode characters: üöä\"\n"
u"class Foo:\n") "class Foo:\n")
@pytest.mark.xfail(os.name != 'posix', reason="Not working on windows") @pytest.mark.xfail(os.name != 'posix', reason="Not working on windows")
@ -116,9 +116,9 @@ def test_LiteralIncludeReader_lines_and_lineno_match1(literal_inc_path):
options = {'lines': '4-6', 'lineno-match': True} options = {'lines': '4-6', 'lineno-match': True}
reader = LiteralIncludeReader(literal_inc_path, options, DUMMY_CONFIG) reader = LiteralIncludeReader(literal_inc_path, options, DUMMY_CONFIG)
content, lines = reader.read() content, lines = reader.read()
assert content == (u"foo = \"Including Unicode characters: üöä\"\n" assert content == ("foo = \"Including Unicode characters: üöä\"\n"
u"\n" "\n"
u"class Foo:\n") "class Foo:\n")
assert reader.lineno_start == 4 assert reader.lineno_start == 4
@ -312,11 +312,11 @@ def test_code_block(app, status, warning):
def test_code_block_caption_html(app, status, warning): def test_code_block_caption_html(app, status, warning):
app.builder.build(['caption']) app.builder.build(['caption'])
html = (app.outdir / 'caption.html').text(encoding='utf-8') html = (app.outdir / 'caption.html').text(encoding='utf-8')
caption = (u'<div class="code-block-caption">' caption = ('<div class="code-block-caption">'
u'<span class="caption-number">Listing 1 </span>' '<span class="caption-number">Listing 1 </span>'
u'<span class="caption-text">caption <em>test</em> rb' '<span class="caption-text">caption <em>test</em> rb'
u'</span><a class="headerlink" href="#id1" ' '</span><a class="headerlink" href="#id1" '
u'title="Permalink to this code">\xb6</a></div>') 'title="Permalink to this code">\xb6</a></div>')
assert caption in html assert caption in html
@ -460,11 +460,11 @@ def test_literalinclude_file_whole_of_emptyline(app, status, warning):
def test_literalinclude_caption_html(app, status, warning): def test_literalinclude_caption_html(app, status, warning):
app.builder.build('index') app.builder.build('index')
html = (app.outdir / 'caption.html').text(encoding='utf-8') html = (app.outdir / 'caption.html').text(encoding='utf-8')
caption = (u'<div class="code-block-caption">' caption = ('<div class="code-block-caption">'
u'<span class="caption-number">Listing 2 </span>' '<span class="caption-number">Listing 2 </span>'
u'<span class="caption-text">caption <strong>test</strong> py' '<span class="caption-text">caption <strong>test</strong> py'
u'</span><a class="headerlink" href="#id2" ' '</span><a class="headerlink" href="#id2" '
u'title="Permalink to this code">\xb6</a></div>') 'title="Permalink to this code">\xb6</a></div>')
assert caption in html assert caption in html

View File

@ -83,7 +83,7 @@ def test_texinfo(app, status, warning):
docutilsconf='[general]\nsource_link=true\n') docutilsconf='[general]\nsource_link=true\n')
def test_docutils_source_link_with_nonascii_file(app, status, warning): def test_docutils_source_link_with_nonascii_file(app, status, warning):
srcdir = path(app.srcdir) srcdir = path(app.srcdir)
mb_name = u'\u65e5\u672c\u8a9e' mb_name = '\u65e5\u672c\u8a9e'
try: try:
(srcdir / (mb_name + '.txt')).write_text('') (srcdir / (mb_name + '.txt')).write_text('')
except UnicodeEncodeError: except UnicodeEncodeError:

View File

@ -39,25 +39,20 @@ def test_domain_js_xrefs(app, status, warning):
doctree = app.env.get_doctree('roles') doctree = app.env.get_doctree('roles')
refnodes = list(doctree.traverse(addnodes.pending_xref)) refnodes = list(doctree.traverse(addnodes.pending_xref))
assert_refnode(refnodes[0], None, None, u'TopLevel', u'class') assert_refnode(refnodes[0], None, None, 'TopLevel', 'class')
assert_refnode(refnodes[1], None, None, u'top_level', u'func') assert_refnode(refnodes[1], None, None, 'top_level', 'func')
assert_refnode(refnodes[2], None, u'NestedParentA', u'child_1', u'func') assert_refnode(refnodes[2], None, 'NestedParentA', 'child_1', 'func')
assert_refnode(refnodes[3], None, u'NestedParentA', assert_refnode(refnodes[3], None, 'NestedParentA', 'NestedChildA.subchild_2', 'func')
u'NestedChildA.subchild_2', u'func') assert_refnode(refnodes[4], None, 'NestedParentA', 'child_2', 'func')
assert_refnode(refnodes[4], None, u'NestedParentA', u'child_2', u'func') assert_refnode(refnodes[5], False, 'NestedParentA', 'any_child', domain='')
assert_refnode(refnodes[5], False, u'NestedParentA', u'any_child', domain='') assert_refnode(refnodes[6], None, 'NestedParentA', 'NestedChildA', 'class')
assert_refnode(refnodes[6], None, u'NestedParentA', u'NestedChildA', u'class') assert_refnode(refnodes[7], None, 'NestedParentA.NestedChildA', 'subchild_2', 'func')
assert_refnode(refnodes[7], None, u'NestedParentA.NestedChildA', assert_refnode(refnodes[8], None, 'NestedParentA.NestedChildA',
u'subchild_2', u'func') 'NestedParentA.child_1', 'func')
assert_refnode(refnodes[8], None, u'NestedParentA.NestedChildA', assert_refnode(refnodes[9], None, 'NestedParentA', 'NestedChildA.subchild_1', 'func')
u'NestedParentA.child_1', u'func') assert_refnode(refnodes[10], None, 'NestedParentB', 'child_1', 'func')
assert_refnode(refnodes[9], None, u'NestedParentA', assert_refnode(refnodes[11], None, 'NestedParentB', 'NestedParentB', 'class')
u'NestedChildA.subchild_1', u'func') assert_refnode(refnodes[12], None, None, 'NestedParentA.NestedChildA', 'class')
assert_refnode(refnodes[10], None, u'NestedParentB', u'child_1', u'func')
assert_refnode(refnodes[11], None, u'NestedParentB', u'NestedParentB',
u'class')
assert_refnode(refnodes[12], None, None, u'NestedParentA.NestedChildA',
u'class')
assert len(refnodes) == 13 assert len(refnodes) == 13
doctree = app.env.get_doctree('module') doctree = app.env.get_doctree('module')
@ -118,24 +113,23 @@ def test_domain_js_find_obj(app, status, warning):
app.builder.build_all() app.builder.build_all()
assert (find_obj(None, None, u'NONEXISTANT', u'class') == assert (find_obj(None, None, 'NONEXISTANT', 'class') == (None, None))
(None, None)) assert (find_obj(None, None, 'NestedParentA', 'class') ==
assert (find_obj(None, None, u'NestedParentA', u'class') == ('NestedParentA', ('roles', 'class')))
(u'NestedParentA', (u'roles', u'class'))) assert (find_obj(None, None, 'NestedParentA.NestedChildA', 'class') ==
assert (find_obj(None, None, u'NestedParentA.NestedChildA', u'class') == ('NestedParentA.NestedChildA', ('roles', 'class')))
(u'NestedParentA.NestedChildA', (u'roles', u'class'))) assert (find_obj(None, 'NestedParentA', 'NestedChildA', 'class') ==
assert (find_obj(None, 'NestedParentA', u'NestedChildA', u'class') == ('NestedParentA.NestedChildA', ('roles', 'class')))
(u'NestedParentA.NestedChildA', (u'roles', u'class'))) assert (find_obj(None, None, 'NestedParentA.NestedChildA.subchild_1', 'func') ==
assert (find_obj(None, None, u'NestedParentA.NestedChildA.subchild_1', u'func') == ('NestedParentA.NestedChildA.subchild_1', ('roles', 'function')))
(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'function'))) assert (find_obj(None, 'NestedParentA', 'NestedChildA.subchild_1', 'func') ==
assert (find_obj(None, u'NestedParentA', u'NestedChildA.subchild_1', u'func') == ('NestedParentA.NestedChildA.subchild_1', ('roles', 'function')))
(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'function'))) assert (find_obj(None, 'NestedParentA.NestedChildA', 'subchild_1', 'func') ==
assert (find_obj(None, u'NestedParentA.NestedChildA', u'subchild_1', u'func') == ('NestedParentA.NestedChildA.subchild_1', ('roles', 'function')))
(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'function'))) assert (find_obj('module_a.submodule', 'ModTopLevel', 'mod_child_2', 'meth') ==
assert (find_obj(u'module_a.submodule', u'ModTopLevel', u'mod_child_2', u'meth') == ('module_a.submodule.ModTopLevel.mod_child_2', ('module', 'method')))
(u'module_a.submodule.ModTopLevel.mod_child_2', (u'module', u'method'))) assert (find_obj('module_b.submodule', 'ModTopLevel', 'module_a.submodule', 'mod') ==
assert (find_obj(u'module_b.submodule', u'ModTopLevel', u'module_a.submodule', u'mod') == ('module_a.submodule', ('module', 'module')))
(u'module_a.submodule', (u'module', u'module')))
def test_get_full_qualified_name(): def test_get_full_qualified_name():

View File

@ -31,22 +31,22 @@ def parse(sig):
def test_function_signatures(): def test_function_signatures():
rv = parse('func(a=1) -> int object') rv = parse('func(a=1) -> int object')
assert text_type(rv) == u'a=1' assert text_type(rv) == 'a=1'
rv = parse('func(a=1, [b=None])') rv = parse('func(a=1, [b=None])')
assert text_type(rv) == u'a=1, [b=None]' assert text_type(rv) == 'a=1, [b=None]'
rv = parse('func(a=1[, b=None])') rv = parse('func(a=1[, b=None])')
assert text_type(rv) == u'a=1, [b=None]' assert text_type(rv) == 'a=1, [b=None]'
rv = parse("compile(source : string, filename, symbol='file')") rv = parse("compile(source : string, filename, symbol='file')")
assert text_type(rv) == u"source : string, filename, symbol='file'" assert text_type(rv) == "source : string, filename, symbol='file'"
rv = parse('func(a=[], [b=None])') rv = parse('func(a=[], [b=None])')
assert text_type(rv) == u'a=[], [b=None]' assert text_type(rv) == 'a=[], [b=None]'
rv = parse('func(a=[][, b=None])') rv = parse('func(a=[][, b=None])')
assert text_type(rv) == u'a=[], [b=None]' assert text_type(rv) == 'a=[], [b=None]'
@pytest.mark.sphinx('dummy', testroot='domain-py') @pytest.mark.sphinx('dummy', testroot='domain-py')
@ -70,26 +70,20 @@ def test_domain_py_xrefs(app, status, warning):
doctree = app.env.get_doctree('roles') doctree = app.env.get_doctree('roles')
refnodes = list(doctree.traverse(addnodes.pending_xref)) refnodes = list(doctree.traverse(addnodes.pending_xref))
assert_refnode(refnodes[0], None, None, u'TopLevel', u'class') assert_refnode(refnodes[0], None, None, 'TopLevel', 'class')
assert_refnode(refnodes[1], None, None, u'top_level', u'meth') assert_refnode(refnodes[1], None, None, 'top_level', 'meth')
assert_refnode(refnodes[2], None, u'NestedParentA', u'child_1', u'meth') assert_refnode(refnodes[2], None, 'NestedParentA', 'child_1', 'meth')
assert_refnode(refnodes[3], None, u'NestedParentA', assert_refnode(refnodes[3], None, 'NestedParentA', 'NestedChildA.subchild_2', 'meth')
u'NestedChildA.subchild_2', u'meth') assert_refnode(refnodes[4], None, 'NestedParentA', 'child_2', 'meth')
assert_refnode(refnodes[4], None, u'NestedParentA', u'child_2', u'meth') assert_refnode(refnodes[5], False, 'NestedParentA', 'any_child', domain='')
assert_refnode(refnodes[5], False, u'NestedParentA', u'any_child', domain='') assert_refnode(refnodes[6], None, 'NestedParentA', 'NestedChildA', 'class')
assert_refnode(refnodes[6], None, u'NestedParentA', u'NestedChildA', assert_refnode(refnodes[7], None, 'NestedParentA.NestedChildA', 'subchild_2', 'meth')
u'class') assert_refnode(refnodes[8], None, 'NestedParentA.NestedChildA',
assert_refnode(refnodes[7], None, u'NestedParentA.NestedChildA', 'NestedParentA.child_1', 'meth')
u'subchild_2', u'meth') assert_refnode(refnodes[9], None, 'NestedParentA', 'NestedChildA.subchild_1', 'meth')
assert_refnode(refnodes[8], None, u'NestedParentA.NestedChildA', assert_refnode(refnodes[10], None, 'NestedParentB', 'child_1', 'meth')
u'NestedParentA.child_1', u'meth') assert_refnode(refnodes[11], None, 'NestedParentB', 'NestedParentB', 'class')
assert_refnode(refnodes[9], None, u'NestedParentA', assert_refnode(refnodes[12], None, None, 'NestedParentA.NestedChildA', 'class')
u'NestedChildA.subchild_1', u'meth')
assert_refnode(refnodes[10], None, u'NestedParentB', u'child_1', u'meth')
assert_refnode(refnodes[11], None, u'NestedParentB', u'NestedParentB',
u'class')
assert_refnode(refnodes[12], None, None, u'NestedParentA.NestedChildA',
u'class')
assert len(refnodes) == 13 assert len(refnodes) == 13
doctree = app.env.get_doctree('module') doctree = app.env.get_doctree('module')
@ -169,20 +163,19 @@ def test_domain_py_find_obj(app, status, warning):
app.builder.build_all() app.builder.build_all()
assert (find_obj(None, None, u'NONEXISTANT', u'class') == assert (find_obj(None, None, 'NONEXISTANT', 'class') == [])
[]) assert (find_obj(None, None, 'NestedParentA', 'class') ==
assert (find_obj(None, None, u'NestedParentA', u'class') == [('NestedParentA', ('roles', 'class'))])
[(u'NestedParentA', (u'roles', u'class'))]) assert (find_obj(None, None, 'NestedParentA.NestedChildA', 'class') ==
assert (find_obj(None, None, u'NestedParentA.NestedChildA', u'class') == [('NestedParentA.NestedChildA', ('roles', 'class'))])
[(u'NestedParentA.NestedChildA', (u'roles', u'class'))]) assert (find_obj(None, 'NestedParentA', 'NestedChildA', 'class') ==
assert (find_obj(None, 'NestedParentA', u'NestedChildA', u'class') == [('NestedParentA.NestedChildA', ('roles', 'class'))])
[(u'NestedParentA.NestedChildA', (u'roles', u'class'))]) assert (find_obj(None, None, 'NestedParentA.NestedChildA.subchild_1', 'meth') ==
assert (find_obj(None, None, u'NestedParentA.NestedChildA.subchild_1', u'meth') == [('NestedParentA.NestedChildA.subchild_1', ('roles', 'method'))])
[(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'method'))]) assert (find_obj(None, 'NestedParentA', 'NestedChildA.subchild_1', 'meth') ==
assert (find_obj(None, u'NestedParentA', u'NestedChildA.subchild_1', u'meth') == [('NestedParentA.NestedChildA.subchild_1', ('roles', 'method'))])
[(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'method'))]) assert (find_obj(None, 'NestedParentA.NestedChildA', 'subchild_1', 'meth') ==
assert (find_obj(None, u'NestedParentA.NestedChildA', u'subchild_1', u'meth') == [('NestedParentA.NestedChildA.subchild_1', ('roles', 'method'))])
[(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'method'))])
def test_get_full_qualified_name(): def test_get_full_qualified_name():

View File

@ -13,14 +13,14 @@ from sphinx.domains.rst import parse_directive
def test_parse_directive(): def test_parse_directive():
s = parse_directive(u' foö ') s = parse_directive(' foö ')
assert s == (u'foö', '') assert s == ('foö', '')
s = parse_directive(u' .. foö :: ') s = parse_directive(' .. foö :: ')
assert s == (u'foö', ' ') assert s == ('foö', ' ')
s = parse_directive(u'.. foö:: args1 args2') s = parse_directive('.. foö:: args1 args2')
assert s == (u'foö', ' args1 args2') assert s == ('foö', ' args1 args2')
s = parse_directive('.. :: bar') s = parse_directive('.. :: bar')
assert s == ('.. :: bar', '') assert s == ('.. :: bar', '')

View File

@ -31,25 +31,25 @@ def test_create_single_index():
('single', 'pip; install', 'id3', '', None), ('single', 'pip; install', 'id3', '', None),
('single', 'pip; upgrade', 'id4', '', None), ('single', 'pip; upgrade', 'id4', '', None),
('single', 'Sphinx', 'id5', '', None), ('single', 'Sphinx', 'id5', '', None),
('single', u'Ель', 'id6', '', None), ('single', 'Ель', 'id6', '', None),
('single', u'ёлка', 'id7', '', None), ('single', 'ёлка', 'id7', '', None),
('single', u'‏תירבע‎', 'id8', '', None), ('single', '‏תירבע‎', 'id8', '', None),
('single', u'9-symbol', 'id9', '', None), ('single', '9-symbol', 'id9', '', None),
('single', u'&-symbol', 'id10', '', None), ('single', '&-symbol', 'id10', '', None),
], ],
}) })
index = IndexEntries(env).create_index(dummy_builder) index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 6 assert len(index) == 6
assert index[0] == (u'Symbols', [(u'&-symbol', [[('', '#id10')], [], None]), assert index[0] == ('Symbols', [('&-symbol', [[('', '#id10')], [], None]),
(u'9-symbol', [[('', '#id9')], [], None])]) ('9-symbol', [[('', '#id9')], [], None])])
assert index[1] == (u'D', [(u'docutils', [[('', '#id1')], [], None])]) assert index[1] == ('D', [('docutils', [[('', '#id1')], [], None])])
assert index[2] == (u'P', [(u'pip', [[], [(u'install', [('', '#id3')]), assert index[2] == ('P', [('pip', [[], [('install', [('', '#id3')]),
(u'upgrade', [('', '#id4')])], None]), ('upgrade', [('', '#id4')])], None]),
(u'Python', [[('', '#id2')], [], None])]) ('Python', [[('', '#id2')], [], None])])
assert index[3] == (u'S', [(u'Sphinx', [[('', '#id5')], [], None])]) assert index[3] == ('S', [('Sphinx', [[('', '#id5')], [], None])])
assert index[4] == (u'Е', [(u'ёлка', [[('', '#id7')], [], None]), assert index[4] == ('Е', [('ёлка', [[('', '#id7')], [], None]),
(u'Ель', [[('', '#id6')], [], None])]) ('Ель', [[('', '#id6')], [], None])])
assert index[5] == (u'ת', [(u'‏תירבע‎', [[('', '#id8')], [], None])]) assert index[5] == ('ת', [('‏תירבע‎', [[('', '#id8')], [], None])])
def test_create_pair_index(): def test_create_pair_index():
@ -63,15 +63,15 @@ def test_create_pair_index():
}) })
index = IndexEntries(env).create_index(dummy_builder) index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 5 assert len(index) == 5
assert index[0] == (u'D', assert index[0] == ('D',
[(u'documentation tool', [[], [(u'Sphinx', [('', '#id3')])], None]), [('documentation tool', [[], [('Sphinx', [('', '#id3')])], None]),
(u'docutils', [[], [(u'reStructuredText', [('', '#id1')])], None])]) ('docutils', [[], [('reStructuredText', [('', '#id1')])], None])])
assert index[1] == (u'I', [(u'interpreter', [[], [(u'Python', [('', '#id2')])], None])]) assert index[1] == ('I', [('interpreter', [[], [('Python', [('', '#id2')])], None])])
assert index[2] == (u'P', [(u'Python', [[], [(u'interpreter', [('', '#id2')])], None])]) assert index[2] == ('P', [('Python', [[], [('interpreter', [('', '#id2')])], None])])
assert index[3] == (u'R', assert index[3] == ('R',
[(u'reStructuredText', [[], [(u'docutils', [('', '#id1')])], None])]) [('reStructuredText', [[], [('docutils', [('', '#id1')])], None])])
assert index[4] == (u'S', assert index[4] == ('S',
[(u'Sphinx', [[], [(u'documentation tool', [('', '#id3')])], None])]) [('Sphinx', [[], [('documentation tool', [('', '#id3')])], None])])
def test_create_triple_index(): def test_create_triple_index():
@ -84,12 +84,12 @@ def test_create_triple_index():
}) })
index = IndexEntries(env).create_index(dummy_builder) index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 5 assert len(index) == 5
assert index[0] == (u'B', [(u'bar', [[], [(u'baz, foo', [('', '#id1')])], None]), assert index[0] == ('B', [('bar', [[], [('baz, foo', [('', '#id1')])], None]),
(u'baz', [[], [(u'foo bar', [('', '#id1')])], None])]) ('baz', [[], [('foo bar', [('', '#id1')])], None])])
assert index[1] == (u'F', [(u'foo', [[], [(u'bar baz', [('', '#id1')])], None])]) assert index[1] == ('F', [('foo', [[], [('bar baz', [('', '#id1')])], None])])
assert index[2] == (u'P', [(u'Python', [[], [(u'Sphinx reST', [('', '#id2')])], None])]) assert index[2] == ('P', [('Python', [[], [('Sphinx reST', [('', '#id2')])], None])])
assert index[3] == (u'R', [(u'reST', [[], [(u'Python Sphinx', [('', '#id2')])], None])]) assert index[3] == ('R', [('reST', [[], [('Python Sphinx', [('', '#id2')])], None])])
assert index[4] == (u'S', [(u'Sphinx', [[], [(u'reST, Python', [('', '#id2')])], None])]) assert index[4] == ('S', [('Sphinx', [[], [('reST, Python', [('', '#id2')])], None])])
def test_create_see_index(): def test_create_see_index():
@ -105,9 +105,9 @@ def test_create_see_index():
}) })
index = IndexEntries(env).create_index(dummy_builder) index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 3 assert len(index) == 3
assert index[0] == (u'D', [(u'docutils', [[], [(u'see reStructuredText', [])], None])]) assert index[0] == ('D', [('docutils', [[], [('see reStructuredText', [])], None])])
assert index[1] == (u'P', [(u'Python', [[], [(u'see interpreter', [])], None])]) assert index[1] == ('P', [('Python', [[], [('see interpreter', [])], None])])
assert index[2] == (u'S', [(u'Sphinx', [[], [(u'see documentation tool', [])], None])]) assert index[2] == ('S', [('Sphinx', [[], [('see documentation tool', [])], None])])
def test_create_seealso_index(): def test_create_seealso_index():
@ -123,12 +123,9 @@ def test_create_seealso_index():
}) })
index = IndexEntries(env).create_index(dummy_builder) index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 3 assert len(index) == 3
assert index[0] == (u'D', assert index[0] == ('D', [('docutils', [[], [('see also reStructuredText', [])], None])])
[(u'docutils', [[], [(u'see also reStructuredText', [])], None])]) assert index[1] == ('P', [('Python', [[], [('see also interpreter', [])], None])])
assert index[1] == (u'P', assert index[2] == ('S', [('Sphinx', [[], [('see also documentation tool', [])], None])])
[(u'Python', [[], [(u'see also interpreter', [])], None])])
assert index[2] == (u'S',
[(u'Sphinx', [[], [(u'see also documentation tool', [])], None])])
def test_create_index_by_key(): def test_create_index_by_key():
@ -137,11 +134,11 @@ def test_create_index_by_key():
'index': [ 'index': [
('single', 'docutils', 'id1', '', None), ('single', 'docutils', 'id1', '', None),
('single', 'Python', 'id2', '', None), ('single', 'Python', 'id2', '', None),
('single', u'スフィンクス', 'id3', '', u'ス'), ('single', 'スフィンクス', 'id3', '', 'ス'),
], ],
}) })
index = IndexEntries(env).create_index(dummy_builder) index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 3 assert len(index) == 3
assert index[0] == (u'D', [(u'docutils', [[('', '#id1')], [], None])]) assert index[0] == ('D', [('docutils', [[('', '#id1')], [], None])])
assert index[1] == (u'P', [(u'Python', [[('', '#id2')], [], None])]) assert index[1] == ('P', [('Python', [[('', '#id2')], [], None])])
assert index[2] == (u'ス', [(u'スフィンクス', [[('', '#id3')], [], u'ス'])]) assert index[2] == ('ス', [('スフィンクス', [[('', '#id3')], [], 'ス'])])

View File

@ -37,7 +37,7 @@ def test_process_doc(app):
list_item)]) list_item)])
assert_node(toctree[0][0], assert_node(toctree[0][0],
[compact_paragraph, reference, u"Welcome to Sphinx Tests’s documentation!"]) [compact_paragraph, reference, "Welcome to Sphinx Tests’s documentation!"])
assert_node(toctree[0][0][0], reference, anchorname='') assert_node(toctree[0][0][0], reference, anchorname='')
assert_node(toctree[0][1][0], addnodes.toctree, assert_node(toctree[0][1][0], addnodes.toctree,
caption="Table of Contents", glob=False, hidden=False, caption="Table of Contents", glob=False, hidden=False,
@ -152,7 +152,7 @@ def test_get_toc_for(app):
addnodes.toctree)])], addnodes.toctree)])],
[list_item, compact_paragraph])]) # [2][0] [list_item, compact_paragraph])]) # [2][0]
assert_node(toctree[0][0], assert_node(toctree[0][0],
[compact_paragraph, reference, u"Welcome to Sphinx Testss documentation!"]) [compact_paragraph, reference, "Welcome to Sphinx Testss documentation!"])
assert_node(toctree[0][1][2], assert_node(toctree[0][1][2],
([compact_paragraph, reference, "subsection"], ([compact_paragraph, reference, "subsection"],
[bullet_list, list_item, compact_paragraph, reference, "subsubsection"])) [bullet_list, list_item, compact_paragraph, reference, "subsubsection"]))
@ -179,7 +179,7 @@ def test_get_toc_for_only(app):
addnodes.toctree)])], addnodes.toctree)])],
[list_item, compact_paragraph])]) # [2][0] [list_item, compact_paragraph])]) # [2][0]
assert_node(toctree[0][0], assert_node(toctree[0][0],
[compact_paragraph, reference, u"Welcome to Sphinx Testss documentation!"]) [compact_paragraph, reference, "Welcome to Sphinx Testss documentation!"])
assert_node(toctree[0][1][1], assert_node(toctree[0][1][1],
([compact_paragraph, reference, "Section for HTML"], ([compact_paragraph, reference, "Section for HTML"],
[bullet_list, addnodes.toctree])) [bullet_list, addnodes.toctree]))

View File

@ -266,10 +266,10 @@ def test_excludes_module_should_not_be_skipped(apidoc):
@pytest.mark.apidoc( @pytest.mark.apidoc(
coderoot='test-root', coderoot='test-root',
options=[ options=[
'--doc-project', u'プロジェクト名', '--doc-project', 'プロジェクト名',
'--doc-author', u'著者名', '--doc-author', '著者名',
'--doc-version', u'バージョン', '--doc-version', 'バージョン',
'--doc-release', u'リリース', '--doc-release', 'リリース',
], ],
) )
def test_multibyte_parameters(make_app, apidoc): def test_multibyte_parameters(make_app, apidoc):
@ -280,10 +280,10 @@ def test_multibyte_parameters(make_app, apidoc):
conf_py = (outdir / 'conf.py').text() conf_py = (outdir / 'conf.py').text()
conf_py_ = remove_unicode_literals(conf_py) conf_py_ = remove_unicode_literals(conf_py)
assert u"project = 'プロジェクト名'" in conf_py_ assert "project = 'プロジェクト名'" in conf_py_
assert u"author = '著者名'" in conf_py_ assert "author = '著者名'" in conf_py_
assert u"version = 'バージョン'" in conf_py_ assert "version = 'バージョン'" in conf_py_
assert u"release = 'リリース'" in conf_py_ assert "release = 'リリース'" in conf_py_
app = make_app('text', srcdir=outdir) app = make_app('text', srcdir=outdir)
app.build() app.build()
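
The conf_py comparison above goes through a remove_unicode_literals() helper. A plausible stand-in with the same effect (an assumption, not necessarily the exact helper the test suite ships) strips the u prefix from single-quoted literals so generated conf.py files compare equal across Python 2 and 3:

    import re
    from functools import partial

    # Hypothetical stand-in for remove_unicode_literals(): drop a leading
    # u from 'single-quoted' string literals.
    remove_unicode_literals = partial(re.sub, r"u('[^']*')", r"\1")

    assert remove_unicode_literals("project = u'X'") == "project = 'X'"
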

View File

@ -36,10 +36,10 @@ def test_autosectionlabel_html(app, status, warning):
assert re.search(html, content, re.S) assert re.search(html, content, re.S)
# for smart_quotes (refs: #4027) # for smart_quotes (refs: #4027)
html = (u'<li><a class="reference internal" ' html = ('<li><a class="reference internal" '
u'href="#this-one-s-got-an-apostrophe">' 'href="#this-one-s-got-an-apostrophe">'
u'<span class="std std-ref">This ones got an apostrophe' '<span class="std std-ref">This ones got an apostrophe'
u'</span></a></li>') '</span></a></li>')
assert re.search(html, content, re.S) assert re.search(html, content, re.S)

View File

@ -52,8 +52,8 @@ def test_mangle_signature():
TEST = [[y.strip() for y in x.split("::")] for x in TEST.split("\n") TEST = [[y.strip() for y in x.split("::")] for x in TEST.split("\n")
if '::' in x] if '::' in x]
for inp, outp in TEST: for inp, outp in TEST:
res = mangle_signature(inp).strip().replace(u"\u00a0", " ") res = mangle_signature(inp).strip().replace("\u00a0", " ")
assert res == outp, (u"'%s' -> '%s' != '%s'" % (inp, res, outp)) assert res == outp, ("'%s' -> '%s' != '%s'" % (inp, res, outp))
def test_extract_summary(capsys): def test_extract_summary(capsys):
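
mangle_signature() comes from sphinx.ext.autosummary and compresses a signature for the summary table, folding defaulted arguments into brackets. A quick usage sketch (the output shown is indicative, not guaranteed across versions):

    from sphinx.ext.autosummary import mangle_signature

    # Defaulted arguments are folded into brackets:
    print(mangle_signature('(a, b=None, c=None)'))  # e.g. '(a[, b, c])'
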

View File

@ -40,14 +40,14 @@ def test_jsmath(app, status, warning):
assert '<div class="math notranslate nohighlight">\na^2 + b^2 = c^2</div>' in content assert '<div class="math notranslate nohighlight">\na^2 + b^2 = c^2</div>' in content
assert ('<div class="math notranslate nohighlight">\n\\begin{split}a + 1 &lt; ' assert ('<div class="math notranslate nohighlight">\n\\begin{split}a + 1 &lt; '
'b\\end{split}</div>' in content) 'b\\end{split}</div>' in content)
assert (u'<span class="eqno">(1)<a class="headerlink" href="#equation-foo" ' assert ('<span class="eqno">(1)<a class="headerlink" href="#equation-foo" '
u'title="Permalink to this equation">\xb6</a></span>' 'title="Permalink to this equation">\xb6</a></span>'
u'<div class="math notranslate nohighlight" id="equation-foo">' '<div class="math notranslate nohighlight" id="equation-foo">'
'\ne^{i\\pi} = 1</div>' in content) '\ne^{i\\pi} = 1</div>' in content)
assert (u'<span class="eqno">(2)<a class="headerlink" href="#equation-math-0" ' assert ('<span class="eqno">(2)<a class="headerlink" href="#equation-math-0" '
u'title="Permalink to this equation">\xb6</a></span>' 'title="Permalink to this equation">\xb6</a></span>'
u'<div class="math notranslate nohighlight" id="equation-math-0">\n' '<div class="math notranslate nohighlight" id="equation-math-0">\n'
u'e^{ix} = \\cos x + i\\sin x</div>' in content) 'e^{ix} = \\cos x + i\\sin x</div>' in content)
assert '<div class="math notranslate nohighlight">\nn \\in \\mathbb N</div>' in content assert '<div class="math notranslate nohighlight">\nn \\in \\mathbb N</div>' in content
assert '<div class="math notranslate nohighlight">\na + 1 &lt; b</div>' in content assert '<div class="math notranslate nohighlight">\na + 1 &lt; b</div>' in content

View File

@ -341,7 +341,7 @@ class ModifyIterTest(BaseIteratorsTest):
self.assertEqual(expected, [i for i in it]) self.assertEqual(expected, [i for i in it])
def test_modifier_rstrip_unicode(self): def test_modifier_rstrip_unicode(self):
a = [u'', u' ', u' a ', u'b ', u' c', u' ', u''] a = ['', ' ', ' a ', 'b ', ' c', ' ', '']
it = modify_iter(a, modifier=lambda s: s.rstrip()) it = modify_iter(a, modifier=lambda s: s.rstrip())
expected = [u'', u'', u' a', u'b', u' c', u'', u''] expected = ['', '', ' a', 'b', ' c', '', '']
self.assertEqual(expected, [i for i in it]) self.assertEqual(expected, [i for i in it])
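
modify_iter (from sphinx.ext.napoleon.iterators) applies the modifier lazily to each yielded item, which is exactly what the rstrip expectations above encode:

    from sphinx.ext.napoleon.iterators import modify_iter

    it = modify_iter(['  a  ', 'b  '], modifier=lambda s: s.rstrip())
    assert list(it) == ['  a', 'b']
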

View File

@ -106,7 +106,7 @@ def assert_count(expected_expr, result, count):
def test_text_toctree(app): def test_text_toctree(app):
app.build() app.build()
result = (app.outdir / 'index.txt').text(encoding='utf-8') result = (app.outdir / 'index.txt').text(encoding='utf-8')
assert_startswith(result, u"CONTENTS\n********\n\nTABLE OF CONTENTS\n") assert_startswith(result, "CONTENTS\n********\n\nTABLE OF CONTENTS\n")
@sphinx_intl @sphinx_intl
@ -128,9 +128,9 @@ def test_text_warning_node(app):
app.build() app.build()
# test warnings in translation # test warnings in translation
result = (app.outdir / 'warnings.txt').text(encoding='utf-8') result = (app.outdir / 'warnings.txt').text(encoding='utf-8')
expect = (u"3. I18N WITH REST WARNINGS" expect = ("3. I18N WITH REST WARNINGS"
u"\n**************************\n" "\n**************************\n"
u"\nLINE OF >>``<<BROKEN LITERAL MARKUP.\n") "\nLINE OF >>``<<BROKEN LITERAL MARKUP.\n")
assert result == expect assert result == expect
@ -142,9 +142,9 @@ def test_text_title_underline(app):
app.build() app.build()
# --- simple translation; check title underlines # --- simple translation; check title underlines
result = (app.outdir / 'bom.txt').text(encoding='utf-8') result = (app.outdir / 'bom.txt').text(encoding='utf-8')
expect = (u"2. Datei mit UTF-8" expect = ("2. Datei mit UTF-8"
u"\n******************\n" # underline matches new translation "\n******************\n" # underline matches new translation
u"\nThis file has umlauts: äöü.\n") "\nThis file has umlauts: äöü.\n")
assert result == expect assert result == expect
@ -155,7 +155,7 @@ def test_text_subdirs(app):
app.build() app.build()
# --- check translation in subdirs # --- check translation in subdirs
result = (app.outdir / 'subdir' / 'index.txt').text(encoding='utf-8') result = (app.outdir / 'subdir' / 'index.txt').text(encoding='utf-8')
assert_startswith(result, u"1. subdir contents\n******************\n") assert_startswith(result, "1. subdir contents\n******************\n")
@sphinx_intl @sphinx_intl
@ -165,46 +165,46 @@ def test_text_inconsistency_warnings(app, warning):
app.build() app.build()
# --- check warnings for inconsistency in number of references # --- check warnings for inconsistency in number of references
result = (app.outdir / 'refs_inconsistency.txt').text(encoding='utf-8') result = (app.outdir / 'refs_inconsistency.txt').text(encoding='utf-8')
expect = (u"8. I18N WITH REFS INCONSISTENCY" expect = ("8. I18N WITH REFS INCONSISTENCY"
u"\n*******************************\n" "\n*******************************\n"
u"\n* FOR CITATION [ref3].\n" "\n* FOR CITATION [ref3].\n"
u"\n* reference FOR reference.\n" "\n* reference FOR reference.\n"
u"\n* ORPHAN REFERENCE: I18N WITH REFS INCONSISTENCY.\n" "\n* ORPHAN REFERENCE: I18N WITH REFS INCONSISTENCY.\n"
u"\n[1] THIS IS A AUTO NUMBERED FOOTNOTE.\n" "\n[1] THIS IS A AUTO NUMBERED FOOTNOTE.\n"
u"\n[ref2] THIS IS A CITATION.\n" "\n[ref2] THIS IS A CITATION.\n"
u"\n[100] THIS IS A NUMBERED FOOTNOTE.\n") "\n[100] THIS IS A NUMBERED FOOTNOTE.\n")
assert result == expect assert result == expect
warnings = getwarning(warning) warnings = getwarning(warning)
warning_fmt = u'.*/refs_inconsistency.txt:\\d+: ' \ warning_fmt = ('.*/refs_inconsistency.txt:\\d+: '
u'WARNING: inconsistent %(reftype)s in translated message.' \ 'WARNING: inconsistent %(reftype)s in translated message.'
u' original: %(original)s, translated: %(translated)s\n' ' original: %(original)s, translated: %(translated)s\n')
expected_warning_expr = ( expected_warning_expr = (
warning_fmt % { warning_fmt % {
u'reftype': u'footnote references', 'reftype': 'footnote references',
u'original': u"\\[u?'\\[#\\]_'\\]", 'original': "\\['\\[#\\]_'\\]",
u'translated': u"\\[\\]" 'translated': "\\[\\]"
} + } +
warning_fmt % { warning_fmt % {
u'reftype': u'footnote references', 'reftype': 'footnote references',
u'original': u"\\[u?'\\[100\\]_'\\]", 'original': "\\['\\[100\\]_'\\]",
u'translated': u"\\[\\]" 'translated': "\\[\\]"
} + } +
warning_fmt % { warning_fmt % {
u'reftype': u'references', 'reftype': 'references',
u'original': u"\\[u?'reference_'\\]", 'original': "\\['reference_'\\]",
u'translated': u"\\[u?'reference_', u?'reference_'\\]" 'translated': "\\['reference_', 'reference_'\\]"
} + } +
warning_fmt % { warning_fmt % {
u'reftype': u'references', 'reftype': 'references',
u'original': u"\\[\\]", 'original': "\\[\\]",
u'translated': u"\\[u?'`I18N WITH REFS INCONSISTENCY`_'\\]" 'translated': "\\['`I18N WITH REFS INCONSISTENCY`_'\\]"
}) })
assert_re_search(expected_warning_expr, warnings) assert_re_search(expected_warning_expr, warnings)
expected_citation_warning_expr = ( expected_citation_warning_expr = (
u'.*/refs_inconsistency.txt:\\d+: WARNING: Citation \\[ref2\\] is not referenced.\n' + '.*/refs_inconsistency.txt:\\d+: WARNING: Citation \\[ref2\\] is not referenced.\n' +
u'.*/refs_inconsistency.txt:\\d+: WARNING: citation not found: ref3') '.*/refs_inconsistency.txt:\\d+: WARNING: citation not found: ref3')
assert_re_search(expected_citation_warning_expr, warnings) assert_re_search(expected_citation_warning_expr, warnings)
@ -215,18 +215,18 @@ def test_text_literalblock_warnings(app, warning):
app.build() app.build()
# --- check warning for literal block # --- check warning for literal block
result = (app.outdir / 'literalblock.txt').text(encoding='utf-8') result = (app.outdir / 'literalblock.txt').text(encoding='utf-8')
expect = (u"9. I18N WITH LITERAL BLOCK" expect = ("9. I18N WITH LITERAL BLOCK"
u"\n**************************\n" "\n**************************\n"
u"\nCORRECT LITERAL BLOCK:\n" "\nCORRECT LITERAL BLOCK:\n"
u"\n this is" "\n this is"
u"\n literal block\n" "\n literal block\n"
u"\nMISSING LITERAL BLOCK:\n" "\nMISSING LITERAL BLOCK:\n"
u"\n<SYSTEM MESSAGE:") "\n<SYSTEM MESSAGE:")
assert_startswith(result, expect) assert_startswith(result, expect)
warnings = getwarning(warning) warnings = getwarning(warning)
expected_warning_expr = u'.*/literalblock.txt:\\d+: ' \ expected_warning_expr = ('.*/literalblock.txt:\\d+: '
u'WARNING: Literal block expected; none found.' 'WARNING: Literal block expected; none found.')
assert_re_search(expected_warning_expr, warnings) assert_re_search(expected_warning_expr, warnings)
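
Several of these tests funnel the warning stream through a getwarning() helper before regex matching. A self-contained approximation (assumed, not copied from the test suite) that normalizes path separators and drops ANSI color escapes:

    import os
    import re

    def getwarning(warning_stream):
        # Flatten the captured warning stream for cross-platform matching.
        text = warning_stream.getvalue().replace(os.sep, '/')
        return re.sub('\x1b\\[[0-9;]*m', '', text)  # strip ANSI colors
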
@ -237,17 +237,16 @@ def test_text_definition_terms(app):
app.build() app.build()
# --- definition terms: regression test for #975, #2198, #2205 # --- definition terms: regression test for #975, #2198, #2205
result = (app.outdir / 'definition_terms.txt').text(encoding='utf-8') result = (app.outdir / 'definition_terms.txt').text(encoding='utf-8')
expect = (u"13. I18N WITH DEFINITION TERMS" expect = ("13. I18N WITH DEFINITION TERMS"
u"\n******************************\n" "\n******************************\n"
u"\nSOME TERM" "\nSOME TERM"
u"\n THE CORRESPONDING DEFINITION\n" "\n THE CORRESPONDING DEFINITION\n"
u"\nSOME *TERM* WITH LINK" "\nSOME *TERM* WITH LINK"
u"\n THE CORRESPONDING DEFINITION #2\n" "\n THE CORRESPONDING DEFINITION #2\n"
u"\nSOME **TERM** WITH : CLASSIFIER1 : CLASSIFIER2" "\nSOME **TERM** WITH : CLASSIFIER1 : CLASSIFIER2"
u"\n THE CORRESPONDING DEFINITION\n" "\n THE CORRESPONDING DEFINITION\n"
u"\nSOME TERM WITH : CLASSIFIER[]" "\nSOME TERM WITH : CLASSIFIER[]"
u"\n THE CORRESPONDING DEFINITION\n" "\n THE CORRESPONDING DEFINITION\n")
)
assert result == expect assert result == expect
@ -258,13 +257,13 @@ def test_text_glossary_term(app, warning):
app.build() app.build()
# --- glossary terms: regression test for #1090 # --- glossary terms: regression test for #1090
result = (app.outdir / 'glossary_terms.txt').text(encoding='utf-8') result = (app.outdir / 'glossary_terms.txt').text(encoding='utf-8')
expect = (u"18. I18N WITH GLOSSARY TERMS" expect = ("18. I18N WITH GLOSSARY TERMS"
u"\n****************************\n" "\n****************************\n"
u"\nSOME NEW TERM" "\nSOME NEW TERM"
u"\n THE CORRESPONDING GLOSSARY\n" "\n THE CORRESPONDING GLOSSARY\n"
u"\nSOME OTHER NEW TERM" "\nSOME OTHER NEW TERM"
u"\n THE CORRESPONDING GLOSSARY #2\n" "\n THE CORRESPONDING GLOSSARY #2\n"
u"\nLINK TO *SOME NEW TERM*.\n") "\nLINK TO *SOME NEW TERM*.\n")
assert result == expect assert result == expect
warnings = getwarning(warning) warnings = getwarning(warning)
assert 'term not in glossary' not in warnings assert 'term not in glossary' not in warnings
@ -277,17 +276,17 @@ def test_text_glossary_term_inconsistencies(app, warning):
app.build() app.build()
# --- glossary term inconsistencies: regression test for #1090 # --- glossary term inconsistencies: regression test for #1090
result = (app.outdir / 'glossary_terms_inconsistency.txt').text(encoding='utf-8') result = (app.outdir / 'glossary_terms_inconsistency.txt').text(encoding='utf-8')
expect = (u"19. I18N WITH GLOSSARY TERMS INCONSISTENCY" expect = ("19. I18N WITH GLOSSARY TERMS INCONSISTENCY"
u"\n******************************************\n" "\n******************************************\n"
u"\n1. LINK TO *SOME NEW TERM*.\n") "\n1. LINK TO *SOME NEW TERM*.\n")
assert result == expect assert result == expect
warnings = getwarning(warning) warnings = getwarning(warning)
expected_warning_expr = ( expected_warning_expr = (
u'.*/glossary_terms_inconsistency.txt:\\d+: ' '.*/glossary_terms_inconsistency.txt:\\d+: '
u'WARNING: inconsistent term references in translated message.' 'WARNING: inconsistent term references in translated message.'
u" original: \\[u?':term:`Some term`', u?':term:`Some other term`'\\]," " original: \\[':term:`Some term`', ':term:`Some other term`'\\],"
u" translated: \\[u?':term:`SOME NEW TERM`'\\]\n") " translated: \\[':term:`SOME NEW TERM`'\\]\n")
assert_re_search(expected_warning_expr, warnings) assert_re_search(expected_warning_expr, warnings)
@ -322,12 +321,12 @@ def test_text_seealso(app):
app.build() app.build()
# --- seealso # --- seealso
result = (app.outdir / 'seealso.txt').text(encoding='utf-8') result = (app.outdir / 'seealso.txt').text(encoding='utf-8')
expect = (u"12. I18N WITH SEEALSO" expect = ("12. I18N WITH SEEALSO"
u"\n*********************\n" "\n*********************\n"
u"\nSee also: SHORT TEXT 1\n" "\nSee also: SHORT TEXT 1\n"
u"\nSee also: LONG TEXT 1\n" "\nSee also: LONG TEXT 1\n"
u"\nSee also: SHORT TEXT 2\n" "\nSee also: SHORT TEXT 2\n"
u"\n LONG TEXT 2\n") "\n LONG TEXT 2\n")
assert result == expect assert result == expect
@ -338,39 +337,38 @@ def test_text_figure_captions(app):
app.build() app.build()
# --- figure captions: regression test for #940 # --- figure captions: regression test for #940
result = (app.outdir / 'figure.txt').text(encoding='utf-8') result = (app.outdir / 'figure.txt').text(encoding='utf-8')
expect = (u"14. I18N WITH FIGURE CAPTION" expect = ("14. I18N WITH FIGURE CAPTION"
u"\n****************************\n" "\n****************************\n"
u"\n [image]MY CAPTION OF THE FIGURE\n" "\n [image]MY CAPTION OF THE FIGURE\n"
u"\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n" "\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n"
u"\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n" "\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n"
u"\n" "\n"
u"\n14.1. FIGURE IN THE BLOCK" "\n14.1. FIGURE IN THE BLOCK"
u"\n=========================\n" "\n=========================\n"
u"\nBLOCK\n" "\nBLOCK\n"
u"\n [image]MY CAPTION OF THE FIGURE\n" "\n [image]MY CAPTION OF THE FIGURE\n"
u"\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n" "\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n"
u"\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n" "\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n"
u"\n" "\n"
u"\n" "\n"
u"14.2. IMAGE URL AND ALT\n" "14.2. IMAGE URL AND ALT\n"
u"=======================\n" "=======================\n"
u"\n" "\n"
u"[image: i18n][image]\n" "[image: i18n][image]\n"
u"\n" "\n"
u" [image: img][image]\n" " [image: img][image]\n"
u"\n" "\n"
u"\n" "\n"
u"14.3. IMAGE ON SUBSTITUTION\n" "14.3. IMAGE ON SUBSTITUTION\n"
u"===========================\n" "===========================\n"
u"\n" "\n"
u"\n" "\n"
u"14.4. IMAGE UNDER NOTE\n" "14.4. IMAGE UNDER NOTE\n"
u"======================\n" "======================\n"
u"\n" "\n"
u"Note: [image: i18n under note][image]\n" "Note: [image: i18n under note][image]\n"
u"\n" "\n"
u" [image: img under note][image]\n" " [image: img under note][image]\n")
)
assert result == expect assert result == expect
@ -381,14 +379,14 @@ def test_text_rubric(app):
app.build() app.build()
# --- rubric: regression test for pull request #190 # --- rubric: regression test for pull request #190
result = (app.outdir / 'rubric.txt').text(encoding='utf-8') result = (app.outdir / 'rubric.txt').text(encoding='utf-8')
expect = (u"I18N WITH RUBRIC" expect = ("I18N WITH RUBRIC"
u"\n****************\n" "\n****************\n"
u"\n-[ RUBRIC TITLE ]-\n" "\n-[ RUBRIC TITLE ]-\n"
u"\n" "\n"
u"\nRUBRIC IN THE BLOCK" "\nRUBRIC IN THE BLOCK"
u"\n===================\n" "\n===================\n"
u"\nBLOCK\n" "\nBLOCK\n"
u"\n -[ RUBRIC TITLE ]-\n") "\n -[ RUBRIC TITLE ]-\n")
assert result == expect assert result == expect
@ -399,25 +397,25 @@ def test_text_docfields(app):
app.build() app.build()
# --- docfields # --- docfields
result = (app.outdir / 'docfields.txt').text(encoding='utf-8') result = (app.outdir / 'docfields.txt').text(encoding='utf-8')
expect = (u"21. I18N WITH DOCFIELDS" expect = ("21. I18N WITH DOCFIELDS"
u"\n***********************\n" "\n***********************\n"
u"\nclass Cls1\n" "\nclass Cls1\n"
u"\n Parameters:" "\n Parameters:"
u"\n **param** -- DESCRIPTION OF PARAMETER param\n" "\n **param** -- DESCRIPTION OF PARAMETER param\n"
u"\nclass Cls2\n" "\nclass Cls2\n"
u"\n Parameters:" "\n Parameters:"
u"\n * **foo** -- DESCRIPTION OF PARAMETER foo\n" "\n * **foo** -- DESCRIPTION OF PARAMETER foo\n"
u"\n * **bar** -- DESCRIPTION OF PARAMETER bar\n" "\n * **bar** -- DESCRIPTION OF PARAMETER bar\n"
u"\nclass Cls3(values)\n" "\nclass Cls3(values)\n"
u"\n Raises:" "\n Raises:"
u"\n **ValueError** -- IF THE VALUES ARE OUT OF RANGE\n" "\n **ValueError** -- IF THE VALUES ARE OUT OF RANGE\n"
u"\nclass Cls4(values)\n" "\nclass Cls4(values)\n"
u"\n Raises:" "\n Raises:"
u"\n * **TypeError** -- IF THE VALUES ARE NOT VALID\n" "\n * **TypeError** -- IF THE VALUES ARE NOT VALID\n"
u"\n * **ValueError** -- IF THE VALUES ARE OUT OF RANGE\n" "\n * **ValueError** -- IF THE VALUES ARE OUT OF RANGE\n"
u"\nclass Cls5\n" "\nclass Cls5\n"
u"\n Returns:" "\n Returns:"
u'\n A NEW "Cls3" INSTANCE\n') '\n A NEW "Cls3" INSTANCE\n')
assert result == expect assert result == expect
@ -708,21 +706,21 @@ def test_html_versionchanges(app):
return '' return ''
expect1 = ( expect1 = (
u"""<p><span class="versionmodified">Deprecated since version 1.0: </span>""" """<p><span class="versionmodified">Deprecated since version 1.0: </span>"""
u"""THIS IS THE <em>FIRST</em> PARAGRAPH OF DEPRECATED.</p>\n""" """THIS IS THE <em>FIRST</em> PARAGRAPH OF DEPRECATED.</p>\n"""
u"""<p>THIS IS THE <em>SECOND</em> PARAGRAPH OF DEPRECATED.</p>\n""") """<p>THIS IS THE <em>SECOND</em> PARAGRAPH OF DEPRECATED.</p>\n""")
matched_content = get_content(result, "deprecated") matched_content = get_content(result, "deprecated")
assert expect1 == matched_content assert expect1 == matched_content
expect2 = ( expect2 = (
u"""<p><span class="versionmodified">New in version 1.0: </span>""" """<p><span class="versionmodified">New in version 1.0: </span>"""
u"""THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSIONADDED.</p>\n""") """THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSIONADDED.</p>\n""")
matched_content = get_content(result, "versionadded") matched_content = get_content(result, "versionadded")
assert expect2 == matched_content assert expect2 == matched_content
expect3 = ( expect3 = (
u"""<p><span class="versionmodified">Changed in version 1.0: </span>""" """<p><span class="versionmodified">Changed in version 1.0: </span>"""
u"""THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSIONCHANGED.</p>\n""") """THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSIONCHANGED.</p>\n""")
matched_content = get_content(result, "versionchanged") matched_content = get_content(result, "versionchanged")
assert expect3 == matched_content assert expect3 == matched_content
@ -816,7 +814,7 @@ def test_xml_footnotes(app, warning):
['ref']) ['ref'])
warnings = getwarning(warning) warnings = getwarning(warning)
warning_expr = u'.*/footnote.xml:\\d*: SEVERE: Duplicate ID: ".*".\n' warning_expr = '.*/footnote.xml:\\d*: SEVERE: Duplicate ID: ".*".\n'
assert_not_re_search(warning_expr, warnings) assert_not_re_search(warning_expr, warnings)
@ -1177,7 +1175,7 @@ def test_text_references(app, warning):
app.builder.build_specific([app.srcdir / 'refs.txt']) app.builder.build_specific([app.srcdir / 'refs.txt'])
warnings = warning.getvalue().replace(os.sep, '/') warnings = warning.getvalue().replace(os.sep, '/')
warning_expr = u'refs.txt:\\d+: ERROR: Unknown target name:' warning_expr = 'refs.txt:\\d+: ERROR: Unknown target name:'
assert_count(warning_expr, warnings, 0) assert_count(warning_expr, warnings, 0)
@ -1231,7 +1229,7 @@ def test_image_glob_intl(app):
srcdir='test_intl_images', srcdir='test_intl_images',
confoverrides={ confoverrides={
'language': 'xx', 'language': 'xx',
'figure_language_filename': u'{root}{ext}.{language}', 'figure_language_filename': '{root}{ext}.{language}',
} }
) )
@pytest.mark.xfail(os.name != 'posix', reason="Not working on windows") @pytest.mark.xfail(os.name != 'posix', reason="Not working on windows")
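
The figure_language_filename override above controls where localized images are looked up. Conceptually the pattern is expanded like this (a simplified sketch, not Sphinx's exact code, which supports further fields such as {path}):

    from os import path

    def localized_image(filename, language, pattern='{root}{ext}.{language}'):
        root, ext = path.splitext(filename)
        return pattern.format(root=root, ext=ext, language=language)

    assert localized_image('rimg.png', 'xx') == 'rimg.png.xx'
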

View File

@ -163,14 +163,14 @@ def get_verifier(verify, verify_re):
# interpolation of arrows in menuselection # interpolation of arrows in menuselection
'verify', 'verify',
':menuselection:`a --> b`', ':menuselection:`a --> b`',
(u'<p><span class="menuselection">a \N{TRIANGULAR BULLET} b</span></p>'), ('<p><span class="menuselection">a \N{TRIANGULAR BULLET} b</span></p>'),
'\\sphinxmenuselection{a \\(\\rightarrow\\) b}', '\\sphinxmenuselection{a \\(\\rightarrow\\) b}',
), ),
( (
# interpolation of ampersands in menuselection # interpolation of ampersands in menuselection
'verify', 'verify',
':menuselection:`&Foo -&&- &Bar`', ':menuselection:`&Foo -&&- &Bar`',
(u'<p><span class="menuselection"><span class="accelerator">F</span>oo ' ('<p><span class="menuselection"><span class="accelerator">F</span>oo '
'-&amp;- <span class="accelerator">B</span>ar</span></p>'), '-&amp;- <span class="accelerator">B</span>ar</span></p>'),
r'\sphinxmenuselection{\sphinxaccelerator{F}oo -\&- \sphinxaccelerator{B}ar}', r'\sphinxmenuselection{\sphinxaccelerator{F}oo -\&- \sphinxaccelerator{B}ar}',
), ),
@ -178,7 +178,7 @@ def get_verifier(verify, verify_re):
# interpolation of ampersands in guilabel # interpolation of ampersands in guilabel
'verify', 'verify',
':guilabel:`&Foo -&&- &Bar`', ':guilabel:`&Foo -&&- &Bar`',
(u'<p><span class="guilabel"><span class="accelerator">F</span>oo ' ('<p><span class="guilabel"><span class="accelerator">F</span>oo '
'-&amp;- <span class="accelerator">B</span>ar</span></p>'), '-&amp;- <span class="accelerator">B</span>ar</span></p>'),
r'\sphinxguilabel{\sphinxaccelerator{F}oo -\&- \sphinxaccelerator{B}ar}', r'\sphinxguilabel{\sphinxaccelerator{F}oo -\&- \sphinxaccelerator{B}ar}',
), ),
@ -194,8 +194,8 @@ def get_verifier(verify, verify_re):
# verify smarty-pants quotes # verify smarty-pants quotes
'verify', 'verify',
'"John"', '"John"',
u'<p>“John”</p>', '<p>“John”</p>',
u"“John”", "“John”",
), ),
( (
# ... but not in literal text # ... but not in literal text
@ -215,24 +215,24 @@ def get_verifier(verify, verify_re):
( (
# correct escaping in normal mode # correct escaping in normal mode
'verify', 'verify',
u'Γ\\\\∞$', 'Γ\\\\∞$',
None, None,
u'Γ\\textbackslash{}\\(\\infty\\)\\$', 'Γ\\textbackslash{}\\(\\infty\\)\\$',
), ),
( (
# in verbatim code fragments # in verbatim code fragments
'verify', 'verify',
u'::\n\n\\∞${}', '::\n\n\\∞${}',
None, None,
(u'\\fvset{hllines={, ,}}%\n' ('\\fvset{hllines={, ,}}%\n'
u'\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n' '\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
u'\\PYGZbs{}\\(\\infty\\)\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n' '\\PYGZbs{}\\(\\infty\\)\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
u'\\end{sphinxVerbatim}'), '\\end{sphinxVerbatim}'),
), ),
( (
# in URIs # in URIs
'verify_re', 'verify_re',
u'`test <http://example.com/~me/>`_', '`test <http://example.com/~me/>`_',
None, None,
r'\\sphinxhref{http://example.com/~me/}{test}.*', r'\\sphinxhref{http://example.com/~me/}{test}.*',
), ),

View File

@ -26,27 +26,27 @@ def test_docinfo(app, status, warning):
""" """
app.build() app.build()
expecteddocinfo = { expecteddocinfo = {
'author': u'David Goodger', 'author': 'David Goodger',
'authors': [u'Me', u'Myself', u'I'], 'authors': ['Me', 'Myself', 'I'],
'address': u'123 Example Street\nExample, EX Canada\nA1B 2C3', 'address': '123 Example Street\nExample, EX Canada\nA1B 2C3',
'field name': u'This is a generic bibliographic field.', 'field name': 'This is a generic bibliographic field.',
'field name 2': (u'Generic bibliographic fields may contain multiple ' 'field name 2': ('Generic bibliographic fields may contain multiple '
u'body elements.\n\nLike this.'), 'body elements.\n\nLike this.'),
'status': u'This is a “work in progress”', 'status': 'This is a “work in progress”',
'version': u'1', 'version': '1',
'copyright': (u'This document has been placed in the public domain. ' 'copyright': ('This document has been placed in the public domain. '
u'You\nmay do with it as you wish. You may copy, modify,' 'You\nmay do with it as you wish. You may copy, modify,'
u'\nredistribute, reattribute, sell, buy, rent, lease,\n' '\nredistribute, reattribute, sell, buy, rent, lease,\n'
u'destroy, or improve it, quote it at length, excerpt,\n' 'destroy, or improve it, quote it at length, excerpt,\n'
u'incorporate, collate, fold, staple, or mutilate it, or ' 'incorporate, collate, fold, staple, or mutilate it, or '
u'do\nanything else to it that your or anyone else’s ' 'do\nanything else to it that your or anyone else’s '
u'heart\ndesires.'), 'heart\ndesires.'),
'contact': u'goodger@python.org', 'contact': 'goodger@python.org',
'date': u'2006-05-21', 'date': '2006-05-21',
'organization': u'humankind', 'organization': 'humankind',
'revision': u'4564', 'revision': '4564',
'tocdepth': 1, 'tocdepth': 1,
'orphan': u'', 'orphan': '',
'nocomments': u'', 'nocomments': '',
} }
assert app.env.metadata['index'] == expecteddocinfo assert app.env.metadata['index'] == expecteddocinfo

View File

@ -92,16 +92,16 @@ def test_do_prompt_inputstrip():
def test_do_prompt_with_nonascii(): def test_do_prompt_with_nonascii():
answers = { answers = {
'Q1': u'\u30c9\u30a4\u30c4', 'Q1': '\u30c9\u30a4\u30c4',
} }
qs.term_input = mock_input(answers) qs.term_input = mock_input(answers)
try: try:
result = qs.do_prompt('Q1', default=u'\u65e5\u672c') result = qs.do_prompt('Q1', default='\u65e5\u672c')
except UnicodeEncodeError: except UnicodeEncodeError:
raise pytest.skip.Exception( raise pytest.skip.Exception(
'non-ASCII console input not supported on this encoding: %s', 'non-ASCII console input not supported on this encoding: %s',
qs.TERM_ENCODING) qs.TERM_ENCODING)
assert result == u'\u30c9\u30a4\u30c4' assert result == '\u30c9\u30a4\u30c4'
def test_quickstart_defaults(tempdir): def test_quickstart_defaults(tempdir):
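
do_prompt() reads input through the module-level term_input hook, which is why the test can swap it for a canned-answer mock. The same trick in isolation (a sketch assuming only the public do_prompt(text, default=...) signature):

    from sphinx.cmd import quickstart as qs

    qs.term_input = lambda prompt: '\u30c9\u30a4\u30c4'   # canned answer
    assert qs.do_prompt('Q1', default='\u65e5\u672c') == '\u30c9\u30a4\u30c4'
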
@ -145,8 +145,8 @@ def test_quickstart_all_answers(tempdir):
'Root path': tempdir, 'Root path': tempdir,
'Separate source and build': 'y', 'Separate source and build': 'y',
'Name prefix for templates': '.', 'Name prefix for templates': '.',
'Project name': u'STASI™'.encode(), 'Project name': 'STASI™'.encode(),
'Author name': u'Wolfgang Schäuble & G\'Beckstein'.encode(), 'Author name': 'Wolfgang Schäuble & G\'Beckstein'.encode(),
'Project version': '2.0', 'Project version': '2.0',
'Project release': '2.0.1', 'Project release': '2.0.1',
'Project language': 'de', 'Project language': 'de',
@ -182,16 +182,16 @@ def test_quickstart_all_answers(tempdir):
assert ns['templates_path'] == ['.templates'] assert ns['templates_path'] == ['.templates']
assert ns['source_suffix'] == '.txt' assert ns['source_suffix'] == '.txt'
assert ns['master_doc'] == 'contents' assert ns['master_doc'] == 'contents'
assert ns['project'] == u'STASI™' assert ns['project'] == 'STASI™'
assert ns['copyright'] == u'%s, Wolfgang Schäuble & G\'Beckstein' % \ assert ns['copyright'] == '%s, Wolfgang Schäuble & G\'Beckstein' % \
time.strftime('%Y') time.strftime('%Y')
assert ns['version'] == '2.0' assert ns['version'] == '2.0'
assert ns['release'] == '2.0.1' assert ns['release'] == '2.0.1'
assert ns['todo_include_todos'] is True assert ns['todo_include_todos'] is True
assert ns['html_static_path'] == ['.static'] assert ns['html_static_path'] == ['.static']
assert ns['latex_documents'] == [ assert ns['latex_documents'] == [
('contents', 'STASI.tex', u'STASI™ Documentation', ('contents', 'STASI.tex', 'STASI™ Documentation',
u'Wolfgang Schäuble \\& G\'Beckstein', 'manual')] 'Wolfgang Schäuble \\& G\'Beckstein', 'manual')]
assert (tempdir / 'build').isdir() assert (tempdir / 'build').isdir()
assert (tempdir / 'source' / '.static').isdir() assert (tempdir / 'source' / '.static').isdir()
@ -222,7 +222,7 @@ def test_generated_files_eol(tempdir):
def test_quickstart_and_build(tempdir): def test_quickstart_and_build(tempdir):
answers = { answers = {
'Root path': tempdir, 'Root path': tempdir,
'Project name': u'Fullwidth characters: \u30c9\u30a4\u30c4', 'Project name': 'Fullwidth characters: \u30c9\u30a4\u30c4',
'Author name': 'Georg Brandl', 'Author name': 'Georg Brandl',
'Project version': '0.1', 'Project version': '0.1',
} }
@ -247,7 +247,7 @@ def test_quickstart_and_build(tempdir):
def test_default_filename(tempdir): def test_default_filename(tempdir):
answers = { answers = {
'Root path': tempdir, 'Root path': tempdir,
'Project name': u'\u30c9\u30a4\u30c4', # Fullwidth characters only 'Project name': '\u30c9\u30a4\u30c4', # Fullwidth characters only
'Author name': 'Georg Brandl', 'Author name': 'Georg Brandl',
'Project version': '0.1', 'Project version': '0.1',
} }

View File

@ -79,7 +79,7 @@ def test_build_sphinx_multiple_invalid_builders(setup_command):
@pytest.fixture @pytest.fixture
def nonascii_srcdir(request, setup_command): def nonascii_srcdir(request, setup_command):
mb_name = u'\u65e5\u672c\u8a9e' mb_name = '\u65e5\u672c\u8a9e'
srcdir = (setup_command.pkgroot / 'doc') srcdir = (setup_command.pkgroot / 'doc')
try: try:
(srcdir / mb_name).makedirs() (srcdir / mb_name).makedirs()

View File

@ -19,7 +19,7 @@ def test_basic(app, status, warning):
app.build() app.build()
content = (app.outdir / 'index.html').text() content = (app.outdir / 'index.html').text()
assert u'<p> “Sphinx” is a tool that makes it easy …</p>' in content assert '<p> “Sphinx” is a tool that makes it easy …</p>' in content
@pytest.mark.sphinx(buildername='text', testroot='smartquotes', freshenv=True) @pytest.mark.sphinx(buildername='text', testroot='smartquotes', freshenv=True)
@ -27,7 +27,7 @@ def test_text_builder(app, status, warning):
app.build() app.build()
content = (app.outdir / 'index.txt').text() content = (app.outdir / 'index.txt').text()
assert u'-- "Sphinx" is a tool that makes it easy ...' in content assert '-- "Sphinx" is a tool that makes it easy ...' in content
@pytest.mark.sphinx(buildername='man', testroot='smartquotes', freshenv=True) @pytest.mark.sphinx(buildername='man', testroot='smartquotes', freshenv=True)
@ -35,7 +35,7 @@ def test_man_builder(app, status, warning):
app.build() app.build()
content = (app.outdir / 'python.1').text() content = (app.outdir / 'python.1').text()
assert u'\\-\\- "Sphinx" is a tool that makes it easy ...' in content assert '\\-\\- "Sphinx" is a tool that makes it easy ...' in content
@pytest.mark.sphinx(buildername='latex', testroot='smartquotes', freshenv=True) @pytest.mark.sphinx(buildername='latex', testroot='smartquotes', freshenv=True)
@ -43,7 +43,7 @@ def test_latex_builder(app, status, warning):
app.build() app.build()
content = (app.outdir / 'test.tex').text() content = (app.outdir / 'test.tex').text()
assert u'\\textendash{} “Sphinx” is a tool that makes it easy …' in content assert '\\textendash{} “Sphinx” is a tool that makes it easy …' in content
@pytest.mark.sphinx(buildername='html', testroot='smartquotes', freshenv=True, @pytest.mark.sphinx(buildername='html', testroot='smartquotes', freshenv=True,
@ -52,7 +52,7 @@ def test_ja_html_builder(app, status, warning):
app.build() app.build()
content = (app.outdir / 'index.html').text() content = (app.outdir / 'index.html').text()
assert u'<p>-- &quot;Sphinx&quot; is a tool that makes it easy ...</p>' in content assert '<p>-- &quot;Sphinx&quot; is a tool that makes it easy ...</p>' in content
@pytest.mark.sphinx(buildername='html', testroot='smartquotes', freshenv=True, @pytest.mark.sphinx(buildername='html', testroot='smartquotes', freshenv=True,
@ -61,7 +61,7 @@ def test_smartquotes_disabled(app, status, warning):
app.build() app.build()
content = (app.outdir / 'index.html').text() content = (app.outdir / 'index.html').text()
assert u'<p>-- &quot;Sphinx&quot; is a tool that makes it easy ...</p>' in content assert '<p>-- &quot;Sphinx&quot; is a tool that makes it easy ...</p>' in content
@pytest.mark.skipif(docutils.__version_info__ < (0, 14), @pytest.mark.skipif(docutils.__version_info__ < (0, 14),
@ -72,7 +72,7 @@ def test_smartquotes_action(app, status, warning):
app.build() app.build()
content = (app.outdir / 'index.html').text() content = (app.outdir / 'index.html').text()
assert u'<p>-- “Sphinx” is a tool that makes it easy ...</p>' in content assert '<p>-- “Sphinx” is a tool that makes it easy ...</p>' in content
@pytest.mark.sphinx(buildername='html', testroot='smartquotes', freshenv=True, @pytest.mark.sphinx(buildername='html', testroot='smartquotes', freshenv=True,
@ -81,7 +81,7 @@ def test_smartquotes_excludes_language(app, status, warning):
app.build() app.build()
content = (app.outdir / 'index.html').text() content = (app.outdir / 'index.html').text()
assert u'<p> 「Sphinx」 is a tool that makes it easy …</p>' in content assert '<p> 「Sphinx」 is a tool that makes it easy …</p>' in content
@pytest.mark.sphinx(buildername='man', testroot='smartquotes', freshenv=True, @pytest.mark.sphinx(buildername='man', testroot='smartquotes', freshenv=True,
@ -90,4 +90,4 @@ def test_smartquotes_excludes_builders(app, status, warning):
app.build() app.build()
content = (app.outdir / 'python.1').text() content = (app.outdir / 'python.1').text()
assert u'– “Sphinx” is a tool that makes it easy …' in content assert '– “Sphinx” is a tool that makes it easy …' in content
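
For reference, the knobs these builder/language cases exercise live in conf.py. Shown with illustrative values; the defaults differ (smartquotes_action defaults to 'qDe', covering quotes, dashes, and ellipses):

    # conf.py (illustrative values)
    smartquotes = True                    # False disables the transform entirely
    smartquotes_action = 'q'              # quotes only; no dashes or ellipses
    smartquotes_excludes = {'languages': ['ja'],
                            'builders': ['man', 'text']}
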

View File

@ -26,17 +26,17 @@ from sphinx.util import logging
def test_encode_uri(): def test_encode_uri():
expected = (u'https://ru.wikipedia.org/wiki/%D0%A1%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D0%B0_' expected = ('https://ru.wikipedia.org/wiki/%D0%A1%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D0%B0_'
u'%D1%83%D0%BF%D1%80%D0%B0%D0%B2%D0%BB%D0%B5%D0%BD%D0%B8%D1%8F_' '%D1%83%D0%BF%D1%80%D0%B0%D0%B2%D0%BB%D0%B5%D0%BD%D0%B8%D1%8F_'
u'%D0%B1%D0%B0%D0%B7%D0%B0%D0%BC%D0%B8_%D0%B4%D0%B0%D0%BD%D0%BD%D1%8B%D1%85') '%D0%B1%D0%B0%D0%B7%D0%B0%D0%BC%D0%B8_%D0%B4%D0%B0%D0%BD%D0%BD%D1%8B%D1%85')
uri = (u'https://ru.wikipedia.org/wiki' uri = ('https://ru.wikipedia.org/wiki'
u'/Система_управления_базами_данных') '/Система_управления_базами_данных')
assert expected == encode_uri(uri) assert expected == encode_uri(uri)
expected = (u'https://github.com/search?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+is%3A' expected = ('https://github.com/search?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+is%3A'
u'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults') 'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults')
uri = (u'https://github.com/search?utf8=✓&q=is%3Aissue+is%3Aopen+is%3A' uri = ('https://github.com/search?utf8=✓&q=is%3Aissue+is%3Aopen+is%3A'
u'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults') 'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults')
assert expected == encode_uri(uri) assert expected == encode_uri(uri)
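
encode_uri() percent-encodes the non-ASCII pieces of a URL. The path part of the first expectation can be reproduced with just the standard library (an approximation of the helper, not its source):

    from urllib.parse import quote

    path = '/wiki/Система_управления_базами_данных'
    print(quote(path, safe='/'))
    # /wiki/%D0%A1%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D0%B0_... (as asserted above)
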

View File

@ -177,7 +177,7 @@ def test_format_date():
assert i18n.format_date(format, date=date, language='') == 'February 07, 2016' assert i18n.format_date(format, date=date, language='') == 'February 07, 2016'
assert i18n.format_date(format, date=date, language='unknown') == 'February 07, 2016' assert i18n.format_date(format, date=date, language='unknown') == 'February 07, 2016'
assert i18n.format_date(format, date=date, language='en') == 'February 07, 2016' assert i18n.format_date(format, date=date, language='en') == 'February 07, 2016'
assert i18n.format_date(format, date=date, language='ja') == u'2月 07, 2016' assert i18n.format_date(format, date=date, language='ja') == '2月 07, 2016'
assert i18n.format_date(format, date=date, language='de') == 'Februar 07, 2016' assert i18n.format_date(format, date=date, language='de') == 'Februar 07, 2016'
# raw string # raw string
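
format_date() accepts a babel-style pattern plus an optional language, mirroring the assertions above:

    from datetime import datetime
    from sphinx.util import i18n

    date = datetime(2016, 2, 7)
    assert i18n.format_date('MMMM dd, YYYY', date=date,
                            language='en') == 'February 07, 2016'
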

View File

@ -11,7 +11,7 @@ def test_jsdump():
assert dumps(data) == '{a1:1}' assert dumps(data) == '{a1:1}'
assert data == loads(dumps(data)) assert data == loads(dumps(data))
data = {u'a\xe8': 1} data = {'a\xe8': 1}
assert dumps(data) == '{"a\\u00e8":1}' assert dumps(data) == '{"a\\u00e8":1}'
assert data == loads(dumps(data)) assert data == loads(dumps(data))
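
sphinx.util.jsdump emits a compact JavaScript-flavored literal (unquoted ASCII identifiers as keys, \u-escapes for everything else) and can read it back. The round trip the test checks:

    from sphinx.util.jsdump import dumps, loads

    data = {'a\xe8': 1}
    assert dumps(data) == '{"a\\u00e8":1}'
    assert loads(dumps(data)) == data
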

View File

@ -303,7 +303,7 @@ def test_output_with_unencodable_char(app, status, warning):
# info with UnicodeEncodeError # info with UnicodeEncodeError
status.truncate(0) status.truncate(0)
status.seek(0) status.seek(0)
logger.info(u"unicode \u206d...") logger.info("unicode \u206d...")
assert status.getvalue() == "unicode ?...\n" assert status.getvalue() == "unicode ?...\n"

View File

@ -128,6 +128,6 @@ def test_insert_similar():
new_nodes = list(merge_doctrees(original, insert_similar, is_paragraph)) new_nodes = list(merge_doctrees(original, insert_similar, is_paragraph))
uids = [n.uid for n in insert_similar.traverse(is_paragraph)] uids = [n.uid for n in insert_similar.traverse(is_paragraph)]
assert len(new_nodes) == 1 assert len(new_nodes) == 1
assert new_nodes[0].rawsource == u'Anyway I need more' assert new_nodes[0].rawsource == 'Anyway I need more'
assert original_uids[0] == uids[0] assert original_uids[0] == uids[0]
assert original_uids[1:] == uids[2:] assert original_uids[1:] == uids[2:]