Mirror of https://github.com/sphinx-doc/sphinx.git (synced 2025-02-25 18:55:22 -06:00)
Avoid respecifying default encoding for .encode()/.decode() calls
In Python 3, both .encode() and .decode() default the encoding to 'utf-8'. See the docs:

https://docs.python.org/3/library/stdtypes.html#str.encode
https://docs.python.org/3/library/stdtypes.html#bytes.decode

Simplify and shorten the code by using the default instead of respecifying it.
This commit is contained in:
  parent 6113261948
  commit 5bf25eb445
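
As a quick illustration of the premise (a minimal standalone sketch, not part of the patched Sphinx code): in Python 3 the argument-less forms are equivalent to passing 'utf-8' explicitly, so the md5/sha1 digests computed in the hunks below are unchanged by this commit.

    # Minimal sanity check of the behaviour this commit relies on (Python 3 only).
    from hashlib import md5, sha1

    text = 'Wolfgang Schäuble'  # any non-ASCII str

    # str.encode() defaults to 'utf-8' ...
    assert text.encode() == text.encode('utf-8')
    # ... and bytes.decode() defaults to 'utf-8' as well.
    assert text.encode().decode() == text

    # Hence the digests used throughout Sphinx stay identical when the
    # explicit encoding argument is dropped.
    assert md5(text.encode()).hexdigest() == md5(text.encode('utf-8')).hexdigest()
    assert sha1(text.encode()).hexdigest() == sha1(text.encode('utf-8')).hexdigest()
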
@@ -88,7 +88,7 @@ def get_stable_hash(obj):
         return get_stable_hash(list(obj.items()))
     elif isinstance(obj, (list, tuple)):
         obj = sorted(get_stable_hash(o) for o in obj)
-    return md5(text_type(obj).encode('utf8')).hexdigest()
+    return md5(text_type(obj).encode()).hexdigest()


 class Stylesheet(text_type):
@@ -172,7 +172,7 @@ def term_decode(text):
                        'and terminal encoding unknown -- assuming '
                        'UTF-8 or Latin-1.')))
     try:
-        return text.decode('utf-8')
+        return text.decode()
     except UnicodeDecodeError:
         return text.decode('latin1')

@@ -73,7 +73,7 @@ class ClickableMapDefinition:
         if self.id == '%3':
             # graphviz generates wrong ID if graph name not specified
             # https://gitlab.com/graphviz/graphviz/issues/1327
-            hashed = sha1(dot.encode('utf-8')).hexdigest()
+            hashed = sha1(dot.encode()).hexdigest()
             self.id = 'grapviz%s' % hashed[-10:]
             self.content[0] = self.content[0].replace('%3', self.id)

@@ -221,7 +221,7 @@ def render_dot(self, code, options, format, prefix='graphviz'):
     """Render graphviz code into a PNG or PDF output file."""
     graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
     hashkey = (code + str(options) + str(graphviz_dot) +
-               str(self.builder.config.graphviz_dot_args)).encode('utf-8')
+               str(self.builder.config.graphviz_dot_args)).encode()

     fname = '%s-%s.%s' % (prefix, sha1(hashkey).hexdigest(), format)
     relfn = posixpath.join(self.builder.imgpath, fname)
@@ -259,7 +259,7 @@ def render_dot(self, code, options, format, prefix='graphviz'):
     try:
         # Graphviz may close standard input when an error occurs,
         # resulting in a broken pipe on communicate()
-        stdout, stderr = p.communicate(code.encode('utf-8'))
+        stdout, stderr = p.communicate(code.encode())
     except (OSError, IOError) as err:
         if err.errno not in (EPIPE, EINVAL):
             raise
@@ -235,7 +235,7 @@ def render_math(self, math):

     latex = generate_latex_macro(math, self.builder.config)

-    filename = "%s.%s" % (sha1(latex.encode('utf-8')).hexdigest(), image_format)
+    filename = "%s.%s" % (sha1(latex.encode()).hexdigest(), image_format)
     relfn = posixpath.join(self.builder.imgpath, 'math', filename)
     outfn = path.join(self.builder.outdir, self.builder.imagedir, 'math', filename)
     if path.isfile(outfn):
@@ -389,7 +389,7 @@ class InheritanceDiagram(SphinxDirective):

 def get_graph_hash(node):
     # type: (inheritance_diagram) -> str
-    encoded = (node['content'] + str(node['parts'])).encode('utf-8')
+    encoded = (node['content'] + str(node['parts'])).encode()
     return md5(encoded).hexdigest()[-10:]


@@ -55,7 +55,7 @@ class ModuleAnalyzer:
             eggpath, relpath = re.split('(?<=\\.egg)/', filename)
             try:
                 with ZipFile(eggpath) as egg:
-                    code = egg.read(relpath).decode('utf-8')
+                    code = egg.read(relpath).decode()
                     return cls.for_string(code, modname, filename)
             except Exception as exc:
                 raise PycodeError('error opening %r' % filename, exc)
@@ -480,7 +480,7 @@ class Parser:
     def parse_comments(self):
         # type: () -> None
         """Parse the code and pick up comments."""
-        tree = ast.parse(self.code.encode('utf-8'))
+        tree = ast.parse(self.code.encode())
         picker = VariableCommentPicker(self.code.splitlines(True), self.encoding)
         picker.visit(tree)
         self.comments = picker.comments
@@ -289,7 +289,7 @@ class IndexBuilder:

         if scoring:
             with open(scoring, 'rb') as fp:
-                self.js_scorer_code = fp.read().decode('utf-8')
+                self.js_scorer_code = fp.read().decode()
         else:
             self.js_scorer_code = u''
         self.js_splitter_code = splitter_code
@@ -75,12 +75,12 @@ class ImageDownloader(BaseImageConverter):
                 basename = basename.split('?')[0]
             if basename == '' or len(basename) > MAX_FILENAME_LEN:
                 filename, ext = os.path.splitext(node['uri'])
-                basename = sha1(filename.encode("utf-8")).hexdigest() + ext
+                basename = sha1(filename.encode()).hexdigest() + ext

             dirname = node['uri'].replace('://', '/').translate({ord("?"): u"/",
                                                                  ord("&"): u"/"})
             if len(dirname) > MAX_FILENAME_LEN:
-                dirname = sha1(dirname.encode('utf-8')).hexdigest()
+                dirname = sha1(dirname.encode()).hexdigest()
             ensuredir(os.path.join(self.imagedir, dirname))
             path = os.path.join(self.imagedir, dirname, basename)

@@ -180,7 +180,7 @@ class DownloadFiles(dict):
     def add_file(self, docname, filename):
         # type: (str, str) -> None
         if filename not in self:
-            digest = md5(filename.encode('utf-8')).hexdigest()
+            digest = md5(filename.encode()).hexdigest()
             dest = '%s/%s' % (digest, os.path.basename(filename))
             self[filename] = (set(), dest)

@@ -266,7 +266,7 @@ def save_traceback(app):
                       platform.python_implementation(),
                       docutils.__version__, docutils.__version_details__,
                       jinja2.__version__,  # type: ignore
-                      last_msgs)).encode('utf-8'))
+                      last_msgs)).encode())
    if app is not None:
        for ext in app.extensions.values():
            modfile = getattr(ext.module, '__file__', 'unknown')
@@ -274,8 +274,8 @@ def save_traceback(app):
                modfile = modfile.decode(fs_encoding, 'replace')
            if ext.version != 'builtin':
                os.write(fd, ('# %s (%s) from %s\n' %
-                             (ext.name, ext.version, modfile)).encode('utf-8'))
-    os.write(fd, exc_format.encode('utf-8'))
+                             (ext.name, ext.version, modfile)).encode())
+    os.write(fd, exc_format.encode())
    os.close(fd)
    return path

@@ -490,7 +490,7 @@ def force_decode(string, encoding):
                string = string.decode(encoding)
            else:
                # try decoding with utf-8, should only work for real UTF-8
-                string = string.decode('utf-8')
+                string = string.decode()
        except UnicodeError:
            # last resort -- can't fail
            string = string.decode('latin1')
@@ -632,8 +632,8 @@ def encode_uri(uri):
     # type: (str) -> str
     split = list(urlsplit(uri))
     split[1] = split[1].encode('idna').decode('ascii')
-    split[2] = quote_plus(split[2].encode('utf-8'), '/')
-    query = list((q, v.encode('utf-8')) for (q, v) in parse_qsl(split[3]))
+    split[2] = quote_plus(split[2].encode(), '/')
+    query = list((q, v.encode()) for (q, v) in parse_qsl(split[3]))
     split[3] = urlencode(query)
     return urlunsplit(split)

|
@ -129,7 +129,7 @@ def test_svg(h, f):
|
|||||||
# type: (bytes, IO) -> str
|
# type: (bytes, IO) -> str
|
||||||
"""An additional imghdr library helper; test the header is SVG's or not."""
|
"""An additional imghdr library helper; test the header is SVG's or not."""
|
||||||
try:
|
try:
|
||||||
if '<svg' in h.decode('utf-8').lower():
|
if '<svg' in h.decode().lower():
|
||||||
return 'svg+xml'
|
return 'svg+xml'
|
||||||
except UnicodeDecodeError:
|
except UnicodeDecodeError:
|
||||||
pass
|
pass
|
||||||
|
@@ -52,10 +52,10 @@ class InventoryFileReader:
         # type: () -> str
         pos = self.buffer.find(b'\n')
         if pos != -1:
-            line = self.buffer[:pos].decode('utf-8')
+            line = self.buffer[:pos].decode()
             self.buffer = self.buffer[pos + 1:]
         elif self.eof:
-            line = self.buffer.decode('utf-8')
+            line = self.buffer.decode()
             self.buffer = b''
         else:
             self.read_buffer()
@@ -86,7 +86,7 @@ class InventoryFileReader:
             buf += chunk
             pos = buf.find(b'\n')
             while pos != -1:
-                yield buf[:pos].decode('utf-8')
+                yield buf[:pos].decode()
                 buf = buf[pos + 1:]
                 pos = buf.find(b'\n')

@@ -167,7 +167,7 @@ class InventoryFile:
                      u'# Version: %s\n'
                      u'# The remainder of this file is compressed using zlib.\n' %
                      (escape(env.config.project),
-                      escape(env.config.version))).encode('utf-8'))
+                      escape(env.config.version))).encode())

             # body
             compressor = zlib.compressobj(9)
@@ -184,5 +184,5 @@ class InventoryFile:
                     dispname = u'-'
                 entry = (u'%s %s:%s %s %s %s\n' %
                          (name, domainname, typ, prio, uri, dispname))
-                f.write(compressor.compress(entry.encode('utf-8')))
+                f.write(compressor.compress(entry.encode()))
             f.write(compressor.flush())
@@ -354,7 +354,7 @@ def test_html_download(app):
                     confoverrides={'html_experimental_html5_writer': True})
 def test_html_download_role(app, status, warning):
     app.build()
-    digest = md5((app.srcdir / 'dummy.dat').encode('utf-8')).hexdigest()
+    digest = md5((app.srcdir / 'dummy.dat').encode()).hexdigest()
     assert (app.outdir / '_downloads' / digest / 'dummy.dat').exists()

     content = (app.outdir / 'index.html').text()
@@ -47,7 +47,7 @@ def reference_check(app, *args, **kwds):
 @mock.patch('sphinx.ext.intersphinx._read_from_url')
 def test_fetch_inventory_redirection(_read_from_url, InventoryFile, app, status, warning):
     intersphinx_setup(app)
-    _read_from_url().readline.return_value = '# Sphinx inventory version 2'.encode('utf-8')
+    _read_from_url().readline.return_value = '# Sphinx inventory version 2'.encode()

     # same uri and inv, not redirected
     _read_from_url().url = 'http://hostname/' + INVENTORY_FILENAME
|
@ -145,8 +145,8 @@ def test_quickstart_all_answers(tempdir):
|
|||||||
'Root path': tempdir,
|
'Root path': tempdir,
|
||||||
'Separate source and build': 'y',
|
'Separate source and build': 'y',
|
||||||
'Name prefix for templates': '.',
|
'Name prefix for templates': '.',
|
||||||
'Project name': u'STASI™'.encode('utf-8'),
|
'Project name': u'STASI™'.encode(),
|
||||||
'Author name': u'Wolfgang Schäuble & G\'Beckstein'.encode('utf-8'),
|
'Author name': u'Wolfgang Schäuble & G\'Beckstein'.encode(),
|
||||||
'Project version': '2.0',
|
'Project version': '2.0',
|
||||||
'Project release': '2.0.1',
|
'Project release': '2.0.1',
|
||||||
'Project language': 'de',
|
'Project language': 'de',
|
||||||
|
@@ -99,7 +99,7 @@ def nonascii_srcdir(request, setup_command):
 .. toctree::

    %(mb_name)s/%(mb_name)s
-""" % locals())).encode('utf-8'))
+""" % locals())).encode())


 @pytest.mark.usefixtures('nonascii_srcdir')
|
@ -21,14 +21,14 @@ inventory_v1 = '''\
|
|||||||
# Version: 1.0
|
# Version: 1.0
|
||||||
module mod foo.html
|
module mod foo.html
|
||||||
module.cls class foo.html
|
module.cls class foo.html
|
||||||
'''.encode('utf-8')
|
'''.encode()
|
||||||
|
|
||||||
inventory_v2 = '''\
|
inventory_v2 = '''\
|
||||||
# Sphinx inventory version 2
|
# Sphinx inventory version 2
|
||||||
# Project: foo
|
# Project: foo
|
||||||
# Version: 2.0
|
# Version: 2.0
|
||||||
# The remainder of this file is compressed with zlib.
|
# The remainder of this file is compressed with zlib.
|
||||||
'''.encode('utf-8') + zlib.compress('''\
|
'''.encode() + zlib.compress('''\
|
||||||
module1 py:module 0 foo.html#module-module1 Long Module desc
|
module1 py:module 0 foo.html#module-module1 Long Module desc
|
||||||
module2 py:module 0 foo.html#module-$ -
|
module2 py:module 0 foo.html#module-$ -
|
||||||
module1.func py:function 1 sub/foo.html#$ -
|
module1.func py:function 1 sub/foo.html#$ -
|
||||||
@@ -47,16 +47,16 @@ foo.bar js:class 1 index.html#foo.bar -
 foo.bar.baz js:method 1 index.html#foo.bar.baz -
 foo.bar.qux js:data 1 index.html#foo.bar.qux -
 a term including:colon std:term -1 glossary.html#term-a-term-including-colon -
-'''.encode('utf-8'))
+'''.encode())

 inventory_v2_not_having_version = '''\
 # Sphinx inventory version 2
 # Project: foo
 # Version:
 # The remainder of this file is compressed with zlib.
-'''.encode('utf-8') + zlib.compress('''\
+'''.encode() + zlib.compress('''\
 module1 py:module 0 foo.html#module-module1 Long Module desc
-'''.encode('utf-8'))
+'''.encode())


 def test_read_inventory_v1():
@@ -136,7 +136,7 @@ with open('../sphinx/search/jssplitter.py', 'w') as f:
     f.write(python_src)

 with open('./regression_test.js', 'w') as f:
-    f.write(js_test_src.encode('utf-8'))
+    f.write(js_test_src.encode())

 print("starting test...")
 result = subprocess.call(['node', './regression_test.js'])