Merge pull request #5612 from tk0miya/removal_six

Remove use of the six.moves module
Takeshi KOMIYA 2018-11-12 11:13:58 +09:00 committed by GitHub
commit 7ff3d1875a
7 changed files with 21 additions and 35 deletions

File 1 of 7

@@ -18,10 +18,10 @@ import sys
 import warnings
 from collections import deque
 from inspect import isclass
+from io import StringIO
 from os import path

 from docutils.parsers.rst import Directive, directives, roles
-from six.moves import cStringIO

 import sphinx
 from sphinx import package_dir, locale
@@ -164,14 +164,14 @@ class Sphinx:
         self.parallel = parallel

         if status is None:
-            self._status = cStringIO()      # type: IO
+            self._status = StringIO()       # type: IO
             self.quiet = True
         else:
             self._status = status
             self.quiet = False

         if warning is None:
-            self._warning = cStringIO()     # type: IO
+            self._warning = StringIO()      # type: IO
         else:
             self._warning = warning
         self._warncount = 0
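On Python 3, six.moves.cStringIO was simply an alias for io.StringIO, so the default in-memory status and warning streams behave exactly as before. A minimal sketch of the replacement (the stream contents here are invented):

    from io import StringIO

    # In-memory text stream, as used above when the caller passes
    # status=None or warning=None.
    status = StringIO()
    status.write('building [html]: 2 source files\n')
    print(status.getvalue())   # everything written so far, as one string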

File 2 of 7

@@ -9,25 +9,16 @@
     :license: BSD, see LICENSE for details.
 """

+import queue
 import re
 import socket
 import threading
+from html.parser import HTMLParser
 from os import path
+from urllib.parse import unquote

 from docutils import nodes
 from requests.exceptions import HTTPError
-from six.moves import queue, html_parser
-from six.moves.urllib.parse import unquote
-
-# 2015-06-25 barry@python.org. This exception was deprecated in Python 3.3 and
-# removed in Python 3.5, however for backward compatibility reasons, we're not
-# going to just remove it. If it doesn't exist, define an exception that will
-# never be caught but leaves the code in check_anchor() intact.
-try:
-    from six.moves.html_parser import HTMLParseError  # type: ignore
-except ImportError:
-    class HTMLParseError(Exception):  # type: ignore
-        pass

 from sphinx.builders import Builder
 from sphinx.locale import __
@@ -47,7 +38,7 @@ if False:
 logger = logging.getLogger(__name__)


-class AnchorCheckParser(html_parser.HTMLParser):
+class AnchorCheckParser(HTMLParser):
     """Specialized HTML parser that looks for a specific anchor."""

     def __init__(self, search_anchor):
@@ -71,18 +62,13 @@ def check_anchor(response, anchor):
     Returns True if anchor was found, False otherwise.
     """
     parser = AnchorCheckParser(anchor)
-    try:
-        # Read file in chunks. If we find a matching anchor, we break
-        # the loop early in hopes not to have to download the whole thing.
-        for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
-            parser.feed(chunk)
-            if parser.found:
-                break
-        parser.close()
-    except HTMLParseError:
-        # HTMLParser is usually pretty good with sloppy HTML, but it tends to
-        # choke on EOF. But we're done then anyway.
-        pass
+    # Read file in chunks. If we find a matching anchor, we break
+    # the loop early in hopes not to have to download the whole thing.
+    for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
+        parser.feed(chunk)
+        if parser.found:
+            break
+    parser.close()

     return parser.found
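The deleted try/except was dead code on Python 3: HTMLParseError was deprecated in 3.3 and removed in 3.5, and html.parser.HTMLParser no longer raises it, so the chunked feed can run unguarded. A hedged sketch of the feed-and-stop-early pattern check_anchor() relies on (the parser class and sample chunks below are illustrative, not the Sphinx implementation):

    from html.parser import HTMLParser

    class AnchorFinder(HTMLParser):
        """Records whether a tag carrying the given id/name was seen."""

        def __init__(self, anchor):
            super().__init__()
            self.anchor = anchor
            self.found = False

        def handle_starttag(self, tag, attrs):
            if any(key in ('id', 'name') and value == self.anchor
                   for key, value in attrs):
                self.found = True

    chunks = ['<html><body><h1 id="intro">', 'Intro</h1></body></html>']
    finder = AnchorFinder('intro')
    for chunk in chunks:        # feed() happily accepts partial HTML
        finder.feed(chunk)
        if finder.found:        # stop early, as check_anchor() does
            break
    finder.close()
    print(finder.found)         # True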

File 3 of 7

@@ -20,6 +20,7 @@ import time
 import warnings
 from collections import OrderedDict
 from os import path
+from urllib.parse import quote

 # try to import readline, unix specific enhancement
 try:
@@ -35,7 +36,6 @@ except ImportError:

 from docutils.utils import column_width
 from six import text_type, binary_type
-from six.moves.urllib.parse import quote as urlquote

 import sphinx.locale
 from sphinx import __display_version__, package_dir
@@ -386,7 +386,7 @@ def generate(d, overwrite=True, silent=False, templatedir=None):
     d['PY3'] = True
     d['project_fn'] = make_filename(d['project'])
-    d['project_url'] = urlquote(d['project'].encode('idna'))
+    d['project_url'] = quote(d['project'].encode('idna'))
     d['project_manpage'] = d['project_fn'].lower()
     d['now'] = time.asctime()
     d['project_underline'] = column_width(d['project']) * '='
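The urlquote alias disappears because six.moves.urllib.parse.quote was just the stdlib function on Python 3. A sketch of what the project_url line computes, with made-up names (note the idna codec only accepts valid single DNS labels, so real-world project strings may need sanitising first):

    from urllib.parse import quote

    for name in ('sphinx', 'tönnies'):
        # encode('idna') turns a non-ASCII label into its xn--… punycode
        # form; quote() then percent-escapes anything unsafe in a URL.
        print(quote(name.encode('idna')))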

File 4 of 7

@@ -31,11 +31,11 @@ import posixpath
 import sys
 import time
 from os import path
+from urllib.parse import urlsplit, urlunsplit

 from docutils import nodes
 from docutils.utils import relative_path
 from six import string_types, text_type
-from six.moves.urllib.parse import urlsplit, urlunsplit

 import sphinx
 from sphinx.builders.html import INVENTORY_FILENAME
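The two helpers now come straight from urllib.parse; this code uses them to take inventory URIs apart and put them back together. A round-trip sketch (the URL is an example):

    from urllib.parse import urlsplit, urlunsplit

    parts = urlsplit('https://docs.python.org/3/objects.inv')
    # SplitResult is a named 5-tuple: scheme, netloc, path, query, fragment.
    print(parts.scheme, parts.netloc, parts.path)

    # Rebuild with a modified path, keeping the remaining components.
    print(urlunsplit(parts._replace(path='/3.7/objects.inv')))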

File 5 of 7

@@ -25,10 +25,10 @@ from datetime import datetime
 from hashlib import md5
 from os import path
 from time import mktime, strptime
+from urllib.parse import urlsplit, urlunsplit, quote_plus, parse_qsl, urlencode

 from docutils.utils import relative_path
 from six import text_type, binary_type
-from six.moves.urllib.parse import urlsplit, urlunsplit, quote_plus, parse_qsl, urlencode

 from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
 from sphinx.errors import PycodeError, SphinxParallelError, ExtensionError
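Same story for the query-string helpers. A quick round trip with invented data:

    from urllib.parse import parse_qsl, quote_plus, urlencode

    params = [('q', 'sphinx docs'), ('page', '2')]
    query = urlencode(params)            # 'q=sphinx+docs&page=2'
    assert parse_qsl(query) == params    # parse_qsl inverts urlencode here
    print(quote_plus('a b&c'))           # 'a+b%26c': space as '+', '&' escaped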

File 6 of 7

@@ -13,11 +13,11 @@ from __future__ import absolute_import
 import warnings
 from contextlib import contextmanager
+from urllib.parse import urlsplit

 import pkg_resources
 import requests
 from six import string_types
-from six.moves.urllib.parse import urlsplit

 try:
     from requests.packages.urllib3.exceptions import SSLError

File 7 of 7

@@ -16,7 +16,7 @@ from operator import itemgetter
 from os import path
 from uuid import uuid4

-from six.moves import range, zip_longest
+from six.moves import zip_longest

 from sphinx.deprecation import RemovedInSphinx30Warning
 from sphinx.transforms import SphinxTransform
@@ -151,7 +151,7 @@ def levenshtein_distance(a, b):
             deletions = current_row[j] + 1
             substitutions = previous_row[j] + (column1 != column2)
             current_row.append(min(insertions, deletions, substitutions))
-        previous_row = current_row  # type: ignore
+        previous_row = current_row

     return previous_row[-1]
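Only the py2 range shim is dropped in the first hunk; six.moves.zip_longest, which is itertools.zip_longest on Python 3, stays for now, and the second hunk merely removes a no-longer-needed # type: ignore. A sketch of the itertools equivalent with invented lists, pairing entries even when lengths differ:

    from itertools import zip_longest

    old_nodes = ['para-1', 'para-2']
    new_nodes = ['para-1', 'para-2-edited', 'para-3']
    for old, new in zip_longest(old_nodes, new_nodes):
        print(old, '->', new)   # unmatched entries pair with None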