mirror of https://github.com/sphinx-doc/sphinx.git
util: pep8 fixes

This commit is contained in:
parent 2a6b9d5808
commit d0efb42a41
@@ -21,7 +21,7 @@ from os import path
 from codecs import open, BOM_UTF8
 from collections import deque

-from six import iteritems, text_type, binary_type, string_types
+from six import iteritems, text_type, binary_type
 from six.moves import range
 import docutils
 from docutils.utils import relative_path
@@ -189,6 +189,7 @@ _DEBUG_HEADER = '''\
 # Loaded extensions:
 '''

+
 def save_traceback(app):
     """Save the current exception's traceback in a temporary file."""
     import platform
@@ -279,6 +280,7 @@ def get_full_modname(modname, attribute):
 # a regex to recognize coding cookies
 _coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')

+
 def detect_encoding(readline):
     """Like tokenize.detect_encoding() from Py3k, but a bit simplified."""

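
For context, the cookie regex above implements PEP 263 coding-cookie detection; a minimal sketch of what it matches (not part of the commit):

    import re

    _coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
    match = _coding_re.search('# -*- coding: utf-8 -*-')
    print(match.group(1))  # -> utf-8
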
@@ -390,8 +392,10 @@ def force_decode(string, encoding):
 class attrdict(dict):
     def __getattr__(self, key):
         return self[key]
+
     def __setattr__(self, key, val):
         self[key] = val
+
     def __delattr__(self, key):
         del self[key]

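
attrdict simply forwards attribute access to dict items; a quick illustration (not from this commit):

    d = attrdict(foo=1)
    d.bar = 2          # __setattr__  ->  d['bar'] = 2
    print(d.foo)       # __getattr__  ->  d['foo'], prints 1
    del d.bar          # __delattr__  ->  del d['bar']
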
@@ -438,7 +442,7 @@ def split_index_msg(type, value):
 def format_exception_cut_frames(x=1):
     """Format an exception with traceback, but only the last x frames."""
     typ, val, tb = sys.exc_info()
-    #res = ['Traceback (most recent call last):\n']
+    # res = ['Traceback (most recent call last):\n']
     res = []
     tbres = traceback.format_tb(tb)
     res += tbres[-x:]
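
The cut-frames idea is plain list slicing over traceback.format_tb(); a hedged sketch of the same technique, assuming the function ultimately joins res into one string (the hunk ends before the return):

    import sys
    import traceback

    try:
        1 / 0
    except ZeroDivisionError:
        typ, val, tb = sys.exc_info()
        res = traceback.format_tb(tb)[-1:]   # keep only the last frame
        res += traceback.format_exception_only(typ, val)
        print(''.join(res))
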
@@ -11,6 +11,7 @@
 import warnings
+
 from docutils import nodes
 from docutils.parsers.rst import Directive

 from docutils import __version__ as _du_version
 docutils_version = tuple(int(x) for x in _du_version.split('.')[:2])
@@ -35,5 +36,3 @@ def make_admonition(node_class, name, arguments, options, content, lineno,
     admonition_node['classes'] += classes
     state.nested_parse(content, content_offset, admonition_node)
     return [admonition_node]
-
-from docutils.parsers.rst import Directive
@@ -22,6 +22,7 @@ except ImportError:
 _ansi_re = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm')
 codes = {}

+
 def get_terminal_width():
     """Borrowed from the py lib."""
     try:
@@ -41,6 +42,8 @@ def get_terminal_width():

+
 _tw = get_terminal_width()

+
 def term_width_line(text):
     if not codes:
         # if no coloring, don't output fancy backspaces
@@ -49,6 +52,7 @@ def term_width_line(text):
         # codes are not displayed, this must be taken into account
         return text.ljust(_tw + len(text) - len(_ansi_re.sub('', text))) + '\r'

+
 def color_terminal():
     if sys.platform == 'win32' and colorama is not None:
         colorama.init()
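
The ljust() target above is widened by the length of the invisible ANSI escapes, so the visible text still pads to exactly the terminal width; schematically (a self-contained sketch, with a stand-in width):

    import re

    _ansi_re = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm')  # as in the module
    _tw = 80                                               # stand-in terminal width
    text = '\x1b[31mred\x1b[39;49;00m'                     # 'red' wrapped in ANSI codes
    visible = _ansi_re.sub('', text)                       # 'red'
    padded = text.ljust(_tw + len(text) - len(visible)) + '\r'
    assert len(_ansi_re.sub('', padded)) == _tw + 1        # 80 visible chars + '\r'
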
@@ -70,15 +74,19 @@ def nocolor():
     colorama.deinit()
     codes.clear()

+
 def coloron():
     codes.update(_orig_codes)

+
 def colorize(name, text):
     return codes.get(name, '') + text + codes.get('reset', '')

+
 def strip_colors(s):
     return re.compile('\x1b.*?m').sub('', s)

+
 def create_color_func(name):
     def inner(text):
         return colorize(name, text)
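
colorize() and the per-name helpers built by create_color_func() all reduce to dict lookups around the text; assuming 'red' is registered in codes (illustrative value, the real table is defined elsewhere in the module):

    codes['red'] = '\x1b[31m'            # assumed value, for illustration only
    print(colorize('red', 'error!'))     # -> '\x1b[31m' + 'error!' + codes.get('reset', '')
    print(colorize('missing', 'plain'))  # unknown names degrade to plain text + reset
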
@@ -74,8 +74,8 @@ class Field(object):
                                              fieldarg, nodes.Text)
         if len(content) == 1 and (
                 isinstance(content[0], nodes.Text) or
-                (isinstance(content[0], nodes.inline) and len(content[0]) == 1
-                 and isinstance(content[0][0], nodes.Text))):
+                (isinstance(content[0], nodes.inline) and len(content[0]) == 1 and
+                 isinstance(content[0][0], nodes.Text))):
             content = [self.make_xref(self.bodyrolename, domain,
                                       content[0].astext(), contnode=content[0])]
         fieldbody = nodes.field_body('', nodes.paragraph('', '', *content))
@@ -234,7 +234,7 @@ class DocFieldTransformer(object):
                 # match the spec; capitalize field name and be done with it
                 new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
                 if fieldarg:
-                    new_fieldname += ' ' + fieldarg
+                    new_fieldname += ' ' + fieldarg
                 fieldname[0] = nodes.Text(new_fieldname)
                 entries.append(field)
                 continue
@@ -265,7 +265,7 @@ class DocFieldTransformer(object):
                     pass
                 else:
                     types.setdefault(typename, {})[argname] = \
-                        [nodes.Text(argtype)]
+                        [nodes.Text(argtype)]
                     fieldarg = argname

             translatable_content = nodes.inline(fieldbody.rawsource,
@@ -11,20 +11,21 @@

 import re

-# this imports the standard library inspect module without resorting to
-# relatively import this module
-inspect = __import__('inspect')
-
 from six import PY3, binary_type
 from six.moves import builtins

 from sphinx.util import force_decode

+# this imports the standard library inspect module without resorting to
+# relatively import this module
+inspect = __import__('inspect')
+
 memory_address_re = re.compile(r' at 0x[0-9a-f]{8,16}(?=>$)')

+
 if PY3:
     from functools import partial

     def getargspec(func):
         """Like inspect.getargspec but supports functools.partial as well."""
         if inspect.ismethod(func):
@@ -61,6 +62,7 @@ if PY3:

 else:  # 2.6, 2.7
     from functools import partial
+
     def getargspec(func):
         """Like inspect.getargspec but supports functools.partial as well."""
         if inspect.ismethod(func):
@@ -53,6 +53,7 @@ def encode_string(s):
             return '\\u%04x\\u%04x' % (s1, s2)
     return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'

+
 def decode_string(s):
     return ESCAPED.sub(lambda m: eval(u + '"' + m.group() + '"'), s)

@@ -74,6 +75,7 @@ delete implements short while
 do import static with
 double in super""".split())

+
 def dumps(obj, key=False):
     if key:
         if not isinstance(obj, string_types):
@@ -101,6 +103,7 @@ def dumps(obj, key=False):
         return encode_string(obj)
     raise TypeError(type(obj))

+
 def dump(obj, f):
     f.write(dumps(obj))

@@ -195,5 +198,6 @@ def loads(x):
         raise ValueError("nothing loaded from string")
     return obj

+
 def load(f):
     return loads(f.read())

@@ -27,12 +27,15 @@ def dump(obj, fp, *args, **kwds):
     kwds['cls'] = SphinxJSONEncoder
     return json.dump(obj, fp, *args, **kwds)

+
 def dumps(obj, *args, **kwds):
     kwds['cls'] = SphinxJSONEncoder
     return json.dumps(obj, *args, **kwds)

+
 def load(*args, **kwds):
     return json.load(*args, **kwds)

+
 def loads(*args, **kwds):
     return json.loads(*args, **kwds)
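
All four wrappers exist only to force cls=SphinxJSONEncoder onto the stdlib json calls; usage is identical to the json module (sketch, not from this commit):

    data = {'project': 'sphinx', 'version': 1}
    s = dumps(data)          # same as json.dumps(data, cls=SphinxJSONEncoder)
    assert loads(s) == data  # loads/load are plain pass-throughs
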
@@ -57,18 +57,21 @@ def _translate_pattern(pat):
             res += re.escape(c)
     return res + '$'

+
 def compile_matchers(patterns):
     return [re.compile(_translate_pattern(pat)).match for pat in patterns]

+
 _pat_cache = {}

+
 def patmatch(name, pat):
     """Return if name matches pat. Adapted from fnmatch module."""
     if pat not in _pat_cache:
         _pat_cache[pat] = re.compile(_translate_pattern(pat))
     return _pat_cache[pat].match(name)

+
 def patfilter(names, pat):
     """Return the subset of the list NAMES that match PAT.

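
patmatch() memoizes the compiled glob per pattern, so repeated matching skips recompilation; a sketch of both helpers (assuming the usual glob semantics of _translate_pattern, where '*' does not cross '/'):

    if patmatch('spam.py', '*.py'):              # compiled once, then cached
        print('matched')
    print(patfilter(['a.py', 'b.txt'], '*.py'))  # -> ['a.py']
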
@@ -71,14 +71,16 @@ IGNORED_NODES = (
     nodes.Inline,
     nodes.literal_block,
     nodes.doctest_block,
-    #XXX there are probably more
+    # XXX there are probably more
 )

+
 def is_translatable(node):
     if isinstance(node, nodes.TextElement):
         apply_source_workaround(node)
+
         if not node.source:
-            return False # built-in message
+            return False  # built-in message
         if isinstance(node, IGNORED_NODES) and 'translatable' not in node:
             return False
         # <field_name>orphan</field_name>
@@ -101,6 +103,8 @@ LITERAL_TYPE_NODES = (
 IMAGE_TYPE_NODES = (
     nodes.image,
 )
+
+
 def extract_messages(doctree):
     """Extract translatable messages from a document tree."""
     for node in doctree.traverse(is_translatable):
@@ -184,6 +188,7 @@ indextypes = [
     'single', 'pair', 'double', 'triple', 'see', 'seealso',
 ]

+
 def process_index_entry(entry, targetid):
     indexentries = []
     entry = entry.strip()
@@ -233,7 +238,8 @@ def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc):
         try:
             builder.info(colorfunc(includefile) + " ", nonl=1)
             subtree = inline_all_toctrees(builder, docnameset, includefile,
-                builder.env.get_doctree(includefile), colorfunc)
+                                          builder.env.get_doctree(includefile),
+                                          colorfunc)
             docnameset.add(includefile)
         except Exception:
             builder.warn('toctree contains ref to nonexisting '
@@ -256,8 +262,8 @@ def make_refnode(builder, fromdocname, todocname, targetid, child, title=None):
     if fromdocname == todocname:
         node['refid'] = targetid
     else:
-        node['refuri'] = (builder.get_relative_uri(fromdocname, todocname)
-                          + '#' + targetid)
+        node['refuri'] = (builder.get_relative_uri(fromdocname, todocname) +
+                          '#' + targetid)
     if title:
         node['reftitle'] = title
     node.append(child)
@@ -268,9 +274,11 @@ def set_source_info(directive, node):
     node.source, node.line = \
         directive.state_machine.get_source_and_line(directive.lineno)

+
 def set_role_source_info(inliner, lineno, node):
     node.source, node.line = inliner.reporter.get_source_and_line(lineno)

+
 # monkey-patch Element.copy to copy the rawsource

 def _new_copy(self):
@@ -36,6 +36,7 @@ EINVAL = getattr(errno, 'EINVAL', 0)
 # hangover from more *nix-oriented origins.
 SEP = "/"

+
 def os_path(canonicalpath):
     return canonicalpath.replace(SEP, path.sep)

|
||||
if len(b2) == 1 and t2 == ['']:
|
||||
# Special case: relative_uri('f/index.html','f/') should
|
||||
# return './', not ''
|
||||
return '.' + SEP
|
||||
return '.' + SEP
|
||||
return ('..' + SEP) * (len(b2)-1) + SEP.join(t2)
|
||||
|
||||
|
||||
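
Both branches are easy to exercise; the comment's special case plus the general one (sketch, not from this commit):

    relative_uri('f/index.html', 'f/')       # -> './'   (the special case above)
    relative_uri('a/b/c.html', 'a/d.html')   # -> '../d.html'
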
@@ -147,6 +148,7 @@ def copyfile(source, dest):

 no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')

+
 def make_filename(string):
     return no_fn_re.sub('', string) or 'sphinx'

@@ -167,6 +169,7 @@ def safe_relpath(path, start=None):
     except ValueError:
         return path

+
 def find_catalog(docname, compaction):
     if compaction:
         ret = docname.split(SEP, 1)[0]
@@ -22,12 +22,14 @@ if PY3:
     # prefix for Unicode strings
     u = ''
     from io import TextIOWrapper
+
     # safely encode a string for printing to the terminal
     def terminal_safe(s):
         return s.encode('ascii', 'backslashreplace').decode('ascii')
     # some kind of default system encoding; should be used with a lenient
     # error handler
     sys_encoding = sys.getdefaultencoding()
+
     # support for running 2to3 over config files
     def convert_with_2to3(filepath):
         from lib2to3.refactor import RefactoringTool, get_fixers_from_package
@@ -46,11 +48,11 @@ if PY3:
     from html import escape as htmlescape  # >= Python 3.2

     class UnicodeMixin:
-        """Mixin class to handle defining the proper __str__/__unicode__
-        methods in Python 2 or 3."""
+        """Mixin class to handle defining the proper __str__/__unicode__
+        methods in Python 2 or 3."""

-        def __str__(self):
-            return self.__unicode__()
+        def __str__(self):
+            return self.__unicode__()

     from textwrap import indent

@@ -59,8 +61,10 @@ else:
     u = 'u'
     # no need to refactor on 2.x versions
     convert_with_2to3 = None
+
     def TextIOWrapper(stream, encoding):
         return codecs.lookup(encoding or 'ascii')[2](stream)
+
     # safely encode a string for printing to the terminal
     def terminal_safe(s):
         return s.encode('ascii', 'backslashreplace')
@@ -127,6 +131,7 @@ from six.moves import zip_longest
 import io
 from itertools import product

+
 class _DeprecationWrapper(object):
     def __init__(self, mod, deprecated):
         self._mod = mod
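
Only __init__ is visible in the hunk; the rest of the wrapper presumably follows the standard module-deprecation pattern, roughly as below (hypothetical sketch, not the commit's code):

    import warnings

    class _DeprecationWrapperSketch(object):
        def __init__(self, mod, deprecated):
            self._mod = mod
            self._deprecated = deprecated

        def __getattr__(self, attr):
            if attr in self._deprecated:
                warnings.warn('%s is deprecated' % attr, DeprecationWarning)
            return getattr(self._mod, attr)  # fall through to the real module
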
@@ -153,6 +153,7 @@ closing_single_quotes_regex_2 = re.compile(r"""
 (\s | s\b)
 """ % (close_class,), re.VERBOSE)

+
 def educate_quotes(s):
     """
     Parameter: String.
@@ -232,7 +233,7 @@ def educate_quotes_latex(s, dquotes=("``", "''")):

     # Finally, replace all helpers with quotes.
     return s.replace("\x01", dquotes[0]).replace("\x02", dquotes[1]).\
-        replace("\x03", "`").replace("\x04", "'")
+        replace("\x03", "`").replace("\x04", "'")


 def educate_backticks(s):
@@ -28,6 +28,7 @@
 :license: Public Domain ("can be used free of charge for any purpose").
 """

+
 class PorterStemmer(object):

     def __init__(self):
@@ -49,7 +50,7 @@ class PorterStemmer(object):
     def cons(self, i):
         """cons(i) is TRUE <=> b[i] is a consonant."""
         if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' \
-           or self.b[i] == 'o' or self.b[i] == 'u':
+           or self.b[i] == 'o' or self.b[i] == 'u':
             return 0
         if self.b[i] == 'y':
             if i == self.k0:
@@ -120,7 +121,7 @@ class PorterStemmer(object):
            snow, box, tray.
         """
         if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) \
-           or not self.cons(i-2):
+           or not self.cons(i-2):
             return 0
         ch = self.b[i]
         if ch == 'w' or ch == 'x' or ch == 'y':
@@ -130,7 +131,7 @@ class PorterStemmer(object):
     def ends(self, s):
         """ends(s) is TRUE <=> k0,...k ends with the string s."""
         length = len(s)
-        if s[length - 1] != self.b[self.k]: # tiny speed-up
+        if s[length - 1] != self.b[self.k]:  # tiny speed-up
             return 0
         if length > (self.k - self.k0 + 1):
             return 0
|
||||
self.k = self.k - 1
|
||||
elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
|
||||
self.k = self.j
|
||||
if self.ends("at"): self.setto("ate")
|
||||
elif self.ends("bl"): self.setto("ble")
|
||||
elif self.ends("iz"): self.setto("ize")
|
||||
if self.ends("at"):
|
||||
self.setto("ate")
|
||||
elif self.ends("bl"):
|
||||
self.setto("ble")
|
||||
elif self.ends("iz"):
|
||||
self.setto("ize")
|
||||
elif self.doublec(self.k):
|
||||
self.k = self.k - 1
|
||||
ch = self.b[self.k]
|
||||
@@ -207,100 +211,159 @@ class PorterStemmer(object):
         string before the suffix must give m() > 0.
         """
         if self.b[self.k - 1] == 'a':
-            if self.ends("ational"): self.r("ate")
-            elif self.ends("tional"): self.r("tion")
+            if self.ends("ational"):
+                self.r("ate")
+            elif self.ends("tional"):
+                self.r("tion")
         elif self.b[self.k - 1] == 'c':
-            if self.ends("enci"): self.r("ence")
-            elif self.ends("anci"): self.r("ance")
+            if self.ends("enci"):
+                self.r("ence")
+            elif self.ends("anci"):
+                self.r("ance")
         elif self.b[self.k - 1] == 'e':
-            if self.ends("izer"): self.r("ize")
+            if self.ends("izer"):
+                self.r("ize")
         elif self.b[self.k - 1] == 'l':
-            if self.ends("bli"): self.r("ble")  # --DEPARTURE--
+            if self.ends("bli"):
+                self.r("ble")  # --DEPARTURE--
             # To match the published algorithm, replace this phrase with
             #   if self.ends("abli"): self.r("able")
-            elif self.ends("alli"): self.r("al")
-            elif self.ends("entli"): self.r("ent")
-            elif self.ends("eli"): self.r("e")
-            elif self.ends("ousli"): self.r("ous")
+            elif self.ends("alli"):
+                self.r("al")
+            elif self.ends("entli"):
+                self.r("ent")
+            elif self.ends("eli"):
+                self.r("e")
+            elif self.ends("ousli"):
+                self.r("ous")
         elif self.b[self.k - 1] == 'o':
-            if self.ends("ization"): self.r("ize")
-            elif self.ends("ation"): self.r("ate")
-            elif self.ends("ator"): self.r("ate")
+            if self.ends("ization"):
+                self.r("ize")
+            elif self.ends("ation"):
+                self.r("ate")
+            elif self.ends("ator"):
+                self.r("ate")
         elif self.b[self.k - 1] == 's':
-            if self.ends("alism"): self.r("al")
-            elif self.ends("iveness"): self.r("ive")
-            elif self.ends("fulness"): self.r("ful")
-            elif self.ends("ousness"): self.r("ous")
+            if self.ends("alism"):
+                self.r("al")
+            elif self.ends("iveness"):
+                self.r("ive")
+            elif self.ends("fulness"):
+                self.r("ful")
+            elif self.ends("ousness"):
+                self.r("ous")
         elif self.b[self.k - 1] == 't':
-            if self.ends("aliti"): self.r("al")
-            elif self.ends("iviti"): self.r("ive")
-            elif self.ends("biliti"): self.r("ble")
-        elif self.b[self.k - 1] == 'g': # --DEPARTURE--
-            if self.ends("logi"): self.r("log")
+            if self.ends("aliti"):
+                self.r("al")
+            elif self.ends("iviti"):
+                self.r("ive")
+            elif self.ends("biliti"):
+                self.r("ble")
+        elif self.b[self.k - 1] == 'g':  # --DEPARTURE--
+            if self.ends("logi"):
+                self.r("log")
             # To match the published algorithm, delete this phrase

     def step3(self):
         """step3() dels with -ic-, -full, -ness etc. similar strategy
         to step2."""
         if self.b[self.k] == 'e':
-            if self.ends("icate"): self.r("ic")
-            elif self.ends("ative"): self.r("")
-            elif self.ends("alize"): self.r("al")
+            if self.ends("icate"):
+                self.r("ic")
+            elif self.ends("ative"):
+                self.r("")
+            elif self.ends("alize"):
+                self.r("al")
         elif self.b[self.k] == 'i':
-            if self.ends("iciti"): self.r("ic")
+            if self.ends("iciti"):
+                self.r("ic")
         elif self.b[self.k] == 'l':
-            if self.ends("ical"): self.r("ic")
-            elif self.ends("ful"): self.r("")
+            if self.ends("ical"):
+                self.r("ic")
+            elif self.ends("ful"):
+                self.r("")
         elif self.b[self.k] == 's':
-            if self.ends("ness"): self.r("")
+            if self.ends("ness"):
+                self.r("")

     def step4(self):
         """step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
         if self.b[self.k - 1] == 'a':
-            if self.ends("al"): pass
-            else: return
+            if self.ends("al"):
+                pass
+            else:
+                return
         elif self.b[self.k - 1] == 'c':
-            if self.ends("ance"): pass
-            elif self.ends("ence"): pass
-            else: return
+            if self.ends("ance"):
+                pass
+            elif self.ends("ence"):
+                pass
+            else:
+                return
         elif self.b[self.k - 1] == 'e':
-            if self.ends("er"): pass
-            else: return
+            if self.ends("er"):
+                pass
+            else:
+                return
         elif self.b[self.k - 1] == 'i':
-            if self.ends("ic"): pass
-            else: return
+            if self.ends("ic"):
+                pass
+            else:
+                return
         elif self.b[self.k - 1] == 'l':
-            if self.ends("able"): pass
-            elif self.ends("ible"): pass
-            else: return
+            if self.ends("able"):
+                pass
+            elif self.ends("ible"):
+                pass
+            else:
+                return
         elif self.b[self.k - 1] == 'n':
-            if self.ends("ant"): pass
-            elif self.ends("ement"): pass
-            elif self.ends("ment"): pass
-            elif self.ends("ent"): pass
-            else: return
+            if self.ends("ant"):
+                pass
+            elif self.ends("ement"):
+                pass
+            elif self.ends("ment"):
+                pass
+            elif self.ends("ent"):
+                pass
+            else:
+                return
         elif self.b[self.k - 1] == 'o':
-            if self.ends("ion") and (self.b[self.j] == 's'
-                                     or self.b[self.j] == 't'): pass
-            elif self.ends("ou"): pass
+            if self.ends("ion") and (self.b[self.j] == 's' or
+                                     self.b[self.j] == 't'):
+                pass
+            elif self.ends("ou"):
+                pass
             # takes care of -ous
-            else: return
+            else:
+                return
         elif self.b[self.k - 1] == 's':
-            if self.ends("ism"): pass
-            else: return
+            if self.ends("ism"):
+                pass
+            else:
+                return
         elif self.b[self.k - 1] == 't':
-            if self.ends("ate"): pass
-            elif self.ends("iti"): pass
-            else: return
+            if self.ends("ate"):
+                pass
+            elif self.ends("iti"):
+                pass
+            else:
+                return
         elif self.b[self.k - 1] == 'u':
-            if self.ends("ous"): pass
-            else: return
+            if self.ends("ous"):
+                pass
+            else:
+                return
         elif self.b[self.k - 1] == 'v':
-            if self.ends("ive"): pass
-            else: return
+            if self.ends("ive"):
+                pass
+            else:
+                return
         elif self.b[self.k - 1] == 'z':
-            if self.ends("ize"): pass
-            else: return
+            if self.ends("ize"):
+                pass
+            else:
+                return
         else:
             return
         if self.m() > 1:
@@ -316,7 +379,7 @@ class PorterStemmer(object):
         if a > 1 or (a == 1 and not self.cvc(self.k-1)):
             self.k = self.k - 1
         if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
-            self.k = self.k -1
+            self.k = self.k - 1

     def stem(self, p, i, j):
         """In stem(p,i,j), p is a char pointer, and the string to be stemmed
@@ -332,7 +395,7 @@ class PorterStemmer(object):
         self.k = j
         self.k0 = i
         if self.k <= self.k0 + 1:
-            return self.b # --DEPARTURE--
+            return self.b  # --DEPARTURE--

         # With this line, strings of length 1 or 2 don't go through the
         # stemming process, although no mention is made of this in the
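
stem(p, i, j) operates on the inclusive slice p[i..j] and returns the stemmed form; a quick sketch (standard Porter behavior, not from this commit):

    stemmer = PorterStemmer()
    word = 'running'
    print(stemmer.stem(word, 0, len(word) - 1))  # -> 'run'
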
@@ -7,11 +7,6 @@
     :license: BSD, see LICENSE for details.
 """

-import warnings
-# jinja2.sandbox imports the sets module on purpose
-warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
-                        module='jinja2.sandbox')
-
 # (ab)use the Jinja parser for parsing our boolean expressions
 from jinja2 import nodes
 from jinja2.parser import Parser
@@ -23,7 +23,7 @@ tex_replacements = [
     ('[', r'{[}'),
     (']', r'{]}'),
     ('`', r'{}`'),
-    ('\\',r'\textbackslash{}'),
+    ('\\', r'\textbackslash{}'),
     ('~', r'\textasciitilde{}'),
     ('<', r'\textless{}'),
     ('>', r'\textgreater{}'),
@@ -104,11 +104,13 @@ tex_escape_map = {}
 tex_replace_map = {}
 tex_hl_escape_map_new = {}

+
 def init():
     for a, b in tex_replacements:
         tex_escape_map[ord(a)] = b
         tex_replace_map[ord(a)] = '_'

     for a, b in tex_replacements:
-        if a in '[]{}\\': continue
+        if a in '[]{}\\':
+            continue
         tex_hl_escape_map_new[ord(a)] = b
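
Because the maps are keyed by ord(char), they plug directly into str.translate(); a sketch using the '<' replacement shown above (not from this commit):

    init()
    print(u'x < y'.translate(tex_escape_map))  # -> x \textless{} y
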
@@ -9,5 +9,5 @@


 def is_commentable(node):
-    #return node.__class__.__name__ in ('paragraph', 'literal_block')
+    # return node.__class__.__name__ in ('paragraph', 'literal_block')
     return node.__class__.__name__ == 'paragraph'