mirror of https://github.com/neovim/neovim.git
commit 35ec60f73a

@@ -516,6 +516,7 @@ if(NOT BUSTED_OUTPUT_TYPE)
 endif()

 find_program(LUACHECK_PRG luacheck)
+find_program(FLAKE8_PRG flake8)
 find_program(GPERF_PRG gperf)

 include(InstallHelpers)
@@ -667,6 +668,15 @@ else()
     COMMENT "lualint: LUACHECK_PRG not defined")
 endif()

+if(FLAKE8_PRG)
+  add_custom_target(pylint
+    COMMAND ${FLAKE8_PRG} contrib/ scripts/ src/ test/
+    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+else()
+  add_custom_target(pylint false
+    COMMENT "flake8: FLAKE8_PRG not defined")
+endif()
+
 set(CPACK_PACKAGE_NAME "Neovim")
 set(CPACK_PACKAGE_VENDOR "neovim.io")
 set(CPACK_PACKAGE_VERSION ${NVIM_VERSION_MEDIUM})

Makefile | 7
@@ -138,6 +138,9 @@ functionaltest-lua: | nvim
 lualint: | build/.ran-cmake deps
 	$(BUILD_CMD) -C build lualint

+pylint: | build/.ran-cmake deps
+	$(BUILD_CMD) -C build pylint
+
 unittest: | nvim
 	+$(BUILD_CMD) -C build unittest

@@ -179,6 +182,6 @@ appimage:
 appimage-%:
 	bash scripts/genappimage.sh $*

-lint: check-single-includes clint lualint
+lint: check-single-includes clint lualint pylint

-.PHONY: test lualint functionaltest unittest lint clint clean distclean nvim libnvim cmake deps install appimage checkprefix
+.PHONY: test lualint pylint functionaltest unittest lint clint clean distclean nvim libnvim cmake deps install appimage checkprefix
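
With the build-system changes above, Python sources get the same treatment as Lua: assuming flake8 is installed and a build directory has been configured, "make pylint" drives the new CMake pylint target (flake8 over contrib/, scripts/, src/ and test/), and "make lint" now includes pylint alongside check-single-includes, clint and lualint.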

@@ -4,6 +4,7 @@ set -e
 set -o pipefail

 if [[ "${CI_TARGET}" == lint ]]; then
+  python -m pip -q install --user --upgrade flake8
   exit
 fi


@@ -9,26 +9,24 @@ source "${CI_DIR}/common/build.sh"
 source "${CI_DIR}/common/suite.sh"

 enter_suite 'clint'

 run_test 'make clint-full' clint

 exit_suite --continue

 enter_suite 'lualint'

 run_test 'make lualint' lualint
+exit_suite --continue

+enter_suite 'pylint'
+run_test 'make pylint' pylint
 exit_suite --continue

 enter_suite single-includes

 CLICOLOR_FORCE=1 run_test_wd \
   --allow-hang \
   10s \
   'make check-single-includes' \
   'csi_clean' \
   single-includes

 exit_suite --continue

 end_tests
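
After this change the CI lint job runs four suites in sequence: clint, lualint, the new pylint suite, and single-includes.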

@@ -26,13 +26,13 @@ def get_color_code(bg, color_num):
         prefix += 1
         color_num %= 8
     else:
         prefix = '48;5;' if bg else '38;5;'
     return '\x1b[{0}{1}m'.format(prefix, color_num)


 def highlight(attrs):
     fg, bg = [int(attrs['foreground']), int(attrs['background'])]
     rv = [SGR0] # start with sgr0
     if fg != -1:
         rv.append(get_color_code(False, fg))
     if bg != -1:

@@ -9,58 +9,56 @@ from argparse import ArgumentParser


 GENERATED_INCLUDE_RE = re.compile(
     r'^\s*#\s*include\s*"([/a-z_0-9.]+\.generated\.h)"(\s+//.*)?$')


 def main(argv):
     argparser = ArgumentParser()
     argparser.add_argument('--generated-includes-dir', action='append',
                            help='Directory where generated includes are located.')
     argparser.add_argument('--file', type=open, help='File to check.')
     argparser.add_argument('iwyu_args', nargs='*',
                            help='IWYU arguments, must go after --.')
     args = argparser.parse_args(argv)

     with args.file:
-        include_dirs = []
-
         iwyu = Popen(['include-what-you-use', '-xc'] + args.iwyu_args + ['/dev/stdin'],
                      stdin=PIPE, stdout=PIPE, stderr=PIPE)

         for line in args.file:
             match = GENERATED_INCLUDE_RE.match(line)
             if match:
                 for d in args.generated_includes_dir:
                     try:
                         f = open(os.path.join(d, match.group(1)))
                     except IOError:
                         continue
                     else:
                         with f:
                             for generated_line in f:
                                 iwyu.stdin.write(generated_line)
                         break
                 else:
                     raise IOError('Failed to find {0}'.format(match.group(1)))
             else:
                 iwyu.stdin.write(line)

         iwyu.stdin.close()

         out = iwyu.stdout.read()
         err = iwyu.stderr.read()

         ret = iwyu.wait()

         if ret != 2:
             print('IWYU failed with exit code {0}:'.format(ret))
             print('{0} stdout {0}'.format('=' * ((80 - len(' stdout ')) // 2)))
             print(out)
             print('{0} stderr {0}'.format('=' * ((80 - len(' stderr ')) // 2)))
             print(err)
             return 1
     return 0


 if __name__ == '__main__':
     raise SystemExit(main(sys.argv[1:]))
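
The rewritten block above feeds the preprocessed source to include-what-you-use through a pipe. A minimal sketch of that Popen pattern, with a placeholder command standing in for the real IWYU invocation (not the script's exact arguments):

    from subprocess import Popen, PIPE

    # Stream lines into a child process over stdin, then collect its output.
    # 'cat' is only a stand-in for include-what-you-use here.
    proc = Popen(['cat'], stdin=PIPE, stdout=PIPE, stderr=PIPE, text=True)
    for line in ['int x;\n', 'int y;\n']:
        proc.stdin.write(line)   # forward each (possibly substituted) line
    proc.stdin.close()           # signal EOF so the child can finish
    out = proc.stdout.read()
    err = proc.stderr.read()
    ret = proc.wait()            # read the exit status last
    print(ret, out, err)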

@@ -81,12 +81,12 @@ SITENAVI_PLAIN = '<p>' + SITENAVI_LINKS_PLAIN + '</p>'
 SITENAVI_WEB = '<p>' + SITENAVI_LINKS_WEB + '</p>'

 SITENAVI_SEARCH = '<table width="100%"><tbody><tr><td>' + SITENAVI_LINKS_WEB + \
     '</td><td style="text-align: right; max-width: 25vw"><div class="gcse-searchbox">' \
     '</div></td></tr></tbody></table><div class="gcse-searchresults"></div>'

 TEXTSTART = """
 <div id="d1">
-<pre id="sp"> </pre>
+<pre id="sp">""" + (" " * 80) + """</pre>
 <div id="d2">
 <pre>
 """
|
|||||||
</body>
|
</body>
|
||||||
</html>
|
</html>
|
||||||
""".format(
|
""".format(
|
||||||
generated_date='{0:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()),
|
generated_date='{0:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()),
|
||||||
commit='?')
|
commit='?')
|
||||||
|
|
||||||
RE_TAGLINE = re.compile(r'(\S+)\s+(\S+)')
|
RE_TAGLINE = re.compile(r'(\S+)\s+(\S+)')
|
||||||
|
|
||||||
PAT_WORDCHAR = '[!#-)+-{}~\xC0-\xFF]'
|
PAT_WORDCHAR = '[!#-)+-{}~\xC0-\xFF]'
|
||||||
|
|
||||||
PAT_HEADER = r'(^.*~$)'
|
PAT_HEADER = r'(^.*~$)'
|
||||||
PAT_GRAPHIC = r'(^.* `$)'
|
PAT_GRAPHIC = r'(^.* `$)'
|
||||||
PAT_PIPEWORD = r'(?<!\\)\|([#-)!+-~]+)\|'
|
PAT_PIPEWORD = r'(?<!\\)\|([#-)!+-~]+)\|'
|
||||||
PAT_STARWORD = r'\*([#-)!+-~]+)\*(?:(?=\s)|$)'
|
PAT_STARWORD = r'\*([#-)!+-~]+)\*(?:(?=\s)|$)'
|
||||||
PAT_COMMAND = r'`([^` ]+)`'
|
PAT_COMMAND = r'`([^` ]+)`'
|
||||||
PAT_OPTWORD = r"('(?:[a-z]{2,}|t_..)')"
|
PAT_OPTWORD = r"('(?:[a-z]{2,}|t_..)')"
|
||||||
PAT_CTRL = r'(CTRL-(?:W_)?(?:\{char\}|<[A-Za-z]+?>|.)?)'
|
PAT_CTRL = r'(CTRL-(?:W_)?(?:\{char\}|<[A-Za-z]+?>|.)?)'
|
||||||
PAT_SPECIAL = r'(<.+?>|\{.+?}|' \
|
PAT_SPECIAL = r'(<.+?>|\{.+?}|' \
|
||||||
r'\[(?:range|line|count|offset|\+?cmd|[-+]?num|\+\+opt|' \
|
r'\[(?:range|line|count|offset|\+?cmd|[-+]?num|\+\+opt|' \
|
||||||
r'arg|arguments|ident|addr|group)]|' \
|
r'arg|arguments|ident|addr|group)]|' \
|
||||||
r'(?<=\s)\[[-a-z^A-Z0-9_]{2,}])'
|
r'(?<=\s)\[[-a-z^A-Z0-9_]{2,}])'
|
||||||
PAT_TITLE = r'(Vim version [0-9.a-z]+|VIM REFERENCE.*)'
|
PAT_TITLE = r'(Vim version [0-9.a-z]+|VIM REFERENCE.*)'
|
||||||
PAT_NOTE = r'((?<!' + PAT_WORDCHAR + r')(?:note|NOTE|Notes?):?' \
|
PAT_NOTE = r'((?<!' + PAT_WORDCHAR + r')(?:note|NOTE|Notes?):?' \
|
||||||
r'(?!' + PAT_WORDCHAR + r'))'
|
r'(?!' + PAT_WORDCHAR + r'))'
|
||||||
PAT_URL = r'((?:https?|ftp)://[^\'"<> \t]+[a-zA-Z0-9/])'
|
PAT_URL = r'((?:https?|ftp)://[^\'"<> \t]+[a-zA-Z0-9/])'
|
||||||
PAT_WORD = r'((?<!' + PAT_WORDCHAR + r')' + PAT_WORDCHAR + r'+' \
|
PAT_WORD = r'((?<!' + PAT_WORDCHAR + r')' + PAT_WORDCHAR + r'+' \
|
||||||
r'(?!' + PAT_WORDCHAR + r'))'
|
r'(?!' + PAT_WORDCHAR + r'))'
|
||||||
|
|
||||||
RE_LINKWORD = re.compile(
|
RE_LINKWORD = re.compile(
|
||||||
PAT_OPTWORD + '|' +
|
PAT_OPTWORD + '|' +
|
||||||
PAT_CTRL + '|' +
|
PAT_CTRL + '|' +
|
||||||
PAT_SPECIAL)
|
PAT_SPECIAL)
|
||||||
RE_TAGWORD = re.compile(
|
RE_TAGWORD = re.compile(
|
||||||
PAT_HEADER + '|' +
|
PAT_HEADER + '|' +
|
||||||
PAT_GRAPHIC + '|' +
|
PAT_GRAPHIC + '|' +
|
||||||
PAT_PIPEWORD + '|' +
|
PAT_PIPEWORD + '|' +
|
||||||
PAT_STARWORD + '|' +
|
PAT_STARWORD + '|' +
|
||||||
PAT_COMMAND + '|' +
|
PAT_COMMAND + '|' +
|
||||||
PAT_OPTWORD + '|' +
|
PAT_OPTWORD + '|' +
|
||||||
PAT_CTRL + '|' +
|
PAT_CTRL + '|' +
|
||||||
PAT_SPECIAL + '|' +
|
PAT_SPECIAL + '|' +
|
||||||
PAT_TITLE + '|' +
|
PAT_TITLE + '|' +
|
||||||
PAT_NOTE + '|' +
|
PAT_NOTE + '|' +
|
||||||
PAT_URL + '|' +
|
PAT_URL + '|' +
|
||||||
PAT_WORD)
|
PAT_WORD)
|
||||||
RE_NEWLINE = re.compile(r'[\r\n]')
|
RE_NEWLINE = re.compile(r'[\r\n]')
|
||||||
# H1 header "=====…"
|
# H1 header "=====…"
|
||||||
# H2 header "-----…"
|
# H2 header "-----…"
|
||||||
RE_HRULE = re.compile(r'[-=]{3,}.*[-=]{3,3}$')
|
RE_HRULE = re.compile(r'[-=]{3,}.*[-=]{3,3}$')
|
||||||
RE_EG_START = re.compile(r'(?:.* )?>$')
|
RE_EG_START = re.compile(r'(?:.* )?>$')
|
||||||
RE_EG_END = re.compile(r'\S')
|
RE_EG_END = re.compile(r'\S')
|
||||||
RE_SECTION = re.compile(r'[-A-Z .][-A-Z0-9 .()]*(?=\s+\*)')
|
RE_SECTION = re.compile(r'[-A-Z .][-A-Z0-9 .()]*(?=\s+\*)')
|
||||||
RE_STARTAG = re.compile(r'\s\*([^ \t|]+)\*(?:\s|$)')
|
RE_STARTAG = re.compile(r'\s\*([^ \t|]+)\*(?:\s|$)')
|
||||||
RE_LOCAL_ADD = re.compile(r'LOCAL ADDITIONS:\s+\*local-additions\*$')
|
RE_LOCAL_ADD = re.compile(r'LOCAL ADDITIONS:\s+\*local-additions\*$')
|
||||||
|
|
||||||
|
|
||||||
class Link(object):
|
class Link(object):
|
||||||
__slots__ = 'link_plain_same', 'link_pipe_same', \
|
__slots__ = 'link_plain_same', 'link_pipe_same', \
|
||||||
'link_plain_foreign', 'link_pipe_foreign', \
|
'link_plain_foreign', 'link_pipe_foreign', \
|
||||||
'filename'
|
'filename'
|
||||||
|
|
||||||
def __init__(self, link_plain_same, link_plain_foreign,
|
def __init__(self, link_plain_same, link_plain_foreign,
|
||||||
link_pipe_same, link_pipe_foreign, filename):
|
link_pipe_same, link_pipe_foreign, filename):
|
||||||
self.link_plain_same = link_plain_same
|
self.link_plain_same = link_plain_same
|
||||||
self.link_plain_foreign = link_plain_foreign
|
self.link_plain_foreign = link_plain_foreign
|
||||||
self.link_pipe_same = link_pipe_same
|
self.link_pipe_same = link_pipe_same
|
||||||
self.link_pipe_foreign = link_pipe_foreign
|
self.link_pipe_foreign = link_pipe_foreign
|
||||||
self.filename = filename
|
self.filename = filename
|
||||||
|
|
||||||
|
|
||||||
class VimH2H(object):
|
class VimH2H(object):
|
||||||
def __init__(self, tags, version=None, is_web_version=True):
|
def __init__(self, tags, version=None, is_web_version=True):
|
||||||
self._urls = { }
|
self._urls = {}
|
||||||
self._version = version
|
self._version = version
|
||||||
self._is_web_version = is_web_version
|
self._is_web_version = is_web_version
|
||||||
for line in RE_NEWLINE.split(tags):
|
for line in RE_NEWLINE.split(tags):
|
||||||
@ -183,6 +185,7 @@ class VimH2H(object):
|
|||||||
|
|
||||||
def do_add_tag(self, filename, tag):
|
def do_add_tag(self, filename, tag):
|
||||||
tag_quoted = urllib.parse.quote_plus(tag)
|
tag_quoted = urllib.parse.quote_plus(tag)
|
||||||
|
|
||||||
def mkpart1(doc):
|
def mkpart1(doc):
|
||||||
return '<a href="' + doc + '#' + tag_quoted + '" class="'
|
return '<a href="' + doc + '#' + tag_quoted + '" class="'
|
||||||
part1_same = mkpart1('')
|
part1_same = mkpart1('')
|
||||||
@ -192,16 +195,20 @@ class VimH2H(object):
|
|||||||
doc = filename + '.html'
|
doc = filename + '.html'
|
||||||
part1_foreign = mkpart1(doc)
|
part1_foreign = mkpart1(doc)
|
||||||
part2 = '">' + html_escape[tag] + '</a>'
|
part2 = '">' + html_escape[tag] + '</a>'
|
||||||
|
|
||||||
def mklinks(cssclass):
|
def mklinks(cssclass):
|
||||||
return (part1_same + cssclass + part2,
|
return (part1_same + cssclass + part2,
|
||||||
part1_foreign + cssclass + part2)
|
part1_foreign + cssclass + part2)
|
||||||
cssclass_plain = 'd'
|
cssclass_plain = 'd'
|
||||||
m = RE_LINKWORD.match(tag)
|
m = RE_LINKWORD.match(tag)
|
||||||
if m:
|
if m:
|
||||||
opt, ctrl, special = m.groups()
|
opt, ctrl, special = m.groups()
|
||||||
if opt is not None: cssclass_plain = 'o'
|
if opt is not None:
|
||||||
elif ctrl is not None: cssclass_plain = 'k'
|
cssclass_plain = 'o'
|
||||||
elif special is not None: cssclass_plain = 's'
|
elif ctrl is not None:
|
||||||
|
cssclass_plain = 'k'
|
||||||
|
elif special is not None:
|
||||||
|
cssclass_plain = 's'
|
||||||
links_plain = mklinks(cssclass_plain)
|
links_plain = mklinks(cssclass_plain)
|
||||||
links_pipe = mklinks('l')
|
links_pipe = mklinks('l')
|
||||||
self._urls[tag] = Link(
|
self._urls[tag] = Link(
|
||||||
@ -213,18 +220,23 @@ class VimH2H(object):
|
|||||||
links = self._urls.get(tag)
|
links = self._urls.get(tag)
|
||||||
if links is not None:
|
if links is not None:
|
||||||
if links.filename == curr_filename:
|
if links.filename == curr_filename:
|
||||||
if css_class == 'l': return links.link_pipe_same
|
if css_class == 'l':
|
||||||
else: return links.link_plain_same
|
return links.link_pipe_same
|
||||||
|
else:
|
||||||
|
return links.link_plain_same
|
||||||
else:
|
else:
|
||||||
if css_class == 'l': return links.link_pipe_foreign
|
if css_class == 'l':
|
||||||
else: return links.link_plain_foreign
|
return links.link_pipe_foreign
|
||||||
|
else:
|
||||||
|
return links.link_plain_foreign
|
||||||
elif css_class is not None:
|
elif css_class is not None:
|
||||||
return '<span class="' + css_class + '">' + html_escape[tag] + \
|
return '<span class="' + css_class + '">' + html_escape[tag] + \
|
||||||
'</span>'
|
'</span>'
|
||||||
else: return html_escape[tag]
|
else:
|
||||||
|
return html_escape[tag]
|
||||||
|
|
||||||
def to_html(self, filename, contents, encoding):
|
def to_html(self, filename, contents, encoding):
|
||||||
out = [ ]
|
out = []
|
||||||
|
|
||||||
inexample = 0
|
inexample = 0
|
||||||
filename = str(filename)
|
filename = str(filename)
|
||||||
@ -247,10 +259,11 @@ class VimH2H(object):
|
|||||||
if inexample == 2:
|
if inexample == 2:
|
||||||
if RE_EG_END.match(line):
|
if RE_EG_END.match(line):
|
||||||
inexample = 0
|
inexample = 0
|
||||||
if line[0] == '<': line = line[1:]
|
if line[0] == '<':
|
||||||
|
line = line[1:]
|
||||||
else:
|
else:
|
||||||
out.extend(('<span class="e">', html_escape[line],
|
out.extend(('<span class="e">', html_escape[line],
|
||||||
'</span>\n'))
|
'</span>\n'))
|
||||||
continue
|
continue
|
||||||
if RE_EG_START.match(line_tabs):
|
if RE_EG_START.match(line_tabs):
|
||||||
inexample = 1
|
inexample = 1
|
||||||
@ -266,12 +279,12 @@ class VimH2H(object):
|
|||||||
out.append(html_escape[line[lastpos:pos]])
|
out.append(html_escape[line[lastpos:pos]])
|
||||||
lastpos = match.end()
|
lastpos = match.end()
|
||||||
header, graphic, pipeword, starword, command, opt, ctrl, \
|
header, graphic, pipeword, starword, command, opt, ctrl, \
|
||||||
special, title, note, url, word = match.groups()
|
special, title, note, url, word = match.groups()
|
||||||
if pipeword is not None:
|
if pipeword is not None:
|
||||||
out.append(self.maplink(pipeword, filename, 'l'))
|
out.append(self.maplink(pipeword, filename, 'l'))
|
||||||
elif starword is not None:
|
elif starword is not None:
|
||||||
out.extend(('<a name="', urllib.parse.quote_plus(starword),
|
out.extend(('<a name="', urllib.parse.quote_plus(starword),
|
||||||
'" class="t">', html_escape[starword], '</a>'))
|
'" class="t">', html_escape[starword], '</a>'))
|
||||||
elif command is not None:
|
elif command is not None:
|
||||||
out.extend(('<span class="e">', html_escape[command],
|
out.extend(('<span class="e">', html_escape[command],
|
||||||
'</span>'))
|
'</span>'))
|
||||||
@ -300,14 +313,15 @@ class VimH2H(object):
|
|||||||
if lastpos < len(line):
|
if lastpos < len(line):
|
||||||
out.append(html_escape[line[lastpos:]])
|
out.append(html_escape[line[lastpos:]])
|
||||||
out.append('\n')
|
out.append('\n')
|
||||||
if inexample == 1: inexample = 2
|
if inexample == 1:
|
||||||
|
inexample = 2
|
||||||
|
|
||||||
header = []
|
header = []
|
||||||
header.append(HEAD.format(encoding=encoding, filename=filename))
|
header.append(HEAD.format(encoding=encoding, filename=filename))
|
||||||
header.append(HEAD_END)
|
header.append(HEAD_END)
|
||||||
if self._is_web_version and is_help_txt:
|
if self._is_web_version and is_help_txt:
|
||||||
vers_note = VERSION_NOTE.replace('{version}', self._version) \
|
vers_note = VERSION_NOTE.replace('{version}', self._version) \
|
||||||
if self._version else ''
|
if self._version else ''
|
||||||
header.append(INTRO.replace('{vers-note}', vers_note))
|
header.append(INTRO.replace('{vers-note}', vers_note))
|
||||||
if self._is_web_version:
|
if self._is_web_version:
|
||||||
header.append(SITENAVI_SEARCH)
|
header.append(SITENAVI_SEARCH)
|
||||||
@ -318,6 +332,7 @@ class VimH2H(object):
|
|||||||
header.append(TEXTSTART)
|
header.append(TEXTSTART)
|
||||||
return ''.join(chain(header, out, (FOOTER, sitenavi_footer, FOOTER2)))
|
return ''.join(chain(header, out, (FOOTER, sitenavi_footer, FOOTER2)))
|
||||||
|
|
||||||
|
|
||||||
class HtmlEscCache(dict):
|
class HtmlEscCache(dict):
|
||||||
def __missing__(self, key):
|
def __missing__(self, key):
|
||||||
r = key.replace('&', '&') \
|
r = key.replace('&', '&') \
|
||||||
@ -326,11 +341,10 @@ class HtmlEscCache(dict):
|
|||||||
self[key] = r
|
self[key] = r
|
||||||
return r
|
return r
|
||||||
|
|
||||||
|
|
||||||
html_escape = HtmlEscCache()
|
html_escape = HtmlEscCache()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def slurp(filename):
|
def slurp(filename):
|
||||||
try:
|
try:
|
||||||
with open(filename, encoding='UTF-8') as f:
|
with open(filename, encoding='UTF-8') as f:
|
||||||
@ -340,17 +354,20 @@ def slurp(filename):
|
|||||||
with open(filename, encoding='latin-1') as f:
|
with open(filename, encoding='latin-1') as f:
|
||||||
return f.read(), 'latin-1'
|
return f.read(), 'latin-1'
|
||||||
|
|
||||||
|
|
||||||
def usage():
|
def usage():
|
||||||
return "usage: " + sys.argv[0] + " IN_DIR OUT_DIR [BASENAMES...]"
|
return "usage: " + sys.argv[0] + " IN_DIR OUT_DIR [BASENAMES...]"
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
if len(sys.argv) < 3: sys.exit(usage())
|
if len(sys.argv) < 3:
|
||||||
|
sys.exit(usage())
|
||||||
|
|
||||||
in_dir = sys.argv[1]
|
in_dir = sys.argv[1]
|
||||||
out_dir = sys.argv[2]
|
out_dir = sys.argv[2]
|
||||||
basenames = sys.argv[3:]
|
basenames = sys.argv[3:]
|
||||||
|
|
||||||
print( "Processing tags...")
|
print("Processing tags...")
|
||||||
h2h = VimH2H(slurp(os.path.join(in_dir, 'tags'))[0], is_web_version=False)
|
h2h = VimH2H(slurp(os.path.join(in_dir, 'tags'))[0], is_web_version=False)
|
||||||
|
|
||||||
if len(basenames) == 0:
|
if len(basenames) == 0:
|
||||||
@ -358,9 +375,9 @@ def main():
|
|||||||
|
|
||||||
for basename in basenames:
|
for basename in basenames:
|
||||||
if os.path.splitext(basename)[1] != '.txt' and basename != 'tags':
|
if os.path.splitext(basename)[1] != '.txt' and basename != 'tags':
|
||||||
print( "Ignoring " + basename)
|
print("Ignoring " + basename)
|
||||||
continue
|
continue
|
||||||
print( "Processing " + basename + "...")
|
print("Processing " + basename + "...")
|
||||||
path = os.path.join(in_dir, basename)
|
path = os.path.join(in_dir, basename)
|
||||||
text, encoding = slurp(path)
|
text, encoding = slurp(path)
|
||||||
outpath = os.path.join(out_dir, basename + '.html')
|
outpath = os.path.join(out_dir, basename + '.html')
|
||||||
@ -368,4 +385,5 @@ def main():
|
|||||||
of.write(h2h.to_html(basename, text, encoding))
|
of.write(h2h.to_html(basename, text, encoding))
|
||||||
of.close()
|
of.close()
|
||||||
|
|
||||||
|
|
||||||
main()
|
main()
|
||||||
|

@@ -36,7 +36,6 @@ import shutil
 import textwrap
 import subprocess
 import collections
-import pprint

 from xml.dom import minidom

|
|||||||
lua2dox_filter = os.path.join(base_dir, 'scripts', 'lua2dox_filter')
|
lua2dox_filter = os.path.join(base_dir, 'scripts', 'lua2dox_filter')
|
||||||
|
|
||||||
CONFIG = {
|
CONFIG = {
|
||||||
'api': {
|
'api': {
|
||||||
'filename': 'api.txt',
|
'filename': 'api.txt',
|
||||||
# String used to find the start of the generated part of the doc.
|
# String used to find the start of the generated part of the doc.
|
||||||
'section_start_token': '*api-global*',
|
'section_start_token': '*api-global*',
|
||||||
# Section ordering.
|
# Section ordering.
|
||||||
'section_order' : [
|
'section_order': [
|
||||||
'vim.c',
|
'vim.c',
|
||||||
'buffer.c',
|
'buffer.c',
|
||||||
'window.c',
|
'window.c',
|
||||||
'tabpage.c',
|
'tabpage.c',
|
||||||
'ui.c',
|
'ui.c',
|
||||||
],
|
],
|
||||||
# List of files/directories for doxygen to read, separated by blanks
|
# List of files/directories for doxygen to read, separated by blanks
|
||||||
'files': os.path.join(base_dir, 'src/nvim/api'),
|
'files': os.path.join(base_dir, 'src/nvim/api'),
|
||||||
# file patterns used by doxygen
|
# file patterns used by doxygen
|
||||||
'file_patterns': '*.h *.c',
|
'file_patterns': '*.h *.c',
|
||||||
# Only function with this prefix are considered
|
# Only function with this prefix are considered
|
||||||
'func_name_prefix': 'nvim_',
|
'func_name_prefix': 'nvim_',
|
||||||
# Section name overrides.
|
# Section name overrides.
|
||||||
'section_name': {
|
'section_name': {
|
||||||
'vim.c': 'Global',
|
'vim.c': 'Global',
|
||||||
|
},
|
||||||
|
# Module name overrides (for Lua).
|
||||||
|
'module_override': {},
|
||||||
|
# Append the docs for these modules, do not start a new section.
|
||||||
|
'append_only': [],
|
||||||
},
|
},
|
||||||
# Module name overrides (for Lua).
|
'lua': {
|
||||||
'module_override': {},
|
'filename': 'if_lua.txt',
|
||||||
# Append the docs for these modules, do not start a new section.
|
'section_start_token': '*lua-vim*',
|
||||||
'append_only' : [],
|
'section_order': [
|
||||||
},
|
'vim.lua',
|
||||||
'lua': {
|
'shared.lua',
|
||||||
'filename': 'if_lua.txt',
|
],
|
||||||
'section_start_token': '*lua-vim*',
|
'files': ' '.join([
|
||||||
'section_order' : [
|
os.path.join(base_dir, 'src/nvim/lua/vim.lua'),
|
||||||
'vim.lua',
|
os.path.join(base_dir, 'runtime/lua/vim/shared.lua'),
|
||||||
'shared.lua',
|
|
||||||
],
|
|
||||||
'files': ' '.join([
|
|
||||||
os.path.join(base_dir, 'src/nvim/lua/vim.lua'),
|
|
||||||
os.path.join(base_dir, 'runtime/lua/vim/shared.lua'),
|
|
||||||
]),
|
]),
|
||||||
'file_patterns': '*.lua',
|
'file_patterns': '*.lua',
|
||||||
'func_name_prefix': '',
|
'func_name_prefix': '',
|
||||||
'section_name': {},
|
'section_name': {},
|
||||||
'module_override': {
|
'module_override': {
|
||||||
'shared': 'vim', # `shared` functions are exposed on the `vim` module.
|
# `shared` functions are exposed on the `vim` module.
|
||||||
|
'shared': 'vim',
|
||||||
|
},
|
||||||
|
'append_only': [
|
||||||
|
'shared.lua',
|
||||||
|
],
|
||||||
},
|
},
|
||||||
'append_only' : [
|
|
||||||
'shared.lua',
|
|
||||||
],
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
param_exclude = (
|
param_exclude = (
|
||||||
@ -121,6 +121,7 @@ annotation_map = {
|
|||||||
# deprecated functions.
|
# deprecated functions.
|
||||||
xrefs = set()
|
xrefs = set()
|
||||||
|
|
||||||
|
|
||||||
def debug_this(s, n):
|
def debug_this(s, n):
|
||||||
o = n if isinstance(n, str) else n.toprettyxml(indent=' ', newl='\n')
|
o = n if isinstance(n, str) else n.toprettyxml(indent=' ', newl='\n')
|
||||||
name = '' if isinstance(n, str) else n.nodeName
|
name = '' if isinstance(n, str) else n.nodeName
|
||||||
@ -191,7 +192,7 @@ def len_lastline(text):
|
|||||||
if -1 == lastnl:
|
if -1 == lastnl:
|
||||||
return len(text)
|
return len(text)
|
||||||
if '\n' == text[-1]:
|
if '\n' == text[-1]:
|
||||||
return lastnl - (1+ text.rfind('\n', 0, lastnl))
|
return lastnl - (1 + text.rfind('\n', 0, lastnl))
|
||||||
return len(text) - (1 + lastnl)
|
return len(text) - (1 + lastnl)
|
||||||
|
|
||||||
|
|
||||||
@ -209,6 +210,7 @@ def is_inline(n):
|
|||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
def doc_wrap(text, prefix='', width=70, func=False, indent=None):
|
def doc_wrap(text, prefix='', width=70, func=False, indent=None):
|
||||||
"""Wraps text to `width`.
|
"""Wraps text to `width`.
|
||||||
|
|
||||||
@ -237,8 +239,8 @@ def doc_wrap(text, prefix='', width=70, func=False, indent=None):
|
|||||||
if indent_only:
|
if indent_only:
|
||||||
prefix = indent
|
prefix = indent
|
||||||
|
|
||||||
tw = textwrap.TextWrapper(break_long_words = False,
|
tw = textwrap.TextWrapper(break_long_words=False,
|
||||||
break_on_hyphens = False,
|
break_on_hyphens=False,
|
||||||
width=width,
|
width=width,
|
||||||
initial_indent=prefix,
|
initial_indent=prefix,
|
||||||
subsequent_indent=indent)
|
subsequent_indent=indent)
|
||||||
@ -287,13 +289,14 @@ def render_params(parent, width=62):
|
|||||||
desc_node = get_child(node, 'parameterdescription')
|
desc_node = get_child(node, 'parameterdescription')
|
||||||
if desc_node:
|
if desc_node:
|
||||||
desc = parse_parblock(desc_node, width=width,
|
desc = parse_parblock(desc_node, width=width,
|
||||||
indent=(' ' * len(name)))
|
indent=(' ' * len(name)))
|
||||||
|
|
||||||
out += '{}{}\n'.format(name, desc)
|
out += '{}{}\n'.format(name, desc)
|
||||||
return out.rstrip()
|
return out.rstrip()
|
||||||
|
|
||||||
# Renders a node as Vim help text, recursively traversing all descendants.
|
|
||||||
def render_node(n, text, prefix='', indent='', width=62):
|
def render_node(n, text, prefix='', indent='', width=62):
|
||||||
|
"""Renders a node as Vim help text, recursively traversing all descendants."""
|
||||||
text = ''
|
text = ''
|
||||||
# space_preceding = (len(text) > 0 and ' ' == text[-1][-1])
|
# space_preceding = (len(text) > 0 and ' ' == text[-1][-1])
|
||||||
# text += (int(not space_preceding) * ' ')
|
# text += (int(not space_preceding) * ' ')
|
||||||
@ -317,7 +320,11 @@ def render_node(n, text, prefix='', indent='', width=62):
|
|||||||
text += ' [verbatim] {}'.format(get_text(n))
|
text += ' [verbatim] {}'.format(get_text(n))
|
||||||
elif n.nodeName == 'listitem':
|
elif n.nodeName == 'listitem':
|
||||||
for c in n.childNodes:
|
for c in n.childNodes:
|
||||||
text += indent + prefix + render_node(c, text, indent=indent+(' ' * len(prefix)), width=width)
|
text += (
|
||||||
|
indent
|
||||||
|
+ prefix
|
||||||
|
+ render_node(c, text, indent=indent + (' ' * len(prefix)), width=width)
|
||||||
|
)
|
||||||
elif n.nodeName in ('para', 'heading'):
|
elif n.nodeName in ('para', 'heading'):
|
||||||
for c in n.childNodes:
|
for c in n.childNodes:
|
||||||
text += render_node(c, text, indent=indent, width=width)
|
text += render_node(c, text, indent=indent, width=width)
|
||||||
@ -326,7 +333,7 @@ def render_node(n, text, prefix='', indent='', width=62):
|
|||||||
elif n.nodeName == 'itemizedlist':
|
elif n.nodeName == 'itemizedlist':
|
||||||
for c in n.childNodes:
|
for c in n.childNodes:
|
||||||
text += '{}\n'.format(render_node(c, text, prefix='• ',
|
text += '{}\n'.format(render_node(c, text, prefix='• ',
|
||||||
indent=indent, width=width))
|
indent=indent, width=width))
|
||||||
elif n.nodeName == 'orderedlist':
|
elif n.nodeName == 'orderedlist':
|
||||||
i = 1
|
i = 1
|
||||||
for c in n.childNodes:
|
for c in n.childNodes:
|
||||||
@ -334,7 +341,7 @@ def render_node(n, text, prefix='', indent='', width=62):
|
|||||||
text += '\n'
|
text += '\n'
|
||||||
continue
|
continue
|
||||||
text += '{}\n'.format(render_node(c, text, prefix='{}. '.format(i),
|
text += '{}\n'.format(render_node(c, text, prefix='{}. '.format(i),
|
||||||
indent=indent, width=width))
|
indent=indent, width=width))
|
||||||
i = i + 1
|
i = i + 1
|
||||||
elif n.nodeName == 'simplesect' and 'note' == n.getAttribute('kind'):
|
elif n.nodeName == 'simplesect' and 'note' == n.getAttribute('kind'):
|
||||||
text += 'Note:\n '
|
text += 'Note:\n '
|
||||||
@ -356,6 +363,7 @@ def render_node(n, text, prefix='', indent='', width=62):
|
|||||||
n.nodeName, n.toprettyxml(indent=' ', newl='\n')))
|
n.nodeName, n.toprettyxml(indent=' ', newl='\n')))
|
||||||
return text
|
return text
|
||||||
|
|
||||||
|
|
||||||
def render_para(parent, indent='', width=62):
|
def render_para(parent, indent='', width=62):
|
||||||
"""Renders Doxygen <para> containing arbitrary nodes.
|
"""Renders Doxygen <para> containing arbitrary nodes.
|
||||||
|
|
||||||
@ -363,7 +371,7 @@ def render_para(parent, indent='', width=62):
|
|||||||
"""
|
"""
|
||||||
if is_inline(parent):
|
if is_inline(parent):
|
||||||
return clean_lines(doc_wrap(render_node(parent, ''),
|
return clean_lines(doc_wrap(render_node(parent, ''),
|
||||||
indent=indent, width=width).strip())
|
indent=indent, width=width).strip())
|
||||||
|
|
||||||
# Ordered dict of ordered lists.
|
# Ordered dict of ordered lists.
|
||||||
groups = collections.OrderedDict([
|
groups = collections.OrderedDict([
|
||||||
@ -407,17 +415,19 @@ def render_para(parent, indent='', width=62):
|
|||||||
if len(groups['return']) > 0:
|
if len(groups['return']) > 0:
|
||||||
chunks.append('\nReturn: ~')
|
chunks.append('\nReturn: ~')
|
||||||
for child in groups['return']:
|
for child in groups['return']:
|
||||||
chunks.append(render_node(child, chunks[-1][-1], indent=indent, width=width))
|
chunks.append(render_node(
|
||||||
|
child, chunks[-1][-1], indent=indent, width=width))
|
||||||
if len(groups['seealso']) > 0:
|
if len(groups['seealso']) > 0:
|
||||||
chunks.append('\nSee also: ~')
|
chunks.append('\nSee also: ~')
|
||||||
for child in groups['seealso']:
|
for child in groups['seealso']:
|
||||||
chunks.append(render_node(child, chunks[-1][-1], indent=indent, width=width))
|
chunks.append(render_node(
|
||||||
|
child, chunks[-1][-1], indent=indent, width=width))
|
||||||
for child in groups['xrefs']:
|
for child in groups['xrefs']:
|
||||||
title = get_text(get_child(child, 'xreftitle'))
|
title = get_text(get_child(child, 'xreftitle'))
|
||||||
xrefs.add(title)
|
xrefs.add(title)
|
||||||
xrefdesc = render_para(get_child(child, 'xrefdescription'), width=width)
|
xrefdesc = render_para(get_child(child, 'xrefdescription'), width=width)
|
||||||
chunks.append(doc_wrap(xrefdesc, prefix='{}: '.format(title),
|
chunks.append(doc_wrap(xrefdesc, prefix='{}: '.format(title),
|
||||||
width=width) + '\n')
|
width=width) + '\n')
|
||||||
|
|
||||||
return clean_lines('\n'.join(chunks).strip())
|
return clean_lines('\n'.join(chunks).strip())
|
||||||
|
|
||||||
@ -587,6 +597,7 @@ def delete_lines_below(filename, tokenstr):
|
|||||||
with open(filename, 'wt') as fp:
|
with open(filename, 'wt') as fp:
|
||||||
fp.writelines(lines[0:i])
|
fp.writelines(lines[0:i])
|
||||||
|
|
||||||
|
|
||||||
def gen_docs(config):
|
def gen_docs(config):
|
||||||
"""Generate documentation.
|
"""Generate documentation.
|
||||||
|
|
||||||
@ -619,7 +630,8 @@ def gen_docs(config):
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
groupname = get_text(find_first(compound, 'name'))
|
groupname = get_text(find_first(compound, 'name'))
|
||||||
groupxml = os.path.join(base, '%s.xml' % compound.getAttribute('refid'))
|
groupxml = os.path.join(base, '%s.xml' %
|
||||||
|
compound.getAttribute('refid'))
|
||||||
|
|
||||||
desc = find_first(minidom.parse(groupxml), 'detaileddescription')
|
desc = find_first(minidom.parse(groupxml), 'detaileddescription')
|
||||||
if desc:
|
if desc:
|
||||||
@ -635,7 +647,7 @@ def gen_docs(config):
|
|||||||
if filename.endswith('.c') or filename.endswith('.lua'):
|
if filename.endswith('.c') or filename.endswith('.lua'):
|
||||||
functions, deprecated = parse_source_xml(
|
functions, deprecated = parse_source_xml(
|
||||||
os.path.join(base, '%s.xml' %
|
os.path.join(base, '%s.xml' %
|
||||||
compound.getAttribute('refid')), mode)
|
compound.getAttribute('refid')), mode)
|
||||||
|
|
||||||
if not functions and not deprecated:
|
if not functions and not deprecated:
|
||||||
continue
|
continue
|
||||||
@ -680,12 +692,15 @@ def gen_docs(config):
|
|||||||
i = 0
|
i = 0
|
||||||
for filename in CONFIG[mode]['section_order']:
|
for filename in CONFIG[mode]['section_order']:
|
||||||
if filename not in sections:
|
if filename not in sections:
|
||||||
raise RuntimeError('found new module "{}"; update the "section_order" map'.format(filename))
|
raise RuntimeError(
|
||||||
|
'found new module "{}"; update the "section_order" map'.format(
|
||||||
|
filename))
|
||||||
title, helptag, section_doc = sections.pop(filename)
|
title, helptag, section_doc = sections.pop(filename)
|
||||||
i += 1
|
i += 1
|
||||||
if filename not in CONFIG[mode]['append_only']:
|
if filename not in CONFIG[mode]['append_only']:
|
||||||
docs += sep
|
docs += sep
|
||||||
docs += '\n%s%s' % (title, helptag.rjust(text_width - len(title)))
|
docs += '\n%s%s' % (title,
|
||||||
|
helptag.rjust(text_width - len(title)))
|
||||||
docs += section_doc
|
docs += section_doc
|
||||||
docs += '\n\n\n'
|
docs += '\n\n\n'
|
||||||
|
|
||||||
@ -693,7 +708,7 @@ def gen_docs(config):
|
|||||||
docs += ' vim:tw=78:ts=8:ft=help:norl:\n'
|
docs += ' vim:tw=78:ts=8:ft=help:norl:\n'
|
||||||
|
|
||||||
doc_file = os.path.join(base_dir, 'runtime', 'doc',
|
doc_file = os.path.join(base_dir, 'runtime', 'doc',
|
||||||
CONFIG[mode]['filename'])
|
CONFIG[mode]['filename'])
|
||||||
|
|
||||||
delete_lines_below(doc_file, CONFIG[mode]['section_start_token'])
|
delete_lines_below(doc_file, CONFIG[mode]['section_start_token'])
|
||||||
with open(doc_file, 'ab') as fp:
|
with open(doc_file, 'ab') as fp:
|
||||||
|

@@ -12,27 +12,27 @@ import msgpack


 class EntryTypes(Enum):
     Unknown = -1
     Missing = 0
     Header = 1
     SearchPattern = 2
     SubString = 3
     HistoryEntry = 4
     Register = 5
     Variable = 6
     GlobalMark = 7
     Jump = 8
     BufferList = 9
     LocalMark = 10
     Change = 11


 def strtrans_errors(e):
     if not isinstance(e, UnicodeDecodeError):
         raise NotImplementedError('don’t know how to handle {0} error'.format(
             e.__class__.__name__))
     return '<{0:x}>'.format(reduce((lambda a, b: a*0x100+b),
                                    list(e.object[e.start:e.end]))), e.end


 codecs.register_error('strtrans', strtrans_errors)
@@ -56,54 +56,54 @@ ctable = {


 def mnormalize(o):
     return ctable.get(type(o), idfunc)(o)


 fname = sys.argv[1]
 try:
     filt = sys.argv[2]
 except IndexError:
-    filt = lambda entry: True
+    def filt(entry): return True
 else:
     _filt = filt
-    filt = lambda entry: eval(_filt, globals(), {'entry': entry})
+    def filt(entry): return eval(_filt, globals(), {'entry': entry})

 poswidth = len(str(os.stat(fname).st_size or 1000))


 class FullEntry(dict):
     def __init__(self, val):
         self.__dict__.update(val)


 with open(fname, 'rb') as fp:
     unpacker = msgpack.Unpacker(file_like=fp, read_size=1)
     max_type = max(typ.value for typ in EntryTypes)
     while True:
         try:
             pos = fp.tell()
             typ = unpacker.unpack()
         except msgpack.OutOfData:
             break
         else:
             timestamp = unpacker.unpack()
             time = datetime.fromtimestamp(timestamp)
             length = unpacker.unpack()
             if typ > max_type:
                 entry = fp.read(length)
                 typ = EntryTypes.Unknown
             else:
                 entry = unpacker.unpack()
                 typ = EntryTypes(typ)
             full_entry = FullEntry({
                 'value': entry,
                 'timestamp': timestamp,
                 'time': time,
                 'length': length,
                 'pos': pos,
                 'type': typ,
             })
             if not filt(full_entry):
                 continue
             print('%*u %13s %s %5u %r' % (
                 poswidth, pos, typ.name, time.isoformat(), length, mnormalize(entry)))
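
The filt rewrite above follows flake8's E731 rule: don't assign a lambda expression to a name, use a def. A small illustration of the two spellings (the names below are made up, not taken from the script):

    # Flagged by flake8 (E731): the lambda hides the function name in tracebacks
    # and offers nothing that a def does not.
    is_small = lambda n: n < 10


    # Lint-clean equivalent; the script keeps its def on one line, which is
    # simply a more compact spelling of the same thing.
    def is_small_def(n):
        return n < 10


    assert is_small(3) and is_small_def(3)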

@@ -10,7 +10,7 @@ import os


 DECL_KINDS = {
     CursorKind.FUNCTION_DECL,
 }


@@ -18,122 +18,123 @@ Strip = namedtuple('Strip', 'start_line start_column end_line end_column')


 def main(progname, cfname, only_static, move_all):
     cfname = os.path.abspath(os.path.normpath(cfname))

     hfname1 = os.path.splitext(cfname)[0] + os.extsep + 'h'
     hfname2 = os.path.splitext(cfname)[0] + '_defs' + os.extsep + 'h'

     files_to_modify = (cfname, hfname1, hfname2)

     index = Index.create()
     src_dirname = os.path.join(os.path.dirname(__file__), '..', 'src')
     src_dirname = os.path.abspath(os.path.normpath(src_dirname))
     relname = os.path.join(src_dirname, 'nvim')
     unit = index.parse(cfname, args=('-I' + src_dirname,
                                      '-DUNIX',
                                      '-DEXITFREE',
                                      '-DFEAT_USR_CMDS',
                                      '-DFEAT_CMDL_COMPL',
                                      '-DFEAT_COMPL_FUNC',
                                      '-DPROTO',
                                      '-DUSE_MCH_ERRMSG'))
     cursor = unit.cursor

     tostrip = defaultdict(OrderedDict)
     definitions = set()

     for child in cursor.get_children():
         if not (child.location and child.location.file):
             continue
         fname = os.path.abspath(os.path.normpath(child.location.file.name))
         if fname not in files_to_modify:
             continue
         if child.kind not in DECL_KINDS:
             continue
         if only_static and next(child.get_tokens()).spelling == 'static':
             continue

         if child.is_definition() and fname == cfname:
             definitions.add(child.spelling)
         else:
             stripdict = tostrip[fname]
             assert(child.spelling not in stripdict)
             stripdict[child.spelling] = Strip(
                 child.extent.start.line,
                 child.extent.start.column,
                 child.extent.end.line,
                 child.extent.end.column,
             )

     for (fname, stripdict) in tostrip.items():
         if not move_all:
             for name in set(stripdict) - definitions:
                 stripdict.pop(name)

         if not stripdict:
             continue

         if fname.endswith('.h'):
             is_h_file = True
             include_line = next(reversed(stripdict.values())).start_line + 1
         else:
             is_h_file = False
             include_line = next(iter(stripdict.values())).start_line

         lines = None
         generated_existed = os.path.exists(fname + '.generated.h')
         with open(fname, 'rb') as F:
             lines = list(F)

         stripped = []

         for name, position in reversed(stripdict.items()):
             sl = slice(position.start_line - 1, position.end_line)
             if is_h_file:
                 include_line -= sl.stop - sl.start
             stripped += lines[sl]
             lines[sl] = ()

         if not generated_existed:
             lines[include_line:include_line] = [
                 '#ifdef INCLUDE_GENERATED_DECLARATIONS\n',
-                '# include "{0}.generated.h"\n'.format(os.path.relpath(fname, relname)),
+                '# include "{0}.generated.h"\n'.format(
+                    os.path.relpath(fname, relname)),
                 '#endif\n',
             ]

         with open(fname, 'wb') as F:
             F.writelines(lines)


 if __name__ == '__main__':
     progname = sys.argv[0]
     args = sys.argv[1:]
     if not args or '--help' in args:
         print('Usage:')
         print('')
         print(' {0} [--static [--all]] file.c...'.format(progname))
         print('')
         print('Stripts all declarations from file.c, file.h and file_defs.h.')
         print('If --static argument is given then only static declarations are')
         print('stripped. Declarations are stripped only if corresponding')
         print('definition is found unless --all argument was given.')
         print('')
         print('Note: it is assumed that static declarations starts with "static"')
         print(' keyword.')
         sys.exit(0 if args else 1)

     if args[0] == '--static':
         only_static = True
         args = args[1:]
     else:
         only_static = False

     if args[0] == '--all':
         move_all = True
         args = args[1:]
     else:
         move_all = False

     for cfname in args:
         print('Processing {0}'.format(cfname))
         main(progname, cfname, only_static, move_all)

src/clint.py | 13
@@ -2610,9 +2610,13 @@ def CheckBraces(filename, clean_lines, linenum, error):
               'Brace starting function body must be placed on its own line')
       else:
         func_start_linenum = end_linenum + 1
-        while not clean_lines.lines[func_start_linenum] == '{':
-          attrline = Match(r'^((?!# *define).*?)(?:FUNC_ATTR|FUNC_API|REAL_FATTR)_\w+(?:\(\d+(, \d+)*\))?',
-                           clean_lines.lines[func_start_linenum])
+        while not clean_lines.lines[func_start_linenum] == "{":
+          attrline = Match(
+              r'^((?!# *define).*?)'
+              r'(?:FUNC_ATTR|FUNC_API|REAL_FATTR)_\w+'
+              r'(?:\(\d+(, \d+)*\))?',
+              clean_lines.lines[func_start_linenum],
+          )
           if attrline:
             if len(attrline.group(1)) != 2:
               error(filename, func_start_linenum,
@@ -3182,7 +3186,8 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
                      r'|li_(?:next|prev|tv))\b', line)
   if match:
     error(filename, linenum, 'runtime/deprecated', 4,
-          'Accessing list_T internals directly is prohibited (hint: see commit d46e37cb4c71)')
+          'Accessing list_T internals directly is prohibited '
+          '(hint: see commit d46e37cb4c71)')

   # Check for suspicious usage of "if" like
   # } if (a == b) {
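
Both clint.py fixes split an over-long line by relying on Python's implicit concatenation of adjacent string literals, so no '+' or backslash continuation is needed. A small sketch of the idea (the pattern and message below are placeholders, not clint's actual ones):

    import re

    # Adjacent string literals are joined at compile time, which lets a long
    # regex or message wrap across several short lines.
    PATTERN = re.compile(
        r'^(?!#\s*define)'    # skip preprocessor defines
        r'\w+'                # an identifier
        r'\(\d+(, \d+)*\)$'   # an argument list of integers
    )
    MESSAGE = ('Accessing internals directly is prohibited '
               '(hint: see the style guide)')

    assert PATTERN.match('FUNC_ATTR_X(1, 2)')
    print(MESSAGE)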

@@ -8,6 +8,7 @@ import locale
 import io
 import sys

+
 def set_output_encoding(enc=None):
     """Set the encoding of stdout and stderr

@@ -20,7 +21,7 @@ def set_output_encoding(enc=None):

     def get_text_writer(fo, **kwargs):
         kw = dict(kwargs)
         kw.setdefault('errors', 'backslashreplace') # use \uXXXX style
         kw.setdefault('closefd', False)

         if sys.version_info[0] < 3:
@@ -29,6 +30,7 @@ def set_output_encoding(enc=None):
             writer = io.open(fo.fileno(), mode='w', newline='', **kw)
             write = writer.write # save the original write() function
             enc = locale.getpreferredencoding()
+
             def convwrite(s):
                 if isinstance(s, bytes):
                     write(s.decode(enc)) # convert to unistr