Mirror of https://github.com/sphinx-doc/sphinx.git, synced 2025-02-25 18:55:22 -06:00
Complete test suite overhaul.
* rename a few test modules to make the names more consistent
* do not copy/use Sphinx from build/ (unnecessary without 2to3)
* use a temporary dir for *all* test projects, so the source tree stays pristine (default is tests/build)
* speed up tests by ~3x by splitting up test projects and avoiding rebuilds
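In practice the overhaul replaces tests that generate their own sources with tests that read from a prepared test root. A minimal sketch of that shape, assuming only the with_app()/testroot/build_update machinery visible in the diff below (the test name and the final assertion are illustrative, not part of the commit):

    from util import with_app


    @with_app(buildername='text', testroot='build-text')  # same kwargs with_text_app() supplies below
    def test_example_output(app, status, warning):        # hypothetical test, for illustration only
        # build_update() rebuilds only what changed, so several tests sharing
        # one test root do not pay for repeated full builds.
        app.builder.build_update()
        result = (app.outdir / 'maxwidth.txt').text(encoding='utf-8')
        assert result  # a real test would compare against the expected text output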
@@ -18,29 +18,16 @@ from util import with_app
 def with_text_app(*args, **kw):
     default_kw = {
         'buildername': 'text',
-        'srcdir': '(empty)',
-        'confoverrides': {
-            'project': 'text',
-            'master_doc': 'contents',
-        },
+        'testroot': 'build-text',
     }
     default_kw.update(kw)
     return with_app(*args, **default_kw)


 @with_text_app()
-def test_maxwitdh_with_prefix(app):
-    long_string = u' '.join([u"ham"] * 30)
-    contents = (
-            u".. seealso:: %(long_string)s\n\n"
-            u"* %(long_string)s\n"
-            u"* %(long_string)s\n"
-            u"\nspam egg\n"
-            ) % locals()
-
-    (app.srcdir / 'contents.rst').write_text(contents, encoding='utf-8')
-    app.builder.build_all()
-    result = (app.outdir / 'contents.txt').text(encoding='utf-8')
+def test_maxwitdh_with_prefix(app, status, warning):
+    app.builder.build_update()
+    result = (app.outdir / 'maxwidth.txt').text(encoding='utf-8')

     lines = result.splitlines()
     line_widths = [column_width(line) for line in lines]
@@ -58,105 +45,52 @@ def test_maxwitdh_with_prefix(app):


 @with_text_app()
-def test_lineblock(app):
+def test_lineblock(app, status, warning):
     # regression test for #1109: need empty line after line block
-    contents = (
-            u"* one\n"
-            u"\n"
-            u"  | line-block 1\n"
-            u"  | line-block 2\n"
-            u"\n"
-            u"followed paragraph.\n"
-            )
-
-    (app.srcdir / 'contents.rst').write_text(contents, encoding='utf-8')
-    app.builder.build_all()
-    result = (app.outdir / 'contents.txt').text(encoding='utf-8')
-
+    app.builder.build_update()
+    result = (app.outdir / 'lineblock.txt').text(encoding='utf-8')
     expect = (
-            u"* one\n"
-            u"\n"
-            u"  line-block 1\n"
-            u"  line-block 2\n"
-            u"\n"
-            u"followed paragraph.\n"
-            )
-
+        u"* one\n"
+        u"\n"
+        u"  line-block 1\n"
+        u"  line-block 2\n"
+        u"\n"
+        u"followed paragraph.\n"
+    )
     assert result == expect


 @with_text_app()
-def test_nonascii_title_line(app):
-    title = u'\u65e5\u672c\u8a9e'
-    underline = u'=' * column_width(title)
-    content = u'\n'.join((title, underline, u''))
-
-    (app.srcdir / 'contents.rst').write_text(content, encoding='utf-8')
-    app.builder.build_all()
-    result = (app.outdir / 'contents.txt').text(encoding='utf-8')
-
-    expect_underline = underline.replace('=', '*')
+def test_nonascii_title_line(app, status, warning):
+    app.builder.build_update()
+    result = (app.outdir / 'nonascii_title.txt').text(encoding='utf-8')
+    expect_underline = '******'
     result_underline = result.splitlines()[2].strip()
     assert expect_underline == result_underline


 @with_text_app()
-def test_nonascii_table(app):
-    text = u'\u65e5\u672c\u8a9e'
-    contents = (u"\n.. list-table::"
-                "\n"
-                "\n - - spam"
-                "\n - egg"
-                "\n"
-                "\n - - %(text)s"
-                "\n - %(text)s"
-                "\n" % locals())
-
-    (app.srcdir / 'contents.rst').write_text(contents, encoding='utf-8')
-    app.builder.build_all()
-    result = (app.outdir / 'contents.txt').text(encoding='utf-8')
-
+def test_nonascii_table(app, status, warning):
+    app.builder.build_update()
+    result = (app.outdir / 'nonascii_table.txt').text(encoding='utf-8')
     lines = [line.strip() for line in result.splitlines() if line.strip()]
     line_widths = [column_width(line) for line in lines]
     assert len(set(line_widths)) == 1  # same widths


 @with_text_app()
-def test_nonascii_maxwidth(app):
-    sb_text = u'abc' #length=3
-    mb_text = u'\u65e5\u672c\u8a9e' #length=3
-
-    sb_line = ' '.join([sb_text] * int(MAXWIDTH / 3))
-    mb_line = ' '.join([mb_text] * int(MAXWIDTH / 3))
-    mix_line = ' '.join([sb_text, mb_text] * int(MAXWIDTH / 6))
-
-    contents = u'\n\n'.join((sb_line, mb_line, mix_line))
-
-    (app.srcdir / 'contents.rst').write_text(contents, encoding='utf-8')
-    app.builder.build_all()
-    result = (app.outdir / 'contents.txt').text(encoding='utf-8')
-
+def test_nonascii_maxwidth(app, status, warning):
+    app.builder.build_update()
+    result = (app.outdir / 'nonascii_maxwidth.txt').text(encoding='utf-8')
     lines = [line.strip() for line in result.splitlines() if line.strip()]
     line_widths = [column_width(line) for line in lines]
     assert max(line_widths) < MAXWIDTH


 @with_text_app()
-def test_table_with_empty_cell(app):
-    contents = (u"""
-        +-----+-----+
-        | XXX | XXX |
-        +-----+-----+
-        |     | XXX |
-        +-----+-----+
-        | XXX |     |
-        +-----+-----+
-        """)
-
-    (app.srcdir / 'contents.rst').write_text(contents, encoding='utf-8')
-    app.builder.build_all()
-    result = (app.outdir / 'contents.txt').text(encoding='utf-8')
-
+def test_table_with_empty_cell(app, status, warning):
+    app.builder.build_update()
+    result = (app.outdir / 'table.txt').text(encoding='utf-8')
     lines = [line.strip() for line in result.splitlines() if line.strip()]
     assert lines[0] == "+-------+-------+"
     assert lines[1] == "| XXX   | XXX   |"
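The 'build-text' test root itself is not part of this diff. Assuming testroot='build-text' resolves to a directory such as tests/roots/test-build-text, and given that the text builder writes one .txt file per source document, a plausible layout implied by the outputs read above would be (an inference, not shown in the commit):

    tests/roots/test-build-text/
        conf.py                 # minimal project config for the text builder
        contents.rst            # master document pulling in the pages below
        maxwidth.rst            # read back as maxwidth.txt by test_maxwitdh_with_prefix
        lineblock.rst           # read back as lineblock.txt by test_lineblock
        nonascii_title.rst      # read back by test_nonascii_title_line
        nonascii_table.rst      # read back by test_nonascii_table
        nonascii_maxwidth.rst   # read back by test_nonascii_maxwidth
        table.rst               # read back as table.txt by test_table_with_empty_cell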