Add new tests for the attribute feature and fix existing tests.

Georg Brandl 2009-01-04 20:27:37 +01:00
parent cae384f5ff
commit 27bc4e3189
3 changed files with 102 additions and 85 deletions
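
For context: the "attribute feature" exercised by the new tests lets autodoc document plain assignments, picking the text up either from a #: comment preceding the assignment or from a string literal immediately following it, as the test fodder added at the end of this commit shows. A minimal sketch of the two conventions (names are illustrative only):

    class Example(object):
        #: documented via a preceding comment
        attr = 'bar'

        docattr = 'baz'
        """documented via a string literal on the following line"""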

View File

@@ -21,7 +21,7 @@ from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from sphinx.util import rpartition, nested_parse_with_titles
from sphinx.util import rpartition, nested_parse_with_titles, force_decode
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.util.docstrings import prepare_docstring
@@ -31,8 +31,6 @@ try:
except NameError:
base_exception = Exception
_charset_re = re.compile(r'coding[:=]\s*([-\w.]+)')
_module_charsets = {}
py_ext_sig_re = re.compile(
r'''^ ([\w.]+::)? # explicit module name
@@ -173,27 +171,6 @@ def isdescriptor(x):
return False
def get_module_charset(module):
"""Return the charset of the given module (cached in _module_charsets)."""
if module in _module_charsets:
return _module_charsets[module]
try:
filename = __import__(module, None, None, ['foo']).__file__
except (ImportError, AttributeError):
return None
if filename[-4:].lower() in ('.pyc', '.pyo'):
filename = filename[:-1]
for line in [linecache.getline(filename, x) for x in (1, 2)]:
match = _charset_re.search(line)
if match is not None:
charset = match.group(1)
break
else:
charset = 'ascii'
_module_charsets[module] = charset
return charset
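
The charset guessing removed above is superseded by the new sphinx.util.force_decode helper (added in the second file of this commit) together with the source encoding reported by the module analyzer. Roughly, the replacement path looks like this (a sketch, not a literal excerpt from the diff):

    encoding = analyzer and analyzer.encoding       # None when no source file is available
    unicode_docstring = force_decode(docstring, encoding)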
class RstGenerator(object):
def __init__(self, options, document, lineno):
self.options = options
@@ -207,15 +184,19 @@ class RstGenerator(object):
def warn(self, msg):
self.warnings.append(self.reporter.warning(msg, line=self.lineno))
def get_doc(self, what, name, obj):
"""Format and yield lines of the docstring(s) for the object."""
def get_doc(self, what, obj, encoding=None):
"""Decode and return lines of the docstring(s) for the object."""
docstrings = []
# add the regular docstring if present
if getattr(obj, '__doc__', None):
docstrings.append(obj.__doc__)
# skip some lines in module docstrings if configured
# skip some lines in module docstrings if configured (deprecated!)
if what == 'module' and self.env.config.automodule_skip_lines and docstrings:
docstrings[0] = '\n'.join(docstrings[0].splitlines()
[self.env.config.automodule_skip_lines:])
# for classes, what the "docstring" is can be controlled via an option
if what in ('class', 'exception'):
content = self.env.config.autoclass_content
@@ -231,24 +212,12 @@ class RstGenerator(object):
docstrings.append(initdocstring)
# the default is only the class docstring
# decode the docstrings using the module's source encoding
charset = None
module = getattr(obj, '__module__', None)
if module is not None:
charset = get_module_charset(module)
# make sure we get Unicode docstrings
return [force_decode(docstring, encoding) for docstring in docstrings]
for docstring in docstrings:
if isinstance(docstring, str):
if charset:
docstring = docstring.decode(charset)
else:
try:
# try decoding with utf-8, should only work for real UTF-8
docstring = docstring.decode('utf-8')
except UnicodeError:
# last resort -- can't fail
docstring = docstring.decode('latin1')
docstringlines = prepare_docstring(docstring)
def process_doc(self, docstrings, what, name, obj):
"""Let the user process the docstrings."""
for docstringlines in docstrings:
if self.env.app:
# let extensions preprocess docstrings
self.env.app.emit('autodoc-process-docstring',
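
The event emitted here is the same autodoc-process-docstring hook that the updated tests below exercise via cut_lines() and between(). A minimal handler sketch (handler name and body are illustrative; the argument list is the standard one for this event and is not spelled out in this hunk; app is the running Sphinx application):

    def trim_first_line(app, what, name, obj, options, lines):
        # mutate the list of docstring lines in place, e.g. drop the first line
        if lines:
            del lines[0]

    app.connect('autodoc-process-docstring', trim_first_line)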
@@ -397,24 +366,25 @@ class RstGenerator(object):
# now, import the module and get object to document
try:
todoc = module = __import__(mod, None, None, ['foo'])
if hasattr(module, '__file__') and module.__file__:
modfile = module.__file__
if modfile[-4:].lower() in ('.pyc', '.pyo'):
modfile = modfile[:-1]
self.filename_set.add(modfile)
else:
modfile = None # e.g. for builtin and C modules
__import__(mod)
todoc = module = sys.modules[mod]
for part in objpath:
todoc = getattr(todoc, part)
# also get a source code analyzer for attribute docs
analyzer = ModuleAnalyzer.for_module(mod)
except (ImportError, AttributeError, PycodeError), err:
self.warn('autodoc can\'t import/find %s %r, it reported error: "%s", '
'please check your spelling and sys.path' %
(what, str(fullname), err))
return
# try to also get a source code analyzer for attribute docs
try:
analyzer = ModuleAnalyzer.for_module(mod)
except PycodeError, err:
# no source file -- e.g. for builtin and C modules
analyzer = None
else:
self.filename_set.add(analyzer.srcname)
# check __module__ of object if wanted (for members not given explicitly)
if check_module:
if hasattr(todoc, '__module__'):
@@ -473,23 +443,29 @@ class RstGenerator(object):
if what != 'module':
indent += u' '
if modfile:
sourcename = '%s:docstring of %s' % (modfile, fullname)
# add content from attribute documentation
if analyzer:
sourcename = '%s:docstring of %s' % (analyzer.srcname, fullname)
attr_docs = analyzer.find_attr_docs()
if what in ('data', 'attribute'):
key = ('.'.join(objpath[:-1]), objpath[-1])
if key in attr_docs:
no_docstring = True
docstrings = [attr_docs[key]]
for i, line in enumerate(self.process_doc(docstrings, what,
fullname, todoc)):
self.result.append(indent + line, sourcename, i)
else:
sourcename = 'docstring of %s' % fullname
# add content from attribute documentation
attr_docs = analyzer.find_attr_docs()
if what in ('data', 'attribute'):
key = ('.'.join(objpath[:-1]), objpath[-1])
if key in attr_docs:
no_docstring = True
for i, line in enumerate(attr_docs[key]):
self.result.append(indent + line, sourcename, i)
attr_docs = {}
# add content from docstrings
if not no_docstring:
for i, line in enumerate(self.get_doc(what, fullname, todoc)):
encoding = analyzer and analyzer.encoding
docstrings = map(prepare_docstring,
self.get_doc(what, todoc, encoding))
for i, line in enumerate(self.process_doc(docstrings, what,
fullname, todoc)):
self.result.append(indent + line, sourcename, i)
# add source content, if present
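
For reference, the analyzer interface relied on above, as far as this diff shows it (the module name is illustrative; the key layout of find_attr_docs() is inferred from the key construction in the hunk):

    analyzer = ModuleAnalyzer.for_module('test_autodoc')  # raises PycodeError for builtin/C modules
    analyzer.srcname                      # path of the analyzed source file
    analyzer.encoding                     # declared source encoding of that file
    attr_docs = analyzer.find_attr_docs()
    # maps ('Class', 'attr') -- or ('', 'name') for module-level data -- to a list of docstring lines
    lines = attr_docs[('Class', 'attr')]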

View File

@@ -347,3 +347,17 @@ def parselinenos(spec, total):
except Exception, err:
raise ValueError('invalid line number spec: %r' % spec)
return items
def force_decode(string, encoding):
if isinstance(string, str):
if encoding:
string = string.decode(encoding)
else:
try:
# try decoding with utf-8, should only work for real UTF-8
string = string.decode('utf-8')
except UnicodeError:
# last resort -- can't fail
string = string.decode('latin1')
return string
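
A few illustrative calls (Python 2 semantics; the byte strings are chosen only to show the fallback order):

    force_decode(u'already unicode', None)       # unicode input is returned unchanged
    force_decode('plain ascii', None)            # decodes cleanly as UTF-8
    force_decode('s\xc3\xbc\xc3\x9f', 'utf-8')   # explicit encoding -> u'süß'
    force_decode('s\xfc\xdf', None)              # not valid UTF-8, falls back to latin-1 -> u'süß'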

View File

@@ -15,6 +15,7 @@ from util import *
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import RstGenerator, cut_lines, between
from sphinx.util.docstrings import prepare_docstring
def setup_module():
@@ -173,13 +174,14 @@ def test_format_signature():
def test_get_doc():
def getdocl(*args):
# strip the empty line at the end
return list(gen.get_doc(*args))[:-1]
ds = map(prepare_docstring, gen.get_doc(*args))
# for testing purposes, concat them and strip the empty line at the end
return sum(ds, [])[:-1]
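
(The reworked helper flattens the per-docstring line lists: for a single one-line docstring, gen.get_doc() yields [u'Docstring'], prepare_docstring() turns that into ['Docstring', ''], and sum(ds, [])[:-1] drops the trailing blank line again:)

    ds = map(prepare_docstring, [u'Docstring'])  # -> [['Docstring', '']]
    sum(ds, [])[:-1]                             # -> ['Docstring']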
# objects without docstring
def f():
pass
assert getdocl('function', 'f', f) == []
assert getdocl('function', f) == []
# standard function, diverse docstring styles...
def f():
@@ -189,7 +191,7 @@ def test_get_doc():
Docstring
"""
for func in (f, g):
assert getdocl('function', 'f', func) == ['Docstring']
assert getdocl('function', func) == ['Docstring']
# first line vs. other lines indentation
def f():
@@ -198,17 +200,17 @@ def test_get_doc():
Other
lines
"""
assert getdocl('function', 'f', f) == ['First line', '', 'Other', ' lines']
assert getdocl('function', f) == ['First line', '', 'Other', ' lines']
# charset guessing (this module is encoded in utf-8)
def f():
"""Döcstring"""
assert getdocl('function', 'f', f) == [u'Döcstring']
assert getdocl('function', f) == [u'Döcstring']
# already-unicode docstrings must be taken literally
def f():
u"""Döcstring"""
assert getdocl('function', 'f', f) == [u'Döcstring']
assert getdocl('function', f) == [u'Döcstring']
# class docstring: depends on config value which one is taken
class C:
@@ -216,11 +218,11 @@ def test_get_doc():
def __init__(self):
"""Init docstring"""
gen.env.config.autoclass_content = 'class'
assert getdocl('class', 'C', C) == ['Class docstring']
assert getdocl('class', C) == ['Class docstring']
gen.env.config.autoclass_content = 'init'
assert getdocl('class', 'C', C) == ['Init docstring']
assert getdocl('class', C) == ['Init docstring']
gen.env.config.autoclass_content = 'both'
assert getdocl('class', 'C', C) == ['Class docstring', '', 'Init docstring']
assert getdocl('class', C) == ['Class docstring', '', 'Init docstring']
class D:
"""Class docstring"""
@@ -232,18 +234,22 @@ def test_get_doc():
"""
# Indentation is normalized for 'both'
assert getdocl('class', 'D', D) == ['Class docstring', '', 'Init docstring',
'', 'Other', ' lines']
assert getdocl('class', D) == ['Class docstring', '', 'Init docstring',
'', 'Other', ' lines']
def test_docstring_processing():
def process(what, name, obj):
return list(gen.process_doc(map(prepare_docstring, gen.get_doc(what, obj)),
what, name, obj))
class E:
def __init__(self):
"""Init docstring"""
# docstring processing by event handler
assert getdocl('class', 'bar', E) == ['Init docstring', '', '42']
assert process('class', 'bar', E) == ['Init docstring', '', '42', '']
def test_docstring_processing_functions():
lid = app.connect('autodoc-process-docstring', cut_lines(1, 1, ['function']))
def f():
"""
@@ -251,7 +257,7 @@ def test_docstring_processing_functions():
second line
third line
"""
assert list(gen.get_doc('function', 'f', f)) == ['second line', '']
assert process('function', 'f', f) == ['second line', '']
app.disconnect(lid)
lid = app.connect('autodoc-process-docstring', between('---', ['function']))
@@ -263,7 +269,7 @@ def test_docstring_processing_functions():
---
third line
"""
assert list(gen.get_doc('function', 'f', f)) == ['second line', '']
assert process('function', 'f', f) == ['second line', '']
app.disconnect(lid)
@@ -289,7 +295,7 @@ def test_generate():
def assert_result_contains(item, *args):
gen.generate(*args)
print '\n'.join(gen.result)
#print '\n'.join(gen.result)
assert len(gen.warnings) == 0, gen.warnings
assert item in gen.result
del gen.result[:]
@@ -325,7 +331,10 @@ def test_generate():
assert_processes(should, 'class', 'Class', [], None)
should.extend([('method', 'test_autodoc.Class.meth')])
assert_processes(should, 'class', 'Class', ['meth'], None)
should.extend([('attribute', 'test_autodoc.Class.prop')])
should.extend([('attribute', 'test_autodoc.Class.prop'),
('attribute', 'test_autodoc.Class.attr'),
('attribute', 'test_autodoc.Class.docattr'),
('attribute', 'test_autodoc.Class.udocattr')])
assert_processes(should, 'class', 'Class', ['__all__'], None)
options.undoc_members = True
should.append(('method', 'test_autodoc.Class.undocmeth'))
@@ -369,6 +378,11 @@ def test_generate():
('method', 'test_autodoc.Outer.Inner.meth')],
'class', 'Outer', ['__all__'], None)
# test generation for C modules (which have no source file)
gen.env.currmodule = 'time'
assert_processes([('function', 'time.asctime')], 'function', 'asctime', [], None)
assert_processes([('function', 'time.asctime')], 'function', 'asctime', [], None)
# --- generate fodder ------------
@@ -398,10 +412,22 @@ class Class(Base):
"""Method that should be skipped."""
pass
# should not be documented
skipattr = 'foo'
#: should be documented -- süß
attr = 'bar'
@property
def prop(self):
"""Property."""
docattr = 'baz'
"""should likewise be documented -- süß"""
udocattr = 'quux'
u"""should be documented as well - süß"""
class CustomDict(dict):
"""Docstring."""
@@ -421,4 +447,5 @@ class Outer(object):
def meth(self):
"""Foo"""
# should be documented as an alias
factory = dict