merge with 1.0

This commit is contained in:
Georg Brandl 2011-05-15 13:52:48 +02:00
commit 10287a6684
9 changed files with 108 additions and 33 deletions

16
CHANGES
View File

@@ -102,6 +102,22 @@ Features added
Release 1.0.8 (in development)
==============================
* #657: viewcode now works correctly with source files that have
non-ASCII encoding.
* #669: Respect the ``noindex`` flag option in py:module directives.
* #675: Fix IndexErrors when including nonexisting lines with
:rst:dir:`literalinclude`.
* #676: Respect custom function/method parameter separator strings.
* #682: Fix JS incompatibility with jQuery >= 1.5.
* #693: Fix double encoding done when writing HTMLHelp .hhk files.
* #647: Do not apply SmartyPants in parsed-literal blocks.
Release 1.0.7 (Jan 15, 2011)
============================

View File

@@ -198,7 +198,11 @@ Overriding works like this::
Add additional script files here, like this::
{% set script_files = script_files + [pathto("_static/myscript.js", 1)] %}
{% set script_files = script_files + ["_static/myscript.js"] %}
.. data:: css_files
Similar to :data:`script_files`, for CSS files.
Helper Functions

View File

@@ -259,8 +259,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
def write_index(title, refs, subitems):
def write_param(name, value):
item = ' <param name="%s" value="%s">\n' % (name, value)
f.write(item.encode(self.encoding, 'xmlcharrefreplace')
.decode(self.encoding))
f.write(item)
title = cgi.escape(title)
f.write('<LI> <OBJECT type="text/sitemap">\n')
write_param('Keyword', title)

View File

@@ -138,7 +138,13 @@ class LiteralInclude(Directive):
linelist = parselinenos(linespec, len(lines))
except ValueError, err:
return [document.reporter.warning(str(err), line=self.lineno)]
lines = [lines[i] for i in linelist]
# just ignore nonexisting lines
nlines = len(lines)
lines = [lines[i] for i in linelist if i < nlines]
if not lines:
return [document.reporter.warning(
'Line spec %r: no lines pulled from include file %r' %
(linespec, filename), line=self.lineno)]
startafter = self.options.get('start-after')
endbefore = self.options.get('end-before')

View File

@@ -418,18 +418,19 @@ class PyModule(Directive):
modname = self.arguments[0].strip()
noindex = 'noindex' in self.options
env.temp_data['py:module'] = modname
env.domaindata['py']['modules'][modname] = \
(env.docname, self.options.get('synopsis', ''),
self.options.get('platform', ''), 'deprecated' in self.options)
# make a duplicate entry in 'objects' to facilitate searching for the
# module in PythonDomain.find_obj()
env.domaindata['py']['objects'][modname] = (env.docname, 'module')
targetnode = nodes.target('', '', ids=['module-' + modname], ismod=True)
self.state.document.note_explicit_target(targetnode)
ret = [targetnode]
# the platform and synopsis aren't printed; in fact, they are only used
# in the modindex currently
ret = []
if not noindex:
env.domaindata['py']['modules'][modname] = \
(env.docname, self.options.get('synopsis', ''),
self.options.get('platform', ''), 'deprecated' in self.options)
# make a duplicate entry in 'objects' to facilitate searching for the
# module in PythonDomain.find_obj()
env.domaindata['py']['objects'][modname] = (env.docname, 'module')
targetnode = nodes.target('', '', ids=['module-' + modname], ismod=True)
self.state.document.note_explicit_target(targetnode)
# the platform and synopsis aren't printed; in fact, they are only used
# in the modindex currently
ret.append(targetnode)
indextext = _('%s (module)') % modname
inode = addnodes.index(entries=[('single', indextext,
'module-' + modname, '')])

View File

@@ -17,7 +17,7 @@ from cStringIO import StringIO
from sphinx.errors import PycodeError
from sphinx.pycode import nodes
from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
from sphinx.util import get_module_source
from sphinx.util import get_module_source, detect_encoding
from sphinx.util.pycompat import next
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
@@ -38,10 +38,6 @@ for k, v in token.tok_name.iteritems():
number2name = pygrammar.number2symbol.copy()
number2name.update(token.tok_name)
# a regex to recognize coding cookies
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
_eq = nodes.Leaf(token.EQUAL, '=')
@@ -217,11 +213,10 @@ class ModuleAnalyzer(object):
self.srcname = srcname
# file-like object yielding source lines
self.source = source
# will be changed when found by parse()
self.encoding = sys.getdefaultencoding()
# cache the source code as well
pos = self.source.tell()
self.encoding = detect_encoding(self.source.readline)
self.code = self.source.read()
self.source.seek(pos)
@@ -251,13 +246,6 @@ class ModuleAnalyzer(object):
self.parsetree = pydriver.parse_tokens(self.tokens)
except parse.ParseError, err:
raise PycodeError('parsing failed', err)
# find the source code encoding, if present
comments = self.parsetree.get_prefix()
for line in comments.splitlines()[:2]:
match = _coding_re.search(line)
if match is not None:
self.encoding = match.group(1)
break
def find_attr_docs(self, scope=''):
"""Find class and module-level attributes and their documentation."""

View File

@@ -284,7 +284,7 @@ var Search = {
listItem.slideDown(5, function() {
displayNextItem();
});
});
}, "text");
} else {
// no source available, just display title
Search.output.append(listItem);

View File

@@ -18,7 +18,7 @@ import tempfile
import posixpath
import traceback
from os import path
from codecs import open
from codecs import open, BOM_UTF8
from collections import deque
import docutils
@@ -216,6 +216,59 @@ def get_module_source(modname):
return 'file', filename
# a regex to recognize coding cookies
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def detect_encoding(readline):
"""Like tokenize.detect_encoding() from Py3k, but a bit simplified."""
def read_or_stop():
try:
return readline()
except StopIteration:
return None
def get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace('_', '-')
if enc == 'utf-8' or enc.startswith('utf-8-'):
return 'utf-8'
if enc in ('latin-1', 'iso-8859-1', 'iso-latin-1') or \
enc.startswith(('latin-1-', 'iso-8859-1-', 'iso-latin-1-')):
return 'iso-8859-1'
return orig_enc
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = _coding_re.findall(line_string)
if not matches:
return None
return get_normal_name(matches[0])
default = sys.getdefaultencoding()
first = read_or_stop()
if first and first.startswith(BOM_UTF8):
first = first[3:]
default = 'utf-8-sig'
if not first:
return default
encoding = find_cookie(first)
if encoding:
return encoding
second = read_or_stop()
if not second:
return default
encoding = find_cookie(second)
if encoding:
return encoding
return default
# Low-level utility functions and classes.
class Tee(object):

View File

@@ -116,12 +116,13 @@ class HTMLTranslator(BaseTranslator):
def visit_desc_parameterlist(self, node):
self.body.append('<big>(</big>')
self.first_param = 1
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append('<big>)</big>')
def visit_desc_parameter(self, node):
if not self.first_param:
self.body.append(', ')
self.body.append(self.param_separator)
else:
self.first_param = 0
if not node.hasattr('noemph'):
@@ -566,8 +567,15 @@ class SmartyPantsHTMLTranslator(HTMLTranslator):
self.no_smarty += 1
try:
HTMLTranslator.visit_literal_block(self, node)
finally:
except nodes.SkipNode:
# HTMLTranslator raises SkipNode for simple literal blocks,
# but not for parsed literal blocks
self.no_smarty -= 1
raise
def depart_literal_block(self, node):
HTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
def visit_literal_emphasis(self, node):
self.no_smarty += 1