merge birkenfeld/sphinx

Robert Lehmann, 2013-01-06 12:14:57 +01:00
commit 32b87e258b
69 changed files with 923 additions and 323 deletions

View File

@@ -13,6 +13,7 @@ Other contributors, listed alphabetically, are:
 * Charles Duffy -- original graphviz extension
 * Kevin Dunn -- MathJax extension
 * Josip Dzolonga -- coverage builder
+* Hernan Grecco -- search improvements
 * Horst Gutmann -- internationalization support
 * Martin Hans -- autodoc improvements
 * Doug Hellmann -- graphviz improvements

CHANGES
View File

@@ -6,6 +6,31 @@ Release 1.2 (in development)
   admonition title ("See Also" instead of "See also"), and spurious indentation
   in the text builder.
+* sphinx-build now has a verbose option :option:`-v` which can be repeated
+  for greater effect.  A single occurrence produces slightly more verbose
+  output than normal; two or more occurrences produce much more detailed
+  output, which may be useful for debugging (a sketch of the option
+  handling follows this section).
+* sphinx-build now provides more specific error messages when called with
+  invalid options or arguments.
+* sphinx-build now supports the standard :option:`--help` and
+  :option:`--version` options.
+* #869: sphinx-build now has the option :option:`-T` for printing the full
+  traceback after an unhandled exception.
+* #976: Fix gettext not extracting index entries.
+* #940: Fix gettext not extracting figure captions.
+* #1067: Improve the ordering of the JavaScript search results: matches in
+  titles come before matches in full text, and object results are better
+  categorized.  Also implement a pluggable search scorer.
+* Fix the text writer not handling visit_legend for figure directive
+  contents.
 * PR#72: #975: Fix gettext does not extract definition terms before docutils 0.10.0
 * PR#25: In inheritance diagrams, the first line of the class docstring
@@ -67,6 +92,8 @@ Release 1.2 (in development)
 * #1041: Fix cpp domain parser fails to parse a const type with a modifier.
+* #958: Do not preserve ``environment.pickle`` after a failed build.
 * PR#88: Added the "Sphinx Developer's Guide" (:file:`doc/devguide.rst`)
   which outlines the basic development process of the Sphinx project.
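A minimal sketch of the new option handling described above (Python 2, matching the codebase; it mirrors the sphinx/cmdline.py hunk further down, with an abbreviated usage text):

    import getopt
    import sys

    def parse_options(argv):
        try:
            opts, args = getopt.getopt(argv[1:], 'ab:t:d:c:CD:A:ng:NEqQWw:PThv',
                                       ['help', 'version'])
        except getopt.error, err:
            # specific error message instead of the bare usage text
            print >>sys.stderr, 'Error: %s' % err
            return None
        verbosity = 0
        show_traceback = False
        for opt, val in opts:
            if opt in ('-h', '--help'):
                print 'usage: sphinx-build [options] sourcedir outdir [filenames...]'
                return None
            elif opt == '--version':
                print 'Sphinx (sphinx-build) 1.2'
                return None
            elif opt == '-T':
                show_traceback = True
            elif opt == '-v':
                verbosity += 1          # repeated -v raises the level (-vv, ...)
                show_traceback = True   # -v also implies -T
        return verbosity, show_traceback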

View File

@@ -760,6 +760,15 @@ that use Sphinx' HTMLWriter class.
 
    .. versionadded:: 1.1
 
+.. confval:: html_search_scorer
+
+   The name of a JavaScript file (relative to the configuration directory) that
+   implements a search results scorer.  If empty, the default scorer is used.
+
+   .. XXX describe interface for scorer here
+
+   .. versionadded:: 1.2
+
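For example, in conf.py (the file name is hypothetical; its contents replace the built-in ``Scorer`` object shown in the searchtools.js_t hunk below):

    # conf.py -- path is resolved relative to the configuration directory
    html_search_scorer = 'myscorer.js'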
 .. confval:: htmlhelp_basename
 
    Output file base name for HTML help builder. Default is ``'pydoc'``.

View File

@@ -391,3 +391,6 @@ are in HTML form), these variables are also available:
 * ``titles_only`` (false by default): if true, put only toplevel document
   titles in the tree
+* ``includehidden`` (false by default): if true, the TOC tree will also
+  contain hidden entries.

View File

@@ -11,6 +11,7 @@
     :license: BSD, see LICENSE for details.
 """
 
+import os
 import sys
 import types
 import posixpath
@@ -60,7 +61,8 @@ class Sphinx(object):
     def __init__(self, srcdir, confdir, outdir, doctreedir, buildername,
                  confoverrides=None, status=sys.stdout, warning=sys.stderr,
-                 freshenv=False, warningiserror=False, tags=None):
+                 freshenv=False, warningiserror=False, tags=None, verbosity=0):
+        self.verbosity = verbosity
         self.next_listener_id = 0
         self._extensions = {}
         self._listeners = {}
@@ -203,12 +205,27 @@ class Sphinx(object):
             else:
                 self.builder.build_update()
         except Exception, err:
+            # delete the saved env to force a fresh build next time
+            envfile = path.join(self.doctreedir, ENV_PICKLE_FILENAME)
+            if path.isfile(envfile):
+                os.unlink(envfile)
             self.emit('build-finished', err)
             raise
         else:
             self.emit('build-finished', None)
         self.builder.cleanup()
 
+    def _log(self, message, wfile, nonl=False):
+        try:
+            wfile.write(message)
+        except UnicodeEncodeError:
+            encoding = getattr(wfile, 'encoding', 'ascii') or 'ascii'
+            wfile.write(message.encode(encoding, 'replace'))
+        if not nonl:
+            wfile.write('\n')
+        if hasattr(wfile, 'flush'):
+            wfile.flush()
+
     def warn(self, message, location=None, prefix='WARNING: '):
         if isinstance(location, tuple):
             docname, lineno = location
@@ -221,26 +238,30 @@ class Sphinx(object):
         if self.warningiserror:
             raise SphinxWarning(warntext)
         self._warncount += 1
-        try:
-            self._warning.write(warntext)
-        except UnicodeEncodeError:
-            encoding = getattr(self._warning, 'encoding', 'ascii') or 'ascii'
-            self._warning.write(warntext.encode(encoding, 'replace'))
+        self._log(warntext, self._warning, True)
 
     def info(self, message='', nonl=False):
-        try:
-            self._status.write(message)
-        except UnicodeEncodeError:
-            encoding = getattr(self._status, 'encoding', 'ascii') or 'ascii'
-            self._status.write(message.encode(encoding, 'replace'))
-        if not nonl:
-            self._status.write('\n')
-        self._status.flush()
+        self._log(message, self._status, nonl)
+
+    def verbose(self, message, *args, **kwargs):
+        if self.verbosity < 1:
+            return
+        if args or kwargs:
+            message = message % (args or kwargs)
+        self._log(message, self._warning)
+
+    def debug(self, message, *args, **kwargs):
+        if self.verbosity < 2:
+            return
+        if args or kwargs:
+            message = message % (args or kwargs)
+        self._log(message, self._warning)
 
     # general extensibility interface
 
     def setup_extension(self, extension):
         """Import and setup a Sphinx extension module. No-op if called twice."""
+        self.debug('setting up extension: %r', extension)
         if extension in self._extensions:
             return
         try:
@@ -301,9 +322,12 @@ class Sphinx(object):
         else:
             self._listeners[event][listener_id] = callback
         self.next_listener_id += 1
+        self.debug('connecting event %r: %r [id=%s]',
+                   event, callback, listener_id)
         return listener_id
 
     def disconnect(self, listener_id):
+        self.debug('disconnecting event: [id=%s]', listener_id)
         for event in self._listeners.itervalues():
             event.pop(listener_id, None)
@@ -323,6 +347,7 @@ class Sphinx(object):
     # registering addon parts
 
     def add_builder(self, builder):
+        self.debug('adding builder: %r', builder)
         if not hasattr(builder, 'name'):
             raise ExtensionError('Builder class %s has no "name" attribute'
                                  % builder)
@@ -337,6 +362,7 @@ class Sphinx(object):
         self.builderclasses[builder.name] = builder
 
     def add_config_value(self, name, default, rebuild):
+        self.debug('adding config value: %r', (name, default, rebuild))
         if name in self.config.values:
             raise ExtensionError('Config value %r already present' % name)
         if rebuild in (False, True):
@@ -344,11 +370,13 @@ class Sphinx(object):
         self.config.values[name] = (default, rebuild)
 
     def add_event(self, name):
+        self.debug('adding event: %r', name)
         if name in self._events:
             raise ExtensionError('Event %r already present' % name)
         self._events[name] = ''
 
     def add_node(self, node, **kwds):
+        self.debug('adding node: %r', (node, kwds))
         nodes._add_node_class_names([node.__name__])
         for key, val in kwds.iteritems():
             try:
@@ -388,24 +416,30 @@ class Sphinx(object):
         return obj
 
     def add_directive(self, name, obj, content=None, arguments=None, **options):
+        self.debug('adding directive: %r',
+                   (name, obj, content, arguments, options))
         directives.register_directive(
             name, self._directive_helper(obj, content, arguments, **options))
 
     def add_role(self, name, role):
+        self.debug('adding role: %r', (name, role))
         roles.register_local_role(name, role)
 
     def add_generic_role(self, name, nodeclass):
         # don't use roles.register_generic_role because it uses
         # register_canonical_role
+        self.debug('adding generic role: %r', (name, nodeclass))
         role = roles.GenericRole(name, nodeclass)
         roles.register_local_role(name, role)
 
     def add_domain(self, domain):
+        self.debug('adding domain: %r', domain)
         if domain.name in self.domains:
             raise ExtensionError('domain %s already registered' % domain.name)
         self.domains[domain.name] = domain
 
     def override_domain(self, domain):
+        self.debug('overriding domain: %r', domain)
         if domain.name not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain.name)
         if not issubclass(domain, self.domains[domain.name]):
@@ -415,17 +449,21 @@ class Sphinx(object):
     def add_directive_to_domain(self, domain, name, obj,
                                 content=None, arguments=None, **options):
+        self.debug('adding directive to domain: %r',
+                   (domain, name, obj, content, arguments, options))
         if domain not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain)
         self.domains[domain].directives[name] = \
             self._directive_helper(obj, content, arguments, **options)
 
     def add_role_to_domain(self, domain, name, role):
+        self.debug('adding role to domain: %r', (domain, name, role))
         if domain not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain)
         self.domains[domain].roles[name] = role
 
     def add_index_to_domain(self, domain, index):
+        self.debug('adding index to domain: %r', (domain, index))
         if domain not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain)
         self.domains[domain].indices.append(index)
@@ -433,6 +471,9 @@ class Sphinx(object):
     def add_object_type(self, directivename, rolename, indextemplate='',
                         parse_node=None, ref_nodeclass=None, objname='',
                         doc_field_types=[]):
+        self.debug('adding object type: %r',
+                   (directivename, rolename, indextemplate, parse_node,
+                    ref_nodeclass, objname, doc_field_types))
         StandardDomain.object_types[directivename] = \
             ObjType(objname or directivename, rolename)
         # create a subclass of GenericObject as the new directive
@@ -449,6 +490,9 @@ class Sphinx(object):
     def add_crossref_type(self, directivename, rolename, indextemplate='',
                           ref_nodeclass=None, objname=''):
+        self.debug('adding crossref type: %r',
+                   (directivename, rolename, indextemplate, ref_nodeclass,
+                    objname))
         StandardDomain.object_types[directivename] = \
             ObjType(objname or directivename, rolename)
         # create a subclass of Target as the new directive
@@ -459,9 +503,11 @@ class Sphinx(object):
         StandardDomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass)
 
     def add_transform(self, transform):
+        self.debug('adding transform: %r', transform)
         SphinxStandaloneReader.transforms.append(transform)
 
     def add_javascript(self, filename):
+        self.debug('adding javascript: %r', filename)
         from sphinx.builders.html import StandaloneHTMLBuilder
         if '://' in filename:
             StandaloneHTMLBuilder.script_files.append(filename)
@@ -470,6 +516,7 @@ class Sphinx(object):
                 posixpath.join('_static', filename))
 
     def add_stylesheet(self, filename):
+        self.debug('adding stylesheet: %r', filename)
         from sphinx.builders.html import StandaloneHTMLBuilder
         if '://' in filename:
             StandaloneHTMLBuilder.css_files.append(filename)
@@ -478,21 +525,25 @@ class Sphinx(object):
                 posixpath.join('_static', filename))
 
     def add_lexer(self, alias, lexer):
+        self.debug('adding lexer: %r', (alias, lexer))
         from sphinx.highlighting import lexers
         if lexers is None:
             return
         lexers[alias] = lexer
 
     def add_autodocumenter(self, cls):
+        self.debug('adding autodocumenter: %r', cls)
         from sphinx.ext import autodoc
         autodoc.add_documenter(cls)
         self.add_directive('auto' + cls.objtype, autodoc.AutoDirective)
 
     def add_autodoc_attrgetter(self, type, getter):
+        self.debug('adding autodoc attrgetter: %r', (type, getter))
         from sphinx.ext import autodoc
         autodoc.AutoDirective._special_attrgetters[type] = getter
 
     def add_search_language(self, cls):
+        self.debug('adding search language: %r', cls)
         from sphinx.search import languages, SearchLanguage
         assert isinstance(cls, SearchLanguage)
         languages[cls.lang] = cls
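Taken together, this gives extensions a small leveled-logging API: verbose() messages appear with -v, debug() messages with -vv, and %-style arguments are only interpolated when the message is actually emitted. A hedged sketch of how an extension might use it (extension name and messages are made up):

    def setup(app):
        # shown only when sphinx-build is run with -v (verbosity >= 1)
        app.verbose('myext: initializing')
        # shown only with -vv (verbosity >= 2); %r is formatted lazily
        app.debug('myext: registering config value %r', 'myext_option')
        app.add_config_value('myext_option', False, 'env')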

View File

@@ -119,9 +119,13 @@ class Builder(object):
             summary = bold(summary)
         for item in iterable:
             l += 1
-            self.info(term_width_line('%s[%3d%%] %s' %
-                                      (summary, 100*l/length,
-                                       colorfunc(item))), nonl=1)
+            s = '%s[%3d%%] %s' % (summary, 100*l/length,
+                                  colorfunc(item))
+            if self.app.verbosity:
+                s += '\n'
+            else:
+                s = term_width_line(s)
+            self.info(s, nonl=1)
             yield item
         if l > 0:
             self.info()

View File

@@ -15,9 +15,11 @@ from datetime import datetime
 from collections import defaultdict
 
 from sphinx.builders import Builder
-from sphinx.util.nodes import extract_messages
+from sphinx.util import split_index_msg
+from sphinx.util.nodes import extract_messages, traverse_translatable_index
 from sphinx.util.osutil import SEP, safe_relpath, ensuredir, find_catalog
 from sphinx.util.console import darkgreen
+from sphinx.locale import pairindextypes
 
 POHEADER = ur"""
 # SOME DESCRIPTIVE TITLE.
@@ -82,6 +84,16 @@ class I18nBuilder(Builder):
         for node, msg in extract_messages(doctree):
             catalog.add(msg, node)
 
+        # Extract translatable messages from index entries.
+        for node, entries in traverse_translatable_index(doctree):
+            for typ, msg, tid, main in entries:
+                for m in split_index_msg(typ, msg):
+                    if typ == 'pair' and m in pairindextypes.values():
+                        # skip the built-in pair index types; they are already
+                        # translated in 'sphinx.util.nodes.process_index_entry'
+                        continue
+                    catalog.add(m, node)
+
 
 class MessageCatalogBuilder(I18nBuilder):
     """

View File

@@ -240,7 +240,8 @@ class StandaloneHTMLBuilder(Builder):
             if not lang or lang not in languages:
                 lang = 'en'
             self.indexer = IndexBuilder(self.env, lang,
-                                        self.config.html_search_options)
+                                        self.config.html_search_options,
+                                        self.config.html_search_scorer)
             self.load_indexer(docnames)
 
         self.docwriter = HTMLWriter(self)
@@ -653,6 +654,8 @@ class StandaloneHTMLBuilder(Builder):
             self.indexer.feed(pagename, title, doctree)
 
     def _get_local_toctree(self, docname, collapse=True, **kwds):
+        if 'includehidden' not in kwds:
+            kwds['includehidden'] = False
         return self.render_partial(self.env.get_toctree_for(
             docname, self, collapse, **kwds))['fragment']
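This is what lets a theme's sidebar opt in to hidden toctree entries via the new ``includehidden`` template variable documented above. A hedged sketch of the equivalent direct call (``builder`` stands for a StandaloneHTMLBuilder instance, 'index' for a document name):

    # default: hidden entries are omitted from the rendered fragment
    fragment = builder._get_local_toctree('index')
    # a theme passing includehidden=true through the toctree() callable
    fragment = builder._get_local_toctree('index', collapse=False,
                                          includehidden=True)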

View File

@@ -59,6 +59,10 @@ new and changed files
 -w <file> -- write warnings (and errors) to given file
 -W        -- turn warnings into errors
 -P        -- run Pdb on exception
+-T        -- show full traceback on exception
+-v        -- increase verbosity (can be repeated)
+--help    -- show this help and exit
+--version -- show version information and exit
 Modi:
 * without -a and without filenames, write new and changed files.
 * with -a, write all files.
@@ -71,8 +75,15 @@ def main(argv):
         nocolor()
 
     try:
-        opts, args = getopt.getopt(argv[1:], 'ab:t:d:c:CD:A:ng:NEqQWw:P')
+        opts, args = getopt.getopt(argv[1:], 'ab:t:d:c:CD:A:ng:NEqQWw:PThv',
+                                   ['help', 'version'])
         allopts = set(opt[0] for opt in opts)
+        if '-h' in allopts or '--help' in allopts:
+            usage(argv)
+            return 0
+        if '--version' in allopts:
+            print 'Sphinx (sphinx-build) %s' % __version__
+            return 0
         srcdir = confdir = abspath(args[0])
         if not path.isdir(srcdir):
             print >>sys.stderr, 'Error: Cannot find source directory `%s\'.' % (
@@ -87,15 +98,18 @@ def main(argv):
         if not path.isdir(outdir):
             print >>sys.stderr, 'Making output directory...'
             os.makedirs(outdir)
-    except (IndexError, getopt.error):
-        usage(argv)
+    except getopt.error, err:
+        usage(argv, 'Error: %s' % err)
+        return 1
+    except IndexError:
+        usage(argv, 'Error: Insufficient arguments.')
         return 1
 
     filenames = args[2:]
     err = 0
     for filename in filenames:
         if not path.isfile(filename):
-            print >>sys.stderr, 'Cannot find file %r.' % filename
+            print >>sys.stderr, 'Error: Cannot find file %r.' % filename
             err = 1
     if err:
         return 1
@@ -109,6 +123,8 @@ def main(argv):
 
     buildername = None
     force_all = freshenv = warningiserror = use_pdb = False
+    show_traceback = False
+    verbosity = 0
     status = sys.stdout
     warning = sys.stderr
     error = sys.stderr
@@ -121,7 +137,7 @@ def main(argv):
             buildername = val
         elif opt == '-a':
             if filenames:
-                usage(argv, 'Cannot combine -a option and filenames.')
+                usage(argv, 'Error: Cannot combine -a option and filenames.')
                 return 1
             force_all = True
         elif opt == '-t':
@@ -185,6 +201,11 @@ def main(argv):
             warnfile = val
         elif opt == '-P':
             use_pdb = True
+        elif opt == '-T':
+            show_traceback = True
+        elif opt == '-v':
+            verbosity += 1
+            show_traceback = True
 
     if warning and warnfile:
         warnfp = open(warnfile, 'w')
@@ -194,17 +215,10 @@ def main(argv):
     try:
         app = Sphinx(srcdir, confdir, outdir, doctreedir, buildername,
                      confoverrides, status, warning, freshenv,
-                     warningiserror, tags)
+                     warningiserror, tags, verbosity)
         app.build(force_all, filenames)
         return app.statuscode
-    except KeyboardInterrupt:
-        if use_pdb:
-            import pdb
-            print >>error, red('Interrupted while building, starting debugger:')
-            traceback.print_exc()
-            pdb.post_mortem(sys.exc_info()[2])
-        return 1
-    except Exception, err:
+    except (Exception, KeyboardInterrupt), err:
         if use_pdb:
             import pdb
             print >>error, red('Exception occurred while building, '
@@ -213,7 +227,12 @@ def main(argv):
             pdb.post_mortem(sys.exc_info()[2])
         else:
             print >>error
-            if isinstance(err, SystemMessage):
+            if show_traceback:
+                traceback.print_exc(None, error)
+                print >>error
+            if isinstance(err, KeyboardInterrupt):
+                print >>error, 'interrupted!'
+            elif isinstance(err, SystemMessage):
                 print >>error, red('reST markup error:')
                 print >>error, terminal_safe(err.args[0])
             elif isinstance(err, SphinxError):

View File

@@ -110,6 +110,7 @@ class Config(object):
         html_secnumber_suffix = ('. ', 'html'),
         html_search_language = (None, 'html'),
         html_search_options = ({}, 'html'),
+        html_search_scorer = ('', None),
 
         # HTML help only options
         htmlhelp_basename = (lambda self: make_filename(self.project), None),

View File

@@ -169,6 +169,7 @@ class Index(Directive):
         indexnode = addnodes.index()
         indexnode['entries'] = ne = []
         indexnode['inline'] = False
+        set_source_info(self, indexnode)
         for entry in arguments:
             ne.extend(process_index_entry(entry, targetid))
         return [indexnode, targetnode]

View File

@@ -38,9 +38,9 @@ from docutils.transforms.parts import ContentsFilter
 
 from sphinx import addnodes
 from sphinx.util import url_re, get_matching_docs, docname_join, split_into, \
-     FilenameUniqDict
+     split_index_msg, FilenameUniqDict
 from sphinx.util.nodes import clean_astext, make_refnode, extract_messages, \
-     WarningStream
+     traverse_translatable_index, WarningStream
 from sphinx.util.osutil import movefile, SEP, ustrftime, find_catalog, \
      fs_encoding
 from sphinx.util.matching import compile_matchers
@@ -71,7 +71,7 @@ default_settings = {
 
 # This is increased every time an environment attribute is added
 # or changed to properly invalidate pickle files.
-ENV_VERSION = 41
+ENV_VERSION = 42
 
 default_substitutions = set([
@@ -303,6 +303,23 @@ class Locale(Transform):
                 child.parent = node
             node.children = patch.children
 
+        # Extract and translate messages for index entries.
+        for node, entries in traverse_translatable_index(self.document):
+            new_entries = []
+            for type, msg, tid, main in entries:
+                msg_parts = split_index_msg(type, msg)
+                msgstr_parts = []
+                for part in msg_parts:
+                    msgstr = catalog.gettext(part)
+                    if not msgstr:
+                        msgstr = part
+                    msgstr_parts.append(msgstr)
+
+                new_entries.append((type, ';'.join(msgstr_parts), tid, main))
+
+            node['raw_entries'] = entries
+            node['entries'] = new_entries
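In effect, each part of an index entry is translated independently and the parts are re-joined. An illustrative walk-through (assumes a gettext ``catalog`` that returns '' for untranslated strings, as the code above does):

    entry = ('pair', 'module; parser', 'id0', False)
    parts = split_index_msg(entry[0], entry[1])      # ['module', 'parser']
    msgstr_parts = [catalog.gettext(p) or p for p in parts]
    new_entry = (entry[0], ';'.join(msgstr_parts), entry[2], entry[3])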
 
 class SphinxStandaloneReader(standalone.Reader):
     """
@@ -365,9 +382,7 @@ class BuildEnvironment:
         del self.config.values
         domains = self.domains
         del self.domains
-        # first write to a temporary file, so that if dumping fails,
-        # the existing environment won't be overwritten
-        picklefile = open(filename + '.tmp', 'wb')
+        picklefile = open(filename, 'wb')
         # remove potentially pickling-problematic values from config
         for key, val in vars(self.config).items():
             if key.startswith('_') or \
@@ -379,7 +394,6 @@ class BuildEnvironment:
             pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
         finally:
             picklefile.close()
-        movefile(filename + '.tmp', filename)
 
         # reset attributes
         self.domains = domains
         self.config.values = values
@@ -954,6 +968,7 @@ class BuildEnvironment:
         filterlevel = self.config.keep_warnings and 2 or 5
         for node in doctree.traverse(nodes.system_message):
             if node['level'] < filterlevel:
+                self.app.debug('%s [filtered system message]', node.astext())
                 node.parent.remove(node)
@@ -1340,11 +1355,9 @@ class BuildEnvironment:
         if toctree.get('hidden', False) and not includehidden:
             return None
 
-        def _walk_depth(node, depth, maxdepth):
-            """Utility: Cut a TOC at a specified depth."""
-
-            # For reading this function, it is useful to keep in mind the node
-            # structure of a toctree (using HTML-like node names for brevity):
+        # For reading the following two helper functions, it is useful to keep
+        # in mind the node structure of a toctree (using HTML-like node names
+        # for brevity):
         #
         # <ul>
         #   <li>
@@ -1356,30 +1369,42 @@ class BuildEnvironment:
         #       </ul>
         #   </li>
         # </ul>
+        #
+        # The transformation is made in two passes in order to avoid
+        # interactions between marking and pruning the tree (see bug #1046).
+
+        def _toctree_prune(node, depth, maxdepth):
+            """Utility: Cut a TOC at a specified depth."""
             for subnode in node.children[:]:
                 if isinstance(subnode, (addnodes.compact_paragraph,
                                         nodes.list_item)):
-                    # for <p> and <li>, just indicate the depth level and
-                    # recurse to children
-                    subnode['classes'].append('toctree-l%d' % (depth-1))
-                    _walk_depth(subnode, depth, maxdepth)
+                    # for <p> and <li>, just recurse
+                    _toctree_prune(subnode, depth, maxdepth)
                 elif isinstance(subnode, nodes.bullet_list):
                     # for <ul>, determine if the depth is too large or if the
                     # entry is to be collapsed
                     if maxdepth > 0 and depth > maxdepth:
                         subnode.parent.replace(subnode, [])
                     else:
-                        # to find out what to collapse, *first* walk subitems,
-                        # since that determines which children point to the
-                        # current page
-                        _walk_depth(subnode, depth+1, maxdepth)
                         # cull sub-entries whose parents aren't 'current'
                         if (collapse and depth > 1 and
                             'iscurrent' not in subnode.parent):
                             subnode.parent.remove(subnode)
+                        else:
+                            # recurse on visible children
+                            _toctree_prune(subnode, depth+1, maxdepth)
+
+        def _toctree_add_classes(node, depth):
+            """Add 'toctree-l%d' and 'current' classes to the toctree."""
+            for subnode in node.children:
+                if isinstance(subnode, (addnodes.compact_paragraph,
+                                        nodes.list_item)):
+                    # for <p> and <li>, indicate the depth level and recurse
+                    subnode['classes'].append('toctree-l%d' % (depth-1))
+                    _toctree_add_classes(subnode, depth)
+                elif isinstance(subnode, nodes.bullet_list):
+                    # for <ul>, just recurse
+                    _toctree_add_classes(subnode, depth+1)
                 elif isinstance(subnode, nodes.reference):
                     # for <a>, identify which entries point to the current
                     # document and therefore may not be collapsed
@@ -1500,8 +1525,9 @@ class BuildEnvironment:
         newnode = addnodes.compact_paragraph('', '', *tocentries)
         newnode['toctree'] = True
 
-        # prune the tree to maxdepth and replace titles, also set level classes
-        _walk_depth(newnode, 1, prune and maxdepth or 0)
+        # prune the tree to maxdepth, also set toc depth and current classes
+        _toctree_add_classes(newnode, 1)
+        _toctree_prune(newnode, 1, prune and maxdepth or 0)
 
         # set the target paths in the toctrees (they are not known at TOC
         # generation time)

View File

@@ -317,13 +317,20 @@ class Documenter(object):
 
         Returns True if successful, False if an error occurred.
         """
+        if self.objpath:
+            self.env.app.debug('autodoc: from %s import %s',
+                               self.modname, '.'.join(self.objpath))
         try:
+            self.env.app.debug('autodoc: import %s', self.modname)
            __import__(self.modname)
            parent = None
            obj = self.module = sys.modules[self.modname]
+            self.env.app.debug('autodoc: => %r', obj)
            for part in self.objpath:
                parent = obj
+                self.env.app.debug('autodoc: getattr(_, %r)', part)
                obj = self.get_attr(obj, part)
+                self.env.app.debug('autodoc: => %r', obj)
                self.object_name = part
            self.parent = parent
            self.object = obj
@@ -331,12 +338,16 @@ class Documenter(object):
         # this used to only catch SyntaxError, ImportError and AttributeError,
         # but importing modules with side effects can raise all kinds of errors
         except Exception, err:
-            if self.env.app and not self.env.app.quiet:
-                self.env.app.info(traceback.format_exc().rstrip())
-            self.directive.warn(
-                'autodoc can\'t import/find %s %r, it reported error: '
-                '"%s", please check your spelling and sys.path' %
-                (self.objtype, str(self.fullname), err))
+            if self.objpath:
+                errmsg = 'autodoc: failed to import %s %r from module %r' % \
+                         (self.objtype, '.'.join(self.objpath), self.modname)
+            else:
+                errmsg = 'autodoc: failed to import %s %r' % \
+                         (self.objtype, self.fullname)
+            errmsg += '; the following exception was raised:\n%s' % \
+                      traceback.format_exc()
+            self.env.app.debug(errmsg)
+            self.directive.warn(errmsg)
             self.env.note_reread()
             return False
 
@@ -1294,6 +1305,13 @@ class AutoDirective(Directive):
         self.warnings = []
         self.result = ViewList()
 
+        try:
+            source, lineno = self.reporter.get_source_and_line(self.lineno)
+        except AttributeError:
+            source = lineno = None
+        self.env.app.debug('%s:%s: <input>\n%s',
+                           source, lineno, self.block_text)
+
         # find out what documenter to call
         objtype = self.name[4:]
         doc_class = self._registry[objtype]
@@ -1314,6 +1332,9 @@ class AutoDirective(Directive):
         if not self.result:
             return self.warnings
 
+        if self.env.app.verbosity >= 2:
+            self.env.app.debug('autodoc: <output>\n%s', '\n'.join(self.result))
+
         # record all filenames as dependencies -- this will at least
         # partially make automatic invalidation possible
         for fn in self.filename_set:

View File

@@ -293,6 +293,7 @@ def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
         entries = [('single', target, targetid, main)]
     indexnode = addnodes.index()
     indexnode['entries'] = entries
+    set_role_source_info(inliner, lineno, indexnode)
     textnode = nodes.Text(title, title)
     return [indexnode, targetnode, textnode], []

View File

@@ -8,10 +8,12 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
+from __future__ import with_statement
 import re
+import itertools
 import cPickle as pickle
 
-from docutils.nodes import comment, Text, NodeVisitor, SkipNode
+from docutils.nodes import comment, title, Text, NodeVisitor, SkipNode
 
 from sphinx.util import jsdump, rpartition
@@ -92,6 +94,7 @@ var Stemmer = function() {
             (ord(word[0]) < 256 and (len(word) < 3 or word in self.stopwords or
                                      word.isdigit())))
 
+
 from sphinx.search import en, ja
 
 languages = {
@@ -137,13 +140,16 @@ class WordCollector(NodeVisitor):
     def __init__(self, document, lang):
         NodeVisitor.__init__(self, document)
         self.found_words = []
+        self.found_title_words = []
         self.lang = lang
 
     def dispatch_visit(self, node):
         if node.__class__ is comment:
             raise SkipNode
-        if node.__class__ is Text:
+        elif node.__class__ is Text:
             self.found_words.extend(self.lang.split(node.astext()))
+        elif node.__class__ is title:
+            self.found_title_words.extend(self.lang.split(node.astext()))
 
 class IndexBuilder(object):
@@ -156,12 +162,14 @@ class IndexBuilder(object):
         'pickle': pickle
     }
 
-    def __init__(self, env, lang, options):
+    def __init__(self, env, lang, options, scoring):
         self.env = env
         # filename -> title
         self._titles = {}
         # stemmed word -> set(filenames)
         self._mapping = {}
+        # stemmed words in titles -> set(filenames)
+        self._title_mapping = {}
         # objtype -> index
         self._objtypes = {}
         # objtype index -> (domain, type, objname (localized))
@@ -169,6 +177,12 @@ class IndexBuilder(object):
         # add language-specific SearchLanguage instance
         self.lang = languages[lang](options)
 
+        if scoring:
+            with open(scoring, 'rb') as fp:
+                self.js_scorer_code = fp.read().decode('utf-8')
+        else:
+            self.js_scorer_code = u''
+
     def load(self, stream, format):
         """Reconstruct from frozen data."""
         if isinstance(format, basestring):
@@ -179,12 +193,18 @@ class IndexBuilder(object):
             raise ValueError('old format')
         index2fn = frozen['filenames']
         self._titles = dict(zip(index2fn, frozen['titles']))
-        self._mapping = {}
-        for k, v in frozen['terms'].iteritems():
-            if isinstance(v, int):
-                self._mapping[k] = set([index2fn[v]])
-            else:
-                self._mapping[k] = set(index2fn[i] for i in v)
+
+        def load_terms(mapping):
+            rv = {}
+            for k, v in mapping.iteritems():
+                if isinstance(v, int):
+                    rv[k] = set([index2fn[v]])
+                else:
+                    rv[k] = set(index2fn[i] for i in v)
+            return rv
+
+        self._mapping = load_terms(frozen['terms'])
+        self._title_mapping = load_terms(frozen['titleterms'])
         # no need to load keywords/objtypes
 
     def dump(self, stream, format):
@@ -229,28 +249,31 @@ class IndexBuilder(object):
         return rv
 
     def get_terms(self, fn2index):
-        rv = {}
-        for k, v in self._mapping.iteritems():
-            if len(v) == 1:
-                fn, = v
-                if fn in fn2index:
-                    rv[k] = fn2index[fn]
-            else:
-                rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
-        return rv
+        rvs = {}, {}
+        for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
+            for k, v in mapping.iteritems():
+                if len(v) == 1:
+                    fn, = v
+                    if fn in fn2index:
+                        rv[k] = fn2index[fn]
+                else:
+                    rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
+        return rvs
 
     def freeze(self):
         """Create a usable data structure for serializing."""
         filenames = self._titles.keys()
         titles = self._titles.values()
         fn2index = dict((f, i) for (i, f) in enumerate(filenames))
-        terms = self.get_terms(fn2index)
+        terms, title_terms = self.get_terms(fn2index)
+
         objects = self.get_objects(fn2index)  # populates _objtypes
         objtypes = dict((v, k[0] + ':' + k[1])
                         for (k, v) in self._objtypes.iteritems())
         objnames = self._objnames
         return dict(filenames=filenames, titles=titles, terms=terms,
-                    objects=objects, objtypes=objtypes, objnames=objnames)
+                    objects=objects, objtypes=objtypes, objnames=objnames,
+                    titleterms=title_terms)
 
     def prune(self, filenames):
         """Remove data for all filenames not in the list."""
@@ -261,6 +284,8 @@ class IndexBuilder(object):
         self._titles = new_titles
         for wordnames in self._mapping.itervalues():
             wordnames.intersection_update(filenames)
+        for wordnames in self._title_mapping.itervalues():
+            wordnames.intersection_update(filenames)
 
     def feed(self, filename, title, doctree):
         """Feed a doctree to the index."""
@@ -269,19 +294,23 @@ class IndexBuilder(object):
         visitor = WordCollector(doctree, self.lang)
         doctree.walk(visitor)
 
-        def add_term(word, stem=self.lang.stem):
-            word = stem(word)
-            if self.lang.word_filter(word):
-                self._mapping.setdefault(word, set()).add(filename)
+        stem = self.lang.stem
+        _filter = self.lang.word_filter
 
-        for word in self.lang.split(title):
-            add_term(word)
+        for word in itertools.chain(visitor.found_title_words,
+                                    self.lang.split(title)):
+            word = stem(word)
+            if _filter(word):
+                self._title_mapping.setdefault(word, set()).add(filename)
 
         for word in visitor.found_words:
-            add_term(word)
+            word = stem(word)
+            if word not in self._title_mapping and _filter(word):
+                self._mapping.setdefault(word, set()).add(filename)
 
     def context_for_searchtool(self):
         return dict(
             search_language_stemming_code = self.lang.js_stemmer_code,
             search_language_stop_words = jsdump.dumps(sorted(self.lang.stopwords)),
+            search_scorer_tool = self.js_scorer_code,
         )
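The frozen index therefore gains a titleterms mapping parallel to terms, so the JavaScript frontend can score title hits (Scorer.title) above body hits (Scorer.term). A rough sketch of the structure handed to the frontend (keys as in this hunk, values illustrative):

    frozen = indexer.freeze()   # indexer: an IndexBuilder that has been fed doctrees
    # {
    #   'filenames': ['intro', 'usage'],
    #   'titles':    ['Introduction', 'Usage'],
    #   'terms':      {'instal': 0, 'option': [0, 1]},   # stemmed body words
    #   'titleterms': {'introduct': 0, 'usag': 1},       # stemmed title words
    #   'objects': ..., 'objtypes': ..., 'objnames': ...,
    # }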

View File

@@ -8,7 +8,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 
 {% block header %}
     <div class="header-wrapper">

View File

@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Overview') %}
 {% block body %}
   <h1>{{ docstitle|e }}</h1>

View File

@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = indextitle %}
 {% block extrahead %}
 {{ super() }}

View File

@@ -28,7 +28,7 @@
   </dt>
 {% endmacro %}
 
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Index') %}
 {% block body %}

View File

@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Index') %}
 {% block body %}

View File

@@ -28,7 +28,7 @@
   </dt>
 {% endmacro %}
 
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Index') %}
 {% block body %}

View File

@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% block body %}
   {{ body }}
 {% endblock %}

View File

@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Search') %}
 {% set script_files = script_files + ['_static/searchtools.js'] %}
 {% block extrahead %}

View File

@@ -32,7 +32,7 @@ if (!window.console || !console.firebug) {
  */
 jQuery.urldecode = function(x) {
   return decodeURIComponent(x).replace(/\+/g, ' ');
-}
+};
 
 /**
  * small helper function to urlencode strings

View File

@@ -9,34 +9,41 @@
  *
  */
 
+{{ search_language_stemming_code|safe }}
+
+{% if search_scorer_tool %}
+{{ search_scorer_tool|safe }}
+{% else %}
 /**
- * helper function to return a node containing the
- * search summary for a given text. keywords is a list
- * of stemmed words, hlwords is the list of normal, unstemmed
- * words. the first one is used to find the occurance, the
- * latter for highlighting it.
+ * Simple result scoring code.
  */
-jQuery.makeSearchSummary = function(text, keywords, hlwords) {
-  var textLower = text.toLowerCase();
-  var start = 0;
-  $.each(keywords, function() {
-    var i = textLower.indexOf(this.toLowerCase());
-    if (i > -1)
-      start = i;
-  });
-  start = Math.max(start - 120, 0);
-  var excerpt = ((start > 0) ? '...' : '') +
-    $.trim(text.substr(start, 240)) +
-    ((start + 240 - text.length) ? '...' : '');
-  var rv = $('<div class="context"></div>').text(excerpt);
-  $.each(hlwords, function() {
-    rv = rv.highlightText(this, 'highlighted');
-  });
-  return rv;
-}
-
-{{ search_language_stemming_code|safe }}
+var Scorer = {
+  // Implement the following function to further tweak the score for each result
+  // The function takes a result array [filename, title, anchor, descr, score]
+  // and returns the new score.
+  /*
+  score: function(result) {
+    return result[4];
+  },
+  */
+
+  // query matches the full name of an object
+  objNameMatch: 11,
+  // or matches in the last dotted part of the object name
+  objPartialMatch: 6,
+  // Additive scores depending on the priority of the object
+  objPrio: {0: 15,   // used to be importantResults
+            1: 5,    // used to be objectResults
+            2: -5},  // used to be unimportantResults
+  // Used when the priority is not in the mapping.
+  objPrioDefault: 0,
+
+  // query found in title
+  title: 15,
+  // query found in terms
+  term: 5
+};
+{% endif %}
 
 /**
  * Search Module
@@ -86,19 +93,20 @@ var Search = {
     if (this._pulse_status >= 0)
         return;
     function pulse() {
+      var i;
       Search._pulse_status = (Search._pulse_status + 1) % 4;
       var dotString = '';
-      for (var i = 0; i < Search._pulse_status; i++)
+      for (i = 0; i < Search._pulse_status; i++)
         dotString += '.';
       Search.dots.text(dotString);
       if (Search._pulse_status > -1)
         window.setTimeout(pulse, 500);
-    };
+    }
     pulse();
   },
 
   /**
-   * perform a search for something
+   * perform a search for something (or wait until index is loaded)
    */
   performSearch : function(query) {
     // create the required interface elements
@@ -118,41 +126,46 @@ var Search = {
     this.deferQuery(query);
   },
 
+  /**
+   * execute search (requires search index to be loaded)
+   */
   query : function(query) {
+    var i;
     var stopwords = {{ search_language_stop_words }};
 
-    // Stem the searchterms and add them to the correct list
+    // stem the searchterms and add them to the correct list
     var stemmer = new Stemmer();
     var searchterms = [];
     var excluded = [];
     var hlterms = [];
     var tmp = query.split(/\s+/);
     var objectterms = [];
-    for (var i = 0; i < tmp.length; i++) {
-      if (tmp[i] != "") {
+    for (i = 0; i < tmp.length; i++) {
+      if (tmp[i] !== "") {
        objectterms.push(tmp[i].toLowerCase());
      }
 
      if ($u.indexOf(stopwords, tmp[i]) != -1 || tmp[i].match(/^\d+$/) ||
-          tmp[i] == "") {
+          tmp[i] === "") {
        // skip this "word"
        continue;
      }
      // stem the word
      var word = stemmer.stemWord(tmp[i]).toLowerCase();
+      var toAppend;
      // select the correct list
      if (word[0] == '-') {
-        var toAppend = excluded;
+        toAppend = excluded;
        word = word.substr(1);
      }
      else {
-        var toAppend = searchterms;
+        toAppend = searchterms;
        hlterms.push(tmp[i].toLowerCase());
      }
      // only add if not already in the list
      if (!$u.contains(toAppend, word))
        toAppend.push(word);
-    };
+    }
     var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
 
     // console.debug('SEARCH: searching for:');
@@ -160,89 +173,51 @@ var Search = {
     // console.info('excluded: ', excluded);
 
     // prepare search
-    var filenames = this._index.filenames;
-    var titles = this._index.titles;
     var terms = this._index.terms;
-    var fileMap = {};
-    var files = null;
-    // different result priorities
-    var importantResults = [];
-    var objectResults = [];
-    var regularResults = [];
-    var unimportantResults = [];
+    var titleterms = this._index.titleterms;
+
+    // array of [filename, title, anchor, descr, score]
+    var results = [];
     $('#search-progress').empty();
 
     // lookup as object
-    for (var i = 0; i < objectterms.length; i++) {
-      var others = [].concat(objectterms.slice(0, i),
-                             objectterms.slice(i+1, objectterms.length))
-      var results = this.performObjectSearch(objectterms[i], others);
-      // Assume first word is most likely to be the object,
-      // other words more likely to be in description.
-      // Therefore put matches for earlier words first.
-      // (Results are eventually used in reverse order).
-      objectResults = results[0].concat(objectResults);
-      importantResults = results[1].concat(importantResults);
-      unimportantResults = results[2].concat(unimportantResults);
+    for (i = 0; i < objectterms.length; i++) {
+      var others = [].concat(objectterms.slice(0, i),
+                             objectterms.slice(i+1, objectterms.length));
+      results = results.concat(this.performObjectSearch(objectterms[i], others));
     }
 
-    // perform the search on the required terms
-    for (var i = 0; i < searchterms.length; i++) {
-      var word = searchterms[i];
-      // no match but word was a required one
-      if ((files = terms[word]) == null)
-        break;
-      if (files.length == undefined) {
-        files = [files];
-      }
-      // create the mapping
-      for (var j = 0; j < files.length; j++) {
-        var file = files[j];
-        if (file in fileMap)
-          fileMap[file].push(word);
-        else
-          fileMap[file] = [word];
-      }
-    }
-
-    // now check if the files don't contain excluded terms
-    for (var file in fileMap) {
-      var valid = true;
-
-      // check if all requirements are matched
-      if (fileMap[file].length != searchterms.length)
-        continue;
-
-      // ensure that none of the excluded terms is in the
-      // search result.
-      for (var i = 0; i < excluded.length; i++) {
-        if (terms[excluded[i]] == file ||
-            $u.contains(terms[excluded[i]] || [], file)) {
-          valid = false;
-          break;
-        }
-      }
-
-      // if we have still a valid result we can add it
-      // to the result list
-      if (valid)
-        regularResults.push([filenames[file], titles[file], '', null]);
-    }
-
-    // delete unused variables in order to not waste
-    // memory until list is retrieved completely
-    delete filenames, titles, terms;
-
-    // now sort the regular results descending by title
-    regularResults.sort(function(a, b) {
-      var left = a[1].toLowerCase();
-      var right = b[1].toLowerCase();
-      return (left > right) ? -1 : ((left < right) ? 1 : 0);
-    });
-
-    // combine all results
-    var results = unimportantResults.concat(regularResults)
-      .concat(objectResults).concat(importantResults);
+    // lookup as search terms in fulltext
+    results = results.concat(this.performTermsSearch(searchterms, excluded, terms, Scorer.term))
+                     .concat(this.performTermsSearch(searchterms, excluded, titleterms, Scorer.title));
+
+    // let the scorer override scores with a custom scoring function
+    if (Scorer.score) {
+      for (i = 0; i < results.length; i++)
+        results[i][4] = Scorer.score(results[i]);
+    }
+
+    // now sort the results by score (in opposite order of appearance, since the
+    // display function below uses pop() to retrieve items) and then
+    // alphabetically
+    results.sort(function(a, b) {
+      var left = a[4];
+      var right = b[4];
+      if (left > right) {
+        return 1;
+      } else if (left < right) {
+        return -1;
+      } else {
+        // same score: sort alphabetically
+        left = a[1].toLowerCase();
+        right = b[1].toLowerCase();
+        return (left > right) ? -1 : ((left < right) ? 1 : 0);
+      }
+    });
+
+    // for debugging
+    //Search.lastresults = results.slice();  // a copy
+    //console.info('search results:', Search.lastresults);
 
     // print the results
     var resultCount = results.length;
@@ -251,7 +226,7 @@ var Search = {
       if (results.length) {
         var item = results.pop();
         var listItem = $('<li style="display:none"></li>');
-        if (DOCUMENTATION_OPTIONS.FILE_SUFFIX == '') {
+        if (DOCUMENTATION_OPTIONS.FILE_SUFFIX === '') {
           // dirhtml builder
           var dirname = item[0] + '/';
           if (dirname.match(/\/index\/$/)) {
@@ -277,8 +252,8 @@ var Search = {
         } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
           $.get(DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' +
                 item[0] + '.txt', function(data) {
-            if (data != '') {
-              listItem.append($.makeSearchSummary(data, searchterms, hlterms));
+            if (data !== '') {
+              listItem.append(Search.makeSearchSummary(data, searchterms, hlterms));
               Search.output.append(listItem);
             }
             listItem.slideDown(5, function() {
@@ -307,20 +282,32 @@ var Search = {
     displayNextItem();
   },
 
+  /**
+   * search for object names
+   */
   performObjectSearch : function(object, otherterms) {
     var filenames = this._index.filenames;
     var objects = this._index.objects;
     var objnames = this._index.objnames;
     var titles = this._index.titles;
 
-    var importantResults = [];
-    var objectResults = [];
-    var unimportantResults = [];
+    var i;
+    var results = [];
+
     for (var prefix in objects) {
       for (var name in objects[prefix]) {
         var fullname = (prefix ? prefix + '.' : '') + name;
         if (fullname.toLowerCase().indexOf(object) > -1) {
+          var score = 0;
+          var parts = fullname.split('.');
+          // check for different match types: exact matches of full name or
+          // "last name" (i.e. last dotted part)
+          if (fullname == object || parts[parts.length - 1] == object) {
+            score += Scorer.objNameMatch;
+          // matches in last name
+          } else if (parts[parts.length - 1].indexOf(object) > -1) {
+            score += Scorer.objPartialMatch;
+          }
           var match = objects[prefix][name];
           var objname = objnames[match[1]][2];
           var title = titles[match[0]];
@@ -330,7 +317,7 @@ var Search = {
           var haystack = (prefix + ' ' + name + ' ' +
                           objname + ' ' + title).toLowerCase();
           var allfound = true;
-          for (var i = 0; i < otherterms.length; i++) {
+          for (i = 0; i < otherterms.length; i++) {
             if (haystack.indexOf(otherterms[i]) == -1) {
               allfound = false;
               break;
@ -341,37 +328,107 @@ var Search = {
} }
} }
var descr = objname + _(', in ') + title; var descr = objname + _(', in ') + title;
anchor = match[3];
if (anchor == '') var anchor = match[3];
if (anchor === '')
anchor = fullname; anchor = fullname;
else if (anchor == '-') else if (anchor == '-')
anchor = objnames[match[1]][1] + '-' + fullname; anchor = objnames[match[1]][1] + '-' + fullname;
result = [filenames[match[0]], fullname, '#'+anchor, descr]; // add custom score for some objects according to scorer
switch (match[2]) { if (Scorer.objPrio.hasOwnProperty(match[2])) {
case 1: objectResults.push(result); break; score += Scorer.objPrio[match[2]];
case 0: importantResults.push(result); break; } else {
case 2: unimportantResults.push(result); break; score += Scorer.objPrioDefault;
} }
results.push([filenames[match[0]], fullname, '#'+anchor, descr, score]);
} }
} }
} }
// sort results descending return results;
objectResults.sort(function(a, b) { },
return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
/**
* search for full-text terms in the index
*/
performTermsSearch : function(searchterms, excluded, terms, score) {
var filenames = this._index.filenames;
var titles = this._index.titles;
var i, j, file, files;
var fileMap = {};
var results = [];
// perform the search on the required terms
for (i = 0; i < searchterms.length; i++) {
var word = searchterms[i];
// no match but word was a required one
if ((files = terms[word]) === null)
break;
if (files.length === undefined) {
files = [files];
}
// create the mapping
for (j = 0; j < files.length; j++) {
file = files[j];
if (file in fileMap)
fileMap[file].push(word);
else
fileMap[file] = [word];
}
}
// now check if the files don't contain excluded terms
for (file in fileMap) {
var valid = true;
// check if all requirements are matched
if (fileMap[file].length != searchterms.length)
continue;
// ensure that none of the excluded terms is in the search result
for (i = 0; i < excluded.length; i++) {
if (terms[excluded[i]] == file ||
$u.contains(terms[excluded[i]] || [], file)) {
valid = false;
break;
}
}
// if we have still a valid result we can add it to the result list
if (valid) {
results.push([filenames[file], titles[file], '', null, score]);
}
}
return results;
},
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words, hlwords is the list of normal, unstemmed
* words. the first one is used to find the occurance, the
* latter for highlighting it.
*/
makeSearchSummary : function(text, keywords, hlwords) {
var textLower = text.toLowerCase();
var start = 0;
$.each(keywords, function() {
var i = textLower.indexOf(this.toLowerCase());
if (i > -1)
start = i;
}); });
start = Math.max(start - 120, 0);
importantResults.sort(function(a, b) { var excerpt = ((start > 0) ? '...' : '') +
return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0); $.trim(text.substr(start, 240)) +
((start + 240 - text.length) ? '...' : '');
var rv = $('<div class="context"></div>').text(excerpt);
$.each(hlwords, function() {
rv = rv.highlightText(this, 'highlighted');
}); });
return rv;
unimportantResults.sort(function(a, b) {
return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
});
return [importantResults, objectResults, unimportantResults]
}
} }
};
$(document).ready(function() { $(document).ready(function() {
Search.init(); Search.init();
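The refactor above replaces the three hand-sorted result buckets with a single list in which every row carries a numeric relevance score; the list is sorted score-ascending with an alphabetical tie-break, so the pop()-based display loop emits the best match first. A rough Python model of that ordering (an illustration only, not part of Sphinx):

    # Each row mirrors the five-element result arrays:
    # (docname, title, anchor, description, score)
    results = [
        ('intro', 'Introduction', '', None, 5),
        ('api', 'API reference', '#search', 'built-in function, in API', 15),
        ('faq', 'FAQ', '', None, 5),
    ]

    # Two stable sorts: alphabetical tie-break first (reversed), then score
    # ascending, because the display loop pops items from the end of the list.
    results.sort(key=lambda r: r[1].lower(), reverse=True)
    results.sort(key=lambda r: r[4])

    while results:
        print(results.pop())  # highest score first; ties in title order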
View File
@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 {% if theme_collapsiblesidebar|tobool %}
 {% set script_files = script_files + ['_static/sidebar.js'] %}
View File
@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {%- block header %}{% endblock %}
 {%- block rootrellink %}{% endblock %}
 {%- block relbaritems %}{% endblock %}
View File
@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 {# add only basic navigation links #}
 {% block sidebar1 %}{% endblock %}
View File
@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 {% set script_files = script_files + ['_static/theme_extras.js'] %}
 {% set css_files = css_files + ['_static/print.css'] %}
View File
@@ -1,4 +1,4 @@
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 {%- block extrahead %}
 <link rel="stylesheet" href="http://fonts.googleapis.com/css?family=Neuton&amp;subset=latin" type="text/css" media="screen" charset="utf-8" />
View File
@@ -8,7 +8,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 {% set script_files = script_files + ['_static/theme_extras.js'] %}
 {% set css_files = css_files + ['_static/print.css'] %}
 {# do not display relbars #}
View File
@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}
 {# put the sidebar before the body #}
 {% block sidebar1 %}{{ sidebar() }}{% endblock %}
View File
@@ -291,6 +291,12 @@ class Tee(object):
         self.stream1.write(text)
         self.stream2.write(text)

+    def flush(self):
+        if hasattr(self.stream1, 'flush'):
+            self.stream1.flush()
+        if hasattr(self.stream2, 'flush'):
+            self.stream2.flush()
+

 def parselinenos(spec, total):
     """Parse a line number spec (such as "1,2,4-6") and return a list of
@@ -354,6 +360,29 @@ def split_into(n, type, value):
     return parts


+def split_index_msg(type, value):
+    # new entry types must be listed in directives/other.py!
+    result = []
+    try:
+        if type == 'single':
+            try:
+                result = split_into(2, 'single', value)
+            except ValueError:
+                result = split_into(1, 'single', value)
+        elif type == 'pair':
+            result = split_into(2, 'pair', value)
+        elif type == 'triple':
+            result = split_into(3, 'triple', value)
+        elif type == 'see':
+            result = split_into(2, 'see', value)
+        elif type == 'seealso':
+            result = split_into(2, 'see', value)
+    except ValueError:
+        pass
+    return result
+
+
 def format_exception_cut_frames(x=1):
     """Format an exception with traceback, but only the last x frames."""
     typ, val, tb = sys.exc_info()
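split_index_msg defers the actual splitting to split_into() and swallows ValueError, so a malformed entry yields an empty list instead of aborting message extraction. Expected behaviour, assuming split_into() splits the value on semicolons as elsewhere in sphinx.util:

    from sphinx.util import split_index_msg

    split_index_msg('pair', 'Newsletter; Recipients List')
    # -> ['Newsletter', 'Recipients List']
    split_index_msg('triple', 'First; Second; Third')
    # -> ['First', 'Second', 'Third']
    split_index_msg('single', 'Mailing List')
    # -> ['Mailing List']
    split_index_msg('pair', 'only one part')
    # -> []  (the ValueError is swallowed)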
View File
@@ -52,6 +52,13 @@ def extract_messages(doctree):
             node.line = definition_list_item.line - 1
             node.rawsource = definition_list_item.\
                              rawsource.split("\n", 2)[0]
+        # workaround: nodes.caption doesn't have source, line.
+        # this issue was filed to Docutils tracker:
+        # https://sourceforge.net/tracker/?func=detail&aid=3599485&group_id=38414&atid=422032
+        if isinstance(node, nodes.caption) and not node.source:
+            node.source = node.parent.source
+            node.line = ''  #need fix docutils to get `node.line`
+
         if not node.source:
             continue # built-in message
         if isinstance(node, IGNORED_NODES):
@@ -67,6 +74,19 @@ def extract_messages(doctree):
         yield node, msg


+def traverse_translatable_index(doctree):
+    """Traverse translatable index node from a document tree."""
+    def is_block_index(node):
+        return isinstance(node, addnodes.index) and \
+            node.get('inline') == False
+    for node in doctree.traverse(is_block_index):
+        if 'raw_entries' in node:
+            entries = node['raw_entries']
+        else:
+            entries = node['entries']
+        yield node, entries
+
+
 def nested_parse_with_titles(state, content, node):
     """Version of state.nested_parse() that allows titles and does not require
        titles to have the same decoration as the calling document.
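traverse_translatable_index() is a generator over block-level (non-inline) index nodes, preferring the untranslated 'raw_entries' when a previous pass has already replaced 'entries'. A hypothetical consumer that collects the translatable entry texts, reusing split_index_msg from above and assuming the entry tuples lead with (entry_type, value, ...):

    from sphinx.util import split_index_msg
    from sphinx.util.nodes import traverse_translatable_index

    def collect_index_messages(doctree):
        # Only the first two fields of each entry tuple matter here.
        messages = []
        for node, entries in traverse_translatable_index(doctree):
            for entry in entries:
                entry_type, value = entry[0], entry[1]
                messages.extend(split_index_msg(entry_type, value))
        return messages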
View File
@@ -294,6 +294,11 @@ class TextTranslator(nodes.NodeVisitor):
     def visit_label(self, node):
         raise nodes.SkipNode

+    def visit_legend(self, node):
+        pass
+
+    def depart_legend(self, node):
+        pass
+
     # XXX: option list could use some better styling
     def visit_option_list(self, node):
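The contrast with visit_label just above is the point: raising nodes.SkipNode drops a node and its children, while an empty visit/depart pair lets docutils keep walking, so the legend's paragraphs are rendered as ordinary body text; it also keeps a strict NodeVisitor from raising NotImplementedError on the unhandled node type. In miniature:

    from docutils import nodes

    class MiniTranslator(nodes.NodeVisitor):
        def visit_label(self, node):
            raise nodes.SkipNode  # drop the node and everything inside it

        def visit_legend(self, node):
            pass                  # keep descending; children are rendered

        def depart_legend(self, node):
            pass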
View File
@@ -27,9 +27,7 @@ Contents:

    doctest
    extensions
    versioning/index
-   only
    footnote
-   i18n/index

    Python <http://python.org/>
View File
@@ -191,6 +191,10 @@ Figures

       My caption of the figure

+      My description paragraph of the figure.
+
+      Description paragraph is wraped with legend node.
+
 Version markup
 --------------
View File
@@ -0,0 +1,12 @@
#, fuzzy
msgid ""
msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"

msgid "File with UTF-8 BOM"
msgstr "Datei mit UTF-8"

msgid "This file has a UTF-8 \"BOM\"."
msgstr "This file has umlauts: äöü."
View File
@@ -0,0 +1,5 @@
File with UTF-8 BOM
===================

This file has a UTF-8 "BOM".
View File
@@ -0,0 +1,7 @@
# -*- coding: utf-8 -*-

import sys, os

project = 'Sphinx intl <Tests>'
source_suffix = '.txt'
keep_warnings = True
View File
@@ -2,9 +2,13 @@
    :maxdepth: 2
    :numbered:

+   subdir/contents
+   bom
    footnote
    external_links
    refs_inconsistency
    literalblock
    seealso
    definition_terms
+   figure_caption
+   index_entries
View File
@@ -0,0 +1,29 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) 2012, foof
# This file is distributed under the same license as the foo package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: sphinx 1.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2013-01-04 7:00\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"

msgid "i18n with figure caption"
msgstr "I18N WITH FIGURE CAPTION"

msgid "My caption of the figure"
msgstr "MY CAPTION OF THE FIGURE"

msgid "My description paragraph1 of the figure."
msgstr "MY DESCRIPTION PARAGRAPH1 OF THE FIGURE."

msgid "My description paragraph2 of the figure."
msgstr "MY DESCRIPTION PARAGRAPH2 OF THE FIGURE."
View File
@@ -0,0 +1,12 @@
:tocdepth: 2

i18n with figure caption
========================

.. figure:: i18n.png

   My caption of the figure

   My description paragraph1 of the figure.

   My description paragraph2 of the figure.
Binary file not shown.

View File
@@ -0,0 +1,77 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) 2013, foo
# This file is distributed under the same license as the foo package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: foo foo\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2013-01-05 18:10\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"

msgid "i18n with index entries"
msgstr ""

msgid "index target section"
msgstr ""

msgid "this is :index:`Newsletter` target paragraph."
msgstr "THIS IS :index:`NEWSLETTER` TARGET PARAGRAPH."

msgid "various index entries"
msgstr ""

msgid "That's all."
msgstr ""

msgid "Mailing List"
msgstr "MAILING LIST"

msgid "Newsletter"
msgstr "NEWSLETTER"

msgid "Recipients List"
msgstr "RECIPIENTS LIST"

msgid "First"
msgstr "FIRST"

msgid "Second"
msgstr "SECOND"

msgid "Third"
msgstr "THIRD"

msgid "Entry"
msgstr "ENTRY"

msgid "See"
msgstr "SEE"

msgid "Module"
msgstr "MODULE"

msgid "Keyword"
msgstr "KEYWORD"

msgid "Operator"
msgstr "OPERATOR"

msgid "Object"
msgstr "OBJECT"

msgid "Exception"
msgstr "EXCEPTION"

msgid "Statement"
msgstr "STATEMENT"

msgid "Builtin"
msgstr "BUILTIN"
View File
@@ -0,0 +1,31 @@
:tocdepth: 2

i18n with index entries
=======================

.. index::
   single: Mailing List
   pair: Newsletter; Recipients List

index target section
--------------------

this is :index:`Newsletter` target paragraph.

various index entries
---------------------

.. index::
   triple: First; Second; Third
   see: Entry; Mailing List
   seealso: See; Newsletter
   module: Module
   keyword: Keyword
   operator: Operator
   object: Object
   exception: Exception
   statement: Statement
   builtin: Builtin

That's all.
View File
@@ -0,0 +1,2 @@
subdir contents
===============
View File
@@ -0,0 +1,2 @@
project = 'test-only-directive'
View File
@@ -0,0 +1,6 @@
test-only-directive
===================

.. toctree::

   only
View File
@@ -386,10 +386,10 @@ def test_generate():
     assert_warns("import for autodocumenting 'foobar'",
                  'function', 'foobar', more_content=None)
     # importing
-    assert_warns("import/find module 'test_foobar'",
+    assert_warns("failed to import module 'test_foobar'",
                  'module', 'test_foobar', more_content=None)
     # attributes missing
-    assert_warns("import/find function 'util.foobar'",
+    assert_warns("failed to import function 'foobar' from module 'util'",
                  'function', 'util.foobar', more_content=None)

     # test auto and given content mixing
View File
@@ -11,6 +11,7 @@

 import gettext
 import os
+import re
 from subprocess import Popen, PIPE

 from util import *
@@ -19,6 +20,7 @@ from util import SkipTest

 def teardown_module():
     (test_root / '_build').rmtree(True)
+    (test_roots / 'test-intl' / '_build').rmtree(True),


 @with_app(buildername='gettext')
@@ -87,3 +89,51 @@ def test_gettext(app):
     _ = gettext.translation('test_root', app.outdir, languages=['en']).gettext
     assert _("Testing various markup") == u"Testing various markup"
+
+
+@with_app(buildername='gettext',
+          srcdir=(test_roots / 'test-intl'),
+          doctreedir=(test_roots / 'test-intl' / '_build' / 'doctree'),
+          confoverrides={'gettext_compact': False})
+def test_gettext_index_entries(app):
+    # regression test for #976
+    app.builder.build(['index_entries'])
+
+    _msgid_getter = re.compile(r'msgid "(.*)"').search
+    def msgid_getter(msgid):
+        m = _msgid_getter(msgid)
+        if m:
+            return m.groups()[0]
+        return None
+
+    pot = (app.outdir / 'index_entries.pot').text(encoding='utf-8')
+    msgids = filter(None, map(msgid_getter, pot.splitlines()))
+
+    expected_msgids = [
+        "i18n with index entries",
+        "index target section",
+        "this is :index:`Newsletter` target paragraph.",
+        "various index entries",
+        "That's all.",
+        "Mailing List",
+        "Newsletter",
+        "Recipients List",
+        "First",
+        "Second",
+        "Third",
+        "Entry",
+        "See",
+        "Module",
+        "Keyword",
+        "Operator",
+        "Object",
+        "Exception",
+        "Statement",
+        "Builtin",
+    ]
+    for expect in expected_msgids:
+        assert expect in msgids
+        msgids.remove(expect)
+
+    # unexpected msgid existent
+    assert msgids == []
View File
@@ -35,7 +35,6 @@ ENV_WARNINGS = """\
 %(root)s/autodoc_fodder.py:docstring of autodoc_fodder\\.MarkupError:2: \
 WARNING: Explicit markup ends without a blank line; unexpected \
 unindent\\.\\n?
-%(root)s/i18n/literalblock.txt:13: WARNING: Literal block expected; none found.
 %(root)s/images.txt:9: WARNING: image file not readable: foo.png
 %(root)s/images.txt:23: WARNING: nonlocal image URI found: \
 http://www.python.org/logo.png
View File
@@ -22,19 +22,34 @@ from util import SkipTest

 warnfile = StringIO()

+root = test_roots / 'test-intl'
+doctreedir = root / '_build' / 'doctree'
+
+
+def with_intl_app(*args, **kw):
+    default_kw = {
+        'srcdir': root,
+        'doctreedir': doctreedir,
+        'confoverrides': {
+            'language': 'xx', 'locale_dirs': ['.'],
+            'gettext_compact': False,
+        },
+    }
+    default_kw.update(kw)
+    return with_app(*args, **default_kw)
+

 def setup_module():
     # Delete remnants left over after failed build
-    (test_root / 'xx').rmtree(True)
-    (test_root / 'xx' / 'LC_MESSAGES').makedirs()
+    (root / 'xx').rmtree(True)
+    (root / 'xx' / 'LC_MESSAGES').makedirs()
     # Compile all required catalogs into binary format (*.mo).
-    for dirpath, dirs, files in os.walk(test_root):
+    for dirpath, dirs, files in os.walk(root):
         dirpath = path(dirpath)
         for f in [f for f in files if f.endswith('.po')]:
             po = dirpath / f
-            mo = test_root / 'xx' / 'LC_MESSAGES' / (
-                relpath(po[:-3], test_root) + '.mo')
+            mo = root / 'xx' / 'LC_MESSAGES' / (
+                relpath(po[:-3], root) + '.mo')
             if not mo.parent.exists():
                 mo.parent.makedirs()
             try:
@@ -52,12 +67,11 @@ def setup_module():

 def teardown_module():
-    (test_root / '_build').rmtree(True)
-    (test_root / 'xx').rmtree(True)
+    (root / '_build').rmtree(True)
+    (root / 'xx').rmtree(True)


-@with_app(buildername='text',
-          confoverrides={'language': 'xx', 'locale_dirs': ['.']})
+@with_intl_app(buildername='text')
 def test_simple(app):
     app.builder.build(['bom'])
     result = (app.outdir / 'bom.txt').text(encoding='utf-8')
@@ -67,31 +81,26 @@ def test_simple(app):
     assert result == expect


-@with_app(buildername='text',
-          confoverrides={'language': 'xx', 'locale_dirs': ['.']})
+@with_intl_app(buildername='text')
 def test_subdir(app):
-    app.builder.build(['subdir/includes'])
-    result = (app.outdir / 'subdir' / 'includes.txt').text(encoding='utf-8')
-    assert result.startswith(u"\ntranslation\n***********\n\n")
+    app.builder.build(['subdir/contents'])
+    result = (app.outdir / 'subdir' / 'contents.txt').text(encoding='utf-8')
+    assert result.startswith(u"\nsubdir contents\n***************\n")


-@with_app(buildername='html', cleanenv=True,
-          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
-                         'gettext_compact': False})
+@with_intl_app(buildername='html', cleanenv=True)
 def test_i18n_footnote_break_refid(app):
     """test for #955 cant-build-html-with-footnotes-when-using"""
-    app.builder.build(['i18n/footnote'])
-    result = (app.outdir / 'i18n' / 'footnote.html').text(encoding='utf-8')
+    app.builder.build(['footnote'])
+    result = (app.outdir / 'footnote.html').text(encoding='utf-8')
     # expect no error by build


-@with_app(buildername='text', cleanenv=True,
-          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
-                         'gettext_compact': False})
+@with_intl_app(buildername='text', cleanenv=True)
 def test_i18n_footnote_regression(app):
     """regression test for fix #955"""
-    app.builder.build(['i18n/footnote'])
-    result = (app.outdir / 'i18n' / 'footnote.txt').text(encoding='utf-8')
+    app.builder.build(['footnote'])
+    result = (app.outdir / 'footnote.txt').text(encoding='utf-8')
     expect = (u"\nI18N WITH FOOTNOTE"
               u"\n******************\n"  # underline matches new translation
               u"\nI18N WITH FOOTNOTE INCLUDE THIS CONTENTS [ref] [1] [100]\n"
@@ -101,13 +110,11 @@ def test_i18n_footnote_regression(app):
     assert result == expect


-@with_app(buildername='html', cleanenv=True,
-          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
-                         'gettext_compact': False})
+@with_intl_app(buildername='html', cleanenv=True)
 def test_i18n_footnote_backlink(app):
     """i18n test for #1058"""
-    app.builder.build(['i18n/footnote'])
-    result = (app.outdir / 'i18n' / 'footnote.html').text(encoding='utf-8')
+    app.builder.build(['footnote'])
+    result = (app.outdir / 'footnote.html').text(encoding='utf-8')
     expects = [
         '<a class="footnote-reference" href="#id5" id="id1">[100]</a>',
         '<a class="footnote-reference" href="#id4" id="id2">[1]</a>',
@@ -121,13 +128,11 @@ def test_i18n_footnote_backlink(app):
     assert len(matches) == 1


-@with_app(buildername='text', warning=warnfile, cleanenv=True,
-          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
-                         'gettext_compact': False})
+@with_intl_app(buildername='text', warning=warnfile, cleanenv=True)
 def test_i18n_warn_for_number_of_references_inconsistency(app):
     app.builddir.rmtree(True)
-    app.builder.build(['i18n/refs_inconsistency'])
-    result = (app.outdir / 'i18n' / 'refs_inconsistency.txt').text(encoding='utf-8')
+    app.builder.build(['refs_inconsistency'])
+    result = (app.outdir / 'refs_inconsistency.txt').text(encoding='utf-8')
     expect = (u"\nI18N WITH REFS INCONSISTENCY"
               u"\n****************************\n"
               u"\n* FOR FOOTNOTE [ref2].\n"
@@ -139,7 +144,7 @@ def test_i18n_warn_for_number_of_references_inconsistency(app):
     assert result == expect

     warnings = warnfile.getvalue().replace(os.sep, '/')
-    warning_fmt = u'.*/i18n/refs_inconsistency.txt:\\d+: ' \
+    warning_fmt = u'.*/refs_inconsistency.txt:\\d+: ' \
         u'WARNING: inconsistent %s in translated message\n'
     expected_warning_expr = (
         warning_fmt % 'footnote references' +
@@ -148,12 +153,10 @@ def test_i18n_warn_for_number_of_references_inconsistency(app):
     assert re.search(expected_warning_expr, warnings)


-@with_app(buildername='html', cleanenv=True,
-          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
-                         'gettext_compact': False})
+@with_intl_app(buildername='html', cleanenv=True)
 def test_i18n_link_to_undefined_reference(app):
-    app.builder.build(['i18n/refs_inconsistency'])
-    result = (app.outdir / 'i18n' / 'refs_inconsistency.html').text(encoding='utf-8')
+    app.builder.build(['refs_inconsistency'])
+    result = (app.outdir / 'refs_inconsistency.html').text(encoding='utf-8')
     expected_expr = """<a class="reference external" href="http://www.example.com">reference</a>"""
     assert len(re.findall(expected_expr, result)) == 2
@@ -165,13 +168,11 @@ def test_i18n_link_to_undefined_reference(app):
     assert len(re.findall(expected_expr, result)) == 1


-@with_app(buildername='html', cleanenv=True,
-          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
-                         'gettext_compact': False})
+@with_intl_app(buildername='html', cleanenv=True)
 def test_i18n_keep_external_links(app):
     """regression test for #1044"""
-    app.builder.build(['i18n/external_links'])
-    result = (app.outdir / 'i18n' / 'external_links.html').text(encoding='utf-8')
+    app.builder.build(['external_links'])
+    result = (app.outdir / 'external_links.html').text(encoding='utf-8')
     # external link check
     expect_line = u"""<li>EXTERNAL LINK TO <a class="reference external" href="http://python.org">Python</a>.</li>"""
@@ -206,13 +207,11 @@ def test_i18n_keep_external_links(app):
     assert expect_line == matched_line


-@with_app(buildername='text', warning=warnfile, cleanenv=True,
-          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
-                         'gettext_compact': False})
+@with_intl_app(buildername='text', warning=warnfile, cleanenv=True)
 def test_i18n_literalblock_warning(app):
     app.builddir.rmtree(True)  #for warnings acceleration
-    app.builder.build(['i18n/literalblock'])
-    result = (app.outdir / 'i18n' / 'literalblock.txt').text(encoding='utf-8')
+    app.builder.build(['literalblock'])
+    result = (app.outdir / 'literalblock.txt').text(encoding='utf-8')
     expect = (u"\nI18N WITH LITERAL BLOCK"
               u"\n***********************\n"
               u"\nCORRECT LITERAL BLOCK:\n"
@@ -223,18 +222,16 @@ def test_i18n_literalblock_warning(app):
     assert result.startswith(expect)

     warnings = warnfile.getvalue().replace(os.sep, '/')
-    expected_warning_expr = u'.*/i18n/literalblock.txt:\\d+: ' \
+    expected_warning_expr = u'.*/literalblock.txt:\\d+: ' \
         u'WARNING: Literal block expected; none found.'
     assert re.search(expected_warning_expr, warnings)


-@with_app(buildername='text',
-          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
-                         'gettext_compact': False})
+@with_intl_app(buildername='text')
 def test_i18n_definition_terms(app):
     # regression test for #975
-    app.builder.build(['i18n/definition_terms'])
-    result = (app.outdir / 'i18n' / 'definition_terms.txt').text(encoding='utf-8')
+    app.builder.build(['definition_terms'])
+    result = (app.outdir / 'definition_terms.txt').text(encoding='utf-8')
     expect = (u"\nI18N WITH DEFINITION TERMS"
               u"\n**************************\n"
               u"\nSOME TERM"
@@ -245,12 +242,10 @@ def test_i18n_definition_terms(app):
     assert result == expect


-@with_app(buildername='text', cleanenv=True,
-          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
-                         'gettext_compact': False})
+@with_intl_app(buildername='text')
 def test_seealso(app):
-    app.builder.build(['i18n/seealso'])
-    result = (app.outdir / 'i18n' / 'seealso.txt').text(encoding='utf-8')
+    app.builder.build(['seealso'])
+    result = (app.outdir / 'seealso.txt').text(encoding='utf-8')
     expect = (u"\nI18N WITH SEEALSO"
               u"\n*****************\n"
               u"\nSee also: SHORT TEXT 1\n"
@@ -259,3 +254,48 @@ def test_seealso(app):
               u"\n LONG TEXT 2\n")
     assert result == expect
+
+
+@with_intl_app(buildername='text')
+def test_i18n_figure_caption(app):
+    # regression test for #940
+    app.builder.build(['figure_caption'])
+    result = (app.outdir / 'figure_caption.txt').text(encoding='utf-8')
+    expect = (u"\nI18N WITH FIGURE CAPTION"
+              u"\n************************\n"
+              u"\n [image]MY CAPTION OF THE FIGURE\n"
+              u"\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n"
+              u"\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n")
+
+    assert result == expect
+
+
+@with_intl_app(buildername='html')
+def test_i18n_index_entries(app):
+    # regression test for #976
+    app.builder.build(['index_entries'])
+    result = (app.outdir / 'genindex.html').text(encoding='utf-8')
+
+    def wrap(tag, keyword):
+        start_tag = "<%s[^>]*>" % tag
+        end_tag = "</%s>" % tag
+        return r"%s\s*%s\s*%s" % (start_tag, keyword, end_tag)
+
+    expected_exprs = [
+        wrap('a', 'NEWSLETTER'),
+        wrap('a', 'MAILING LIST'),
+        wrap('a', 'RECIPIENTS LIST'),
+        wrap('a', 'FIRST SECOND'),
+        wrap('a', 'SECOND THIRD'),
+        wrap('a', 'THIRD, FIRST'),
+        wrap('dt', 'ENTRY'),
+        wrap('dt', 'SEE'),
+        wrap('a', 'MODULE'),
+        wrap('a', 'KEYWORD'),
+        wrap('a', 'OPERATOR'),
+        wrap('a', 'OBJECT'),
+        wrap('a', 'EXCEPTION'),
+        wrap('a', 'STATEMENT'),
+        wrap('a', 'BUILTIN'),
+    ]
+    for expr in expected_exprs:
+        assert re.search(expr, result, re.M)
View File
@@ -17,10 +17,10 @@ from util import *


 def teardown_module():
-    (test_root / '_build').rmtree(True)
+    (test_roots / 'test-only-directive' / '_build').rmtree(True)


-@with_app(buildername='text')
+@with_app(buildername='text', srcdir=(test_roots / 'test-only-directive'))
 def test_sectioning(app):
     def getsects(section):
View File
@@ -36,7 +36,7 @@ def test_wordcollector():
     doc['file'] = 'dummy'
     parser.parse(FILE_CONTENTS, doc)

-    ix = IndexBuilder(None, 'en', {})
+    ix = IndexBuilder(None, 'en', {}, None)
     ix.feed('filename', 'title', doc)
     assert 'boson' not in ix._mapping
     assert 'fermion' in ix._mapping
View File
@@ -30,7 +30,7 @@ from nose import tools, SkipTest


 __all__ = [
-    'test_root', 'raises', 'raises_msg',
+    'test_root', 'test_roots', 'raises', 'raises_msg',
     'skip_if', 'skip_unless', 'skip_unless_importable', 'Struct',
     'ListOutput', 'TestApp', 'with_app', 'gen_with_app',
     'path', 'with_tempdir', 'write_file',
@@ -39,6 +39,7 @@ __all__ = [


 test_root = path(__file__).parent.joinpath('root').abspath()
+test_roots = path(__file__).parent.joinpath('roots').abspath()


 def _excstr(exc):
@@ -153,6 +154,8 @@ class TestApp(application.Sphinx):
             self.cleanup_trees.insert(0, outdir)
         if doctreedir is None:
             doctreedir = srcdir.joinpath(srcdir, self.builddir, 'doctrees')
+            if not doctreedir.isdir():
+                doctreedir.makedirs()
         if cleanenv:
             self.cleanup_trees.insert(0, doctreedir)
         if confoverrides is None: