commit 32b87e258b
merge birkenfeld/sphinx
AUTHORS (1 line changed)

@@ -13,6 +13,7 @@ Other contributors, listed alphabetically, are:
 * Charles Duffy -- original graphviz extension
 * Kevin Dunn -- MathJax extension
 * Josip Dzolonga -- coverage builder
+* Hernan Grecco -- search improvements
 * Horst Gutmann -- internationalization support
 * Martin Hans -- autodoc improvements
 * Doug Hellmann -- graphviz improvements
CHANGES (27 lines changed)

@@ -6,6 +6,31 @@ Release 1.2 (in development)
   admonition title ("See Also" instead of "See also"), and spurious indentation
   in the text builder.

+* sphinx-build now has a verbose option :option:`-v` which can be
+  repeated for greater effect.  A single occurrence provides slightly
+  more verbose output than normal.  Two or more occurrences of this
+  option provide more detailed output, which may be useful for
+  debugging.
+
+* sphinx-build now provides more specific error messages when called with
+  invalid options or arguments.
+
+* sphinx-build now supports the standard :option:`--help` and
+  :option:`--version` options.
+
+* #869: sphinx-build now has the option :option:`-T` for printing the full
+  traceback after an unhandled exception.
+
+* #976: Fix gettext not extracting index entries.
+
+* #940: Fix gettext not extracting figure captions.
+
+* #1067: Improve the ordering of the JavaScript search results: matches in
+  titles come before matches in full text, and object results are better
+  categorized.  Also implement a pluggable search scorer.
+
+* Fix the text writer not handling visit_legend for figure directive contents.
+
+* PR#72: #975: Fix gettext not extracting definition terms before docutils 0.10.0.
+
 * PR#25: In inheritance diagrams, the first line of the class docstring

@@ -67,6 +92,8 @@ Release 1.2 (in development)

 * #1041: Fix the C++ domain parser failing to parse a const type with a modifier.

+* #958: Do not preserve ``environment.pickle`` after a failed build.
+
 * PR#88: Added the "Sphinx Developer's Guide" (:file:`doc/devguide.rst`)
   which outlines the basic development process of the Sphinx project.
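As a concrete illustration of the :option:`-v`, :option:`-T`, :option:`--help`
and :option:`--version` entries above, the new flags can also be exercised
through the command-line entry point; this is a minimal sketch, and the source
and output paths are hypothetical:

    # Equivalent of running "sphinx-build -vv -T doc doc/_build/html";
    # argv[0] is the program name, since main() parses argv[1:].
    from sphinx import cmdline
    cmdline.main(['sphinx-build', '-vv', '-T', 'doc', 'doc/_build/html'])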
@@ -760,6 +760,15 @@ that use Sphinx' HTMLWriter class.

    .. versionadded:: 1.1

+.. confval:: html_search_scorer
+
+   The name of a JavaScript file (relative to the configuration directory) that
+   implements a search results scorer.  If empty, the default will be used.
+
+   .. XXX describe interface for scorer here
+
+   .. versionadded:: 1.2
+
 .. confval:: htmlhelp_basename

    Output file base name for HTML help builder.  Default is ``'pydoc'``.
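A minimal sketch of how this option is set in conf.py; the file name
'scorer.js' is hypothetical, and per the IndexBuilder change further below its
contents are embedded into searchtools.js in place of the default Scorer:

    # conf.py -- resolved relative to the configuration directory; if
    # empty (the default), the built-in scoring code is used instead.
    html_search_scorer = 'scorer.js'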
@@ -391,3 +391,6 @@ are in HTML form), these variables are also available:

 * ``titles_only`` (false by default): if true, put only toplevel document
   titles in the tree
+
+* ``includehidden`` (false by default): if true, the TOC tree will also
+  contain hidden entries.
@@ -11,6 +11,7 @@
     :license: BSD, see LICENSE for details.
 """

+import os
 import sys
 import types
 import posixpath
@@ -60,7 +61,8 @@ class Sphinx(object):

     def __init__(self, srcdir, confdir, outdir, doctreedir, buildername,
                  confoverrides=None, status=sys.stdout, warning=sys.stderr,
-                 freshenv=False, warningiserror=False, tags=None):
+                 freshenv=False, warningiserror=False, tags=None, verbosity=0):
+        self.verbosity = verbosity
         self.next_listener_id = 0
         self._extensions = {}
         self._listeners = {}
@@ -203,12 +205,27 @@ class Sphinx(object):
             else:
                 self.builder.build_update()
         except Exception, err:
+            # delete the saved env to force a fresh build next time
+            envfile = path.join(self.doctreedir, ENV_PICKLE_FILENAME)
+            if path.isfile(envfile):
+                os.unlink(envfile)
             self.emit('build-finished', err)
             raise
         else:
             self.emit('build-finished', None)
         self.builder.cleanup()

+    def _log(self, message, wfile, nonl=False):
+        try:
+            wfile.write(message)
+        except UnicodeEncodeError:
+            encoding = getattr(wfile, 'encoding', 'ascii') or 'ascii'
+            wfile.write(message.encode(encoding, 'replace'))
+        if not nonl:
+            wfile.write('\n')
+        if hasattr(wfile, 'flush'):
+            wfile.flush()
+
     def warn(self, message, location=None, prefix='WARNING: '):
         if isinstance(location, tuple):
             docname, lineno = location
@@ -221,26 +238,30 @@ class Sphinx(object):
         if self.warningiserror:
             raise SphinxWarning(warntext)
         self._warncount += 1
-        try:
-            self._warning.write(warntext)
-        except UnicodeEncodeError:
-            encoding = getattr(self._warning, 'encoding', 'ascii') or 'ascii'
-            self._warning.write(warntext.encode(encoding, 'replace'))
+        self._log(warntext, self._warning, True)

     def info(self, message='', nonl=False):
-        try:
-            self._status.write(message)
-        except UnicodeEncodeError:
-            encoding = getattr(self._status, 'encoding', 'ascii') or 'ascii'
-            self._status.write(message.encode(encoding, 'replace'))
-        if not nonl:
-            self._status.write('\n')
-        self._status.flush()
+        self._log(message, self._status, nonl)

+    def verbose(self, message, *args, **kwargs):
+        if self.verbosity < 1:
+            return
+        if args or kwargs:
+            message = message % (args or kwargs)
+        self._log(message, self._warning)
+
+    def debug(self, message, *args, **kwargs):
+        if self.verbosity < 2:
+            return
+        if args or kwargs:
+            message = message % (args or kwargs)
+        self._log(message, self._warning)
+
     # general extensibility interface

     def setup_extension(self, extension):
         """Import and setup a Sphinx extension module. No-op if called twice."""
+        self.debug('setting up extension: %r', extension)
         if extension in self._extensions:
             return
         try:
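The new verbose() and debug() helpers give extensions a uniform way to emit
optional diagnostics; a short sketch (the extension name and its messages are
invented for illustration):

    def setup(app):
        # printed only at -v and above (verbosity >= 1)
        app.verbose('myext: setting up (config dir: %s)', app.confdir)
        # printed only at -vv and above (verbosity >= 2)
        app.debug('myext: initial state: %r', {'enabled': True})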
@@ -301,9 +322,12 @@ class Sphinx(object):
         else:
             self._listeners[event][listener_id] = callback
         self.next_listener_id += 1
+        self.debug('connecting event %r: %r [id=%s]',
+                   event, callback, listener_id)
         return listener_id

     def disconnect(self, listener_id):
+        self.debug('disconnecting event: [id=%s]', listener_id)
         for event in self._listeners.itervalues():
             event.pop(listener_id, None)

@@ -323,6 +347,7 @@ class Sphinx(object):
     # registering addon parts

     def add_builder(self, builder):
+        self.debug('adding builder: %r', builder)
         if not hasattr(builder, 'name'):
             raise ExtensionError('Builder class %s has no "name" attribute'
                                  % builder)

@@ -337,6 +362,7 @@ class Sphinx(object):
         self.builderclasses[builder.name] = builder

     def add_config_value(self, name, default, rebuild):
+        self.debug('adding config value: %r', (name, default, rebuild))
         if name in self.config.values:
             raise ExtensionError('Config value %r already present' % name)
         if rebuild in (False, True):

@@ -344,11 +370,13 @@ class Sphinx(object):
         self.config.values[name] = (default, rebuild)

     def add_event(self, name):
+        self.debug('adding event: %r', name)
         if name in self._events:
             raise ExtensionError('Event %r already present' % name)
         self._events[name] = ''

     def add_node(self, node, **kwds):
+        self.debug('adding node: %r', (node, kwds))
         nodes._add_node_class_names([node.__name__])
         for key, val in kwds.iteritems():
             try:

@@ -388,24 +416,30 @@ class Sphinx(object):
         return obj

     def add_directive(self, name, obj, content=None, arguments=None, **options):
+        self.debug('adding directive: %r',
+                   (name, obj, content, arguments, options))
         directives.register_directive(
             name, self._directive_helper(obj, content, arguments, **options))

     def add_role(self, name, role):
+        self.debug('adding role: %r', (name, role))
         roles.register_local_role(name, role)

     def add_generic_role(self, name, nodeclass):
         # don't use roles.register_generic_role because it uses
         # register_canonical_role
+        self.debug('adding generic role: %r', (name, nodeclass))
         role = roles.GenericRole(name, nodeclass)
         roles.register_local_role(name, role)

     def add_domain(self, domain):
+        self.debug('adding domain: %r', domain)
         if domain.name in self.domains:
             raise ExtensionError('domain %s already registered' % domain.name)
         self.domains[domain.name] = domain

     def override_domain(self, domain):
+        self.debug('overriding domain: %r', domain)
         if domain.name not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain.name)
         if not issubclass(domain, self.domains[domain.name]):

@@ -415,17 +449,21 @@ class Sphinx(object):

     def add_directive_to_domain(self, domain, name, obj,
                                 content=None, arguments=None, **options):
+        self.debug('adding directive to domain: %r',
+                   (domain, name, obj, content, arguments, options))
         if domain not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain)
         self.domains[domain].directives[name] = \
             self._directive_helper(obj, content, arguments, **options)

     def add_role_to_domain(self, domain, name, role):
+        self.debug('adding role to domain: %r', (domain, name, role))
         if domain not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain)
         self.domains[domain].roles[name] = role

     def add_index_to_domain(self, domain, index):
+        self.debug('adding index to domain: %r', (domain, index))
         if domain not in self.domains:
             raise ExtensionError('domain %s not yet registered' % domain)
         self.domains[domain].indices.append(index)

@@ -433,6 +471,9 @@ class Sphinx(object):
     def add_object_type(self, directivename, rolename, indextemplate='',
                         parse_node=None, ref_nodeclass=None, objname='',
                         doc_field_types=[]):
+        self.debug('adding object type: %r',
+                   (directivename, rolename, indextemplate, parse_node,
+                    ref_nodeclass, objname, doc_field_types))
         StandardDomain.object_types[directivename] = \
             ObjType(objname or directivename, rolename)
         # create a subclass of GenericObject as the new directive

@@ -449,6 +490,9 @@ class Sphinx(object):

     def add_crossref_type(self, directivename, rolename, indextemplate='',
                           ref_nodeclass=None, objname=''):
+        self.debug('adding crossref type: %r',
+                   (directivename, rolename, indextemplate, ref_nodeclass,
+                    objname))
         StandardDomain.object_types[directivename] = \
             ObjType(objname or directivename, rolename)
         # create a subclass of Target as the new directive

@@ -459,9 +503,11 @@ class Sphinx(object):
         StandardDomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass)

     def add_transform(self, transform):
+        self.debug('adding transform: %r', transform)
         SphinxStandaloneReader.transforms.append(transform)

     def add_javascript(self, filename):
+        self.debug('adding javascript: %r', filename)
         from sphinx.builders.html import StandaloneHTMLBuilder
         if '://' in filename:
             StandaloneHTMLBuilder.script_files.append(filename)

@@ -470,6 +516,7 @@ class Sphinx(object):
                 posixpath.join('_static', filename))

     def add_stylesheet(self, filename):
+        self.debug('adding stylesheet: %r', filename)
         from sphinx.builders.html import StandaloneHTMLBuilder
         if '://' in filename:
             StandaloneHTMLBuilder.css_files.append(filename)

@@ -478,21 +525,25 @@ class Sphinx(object):
                 posixpath.join('_static', filename))

     def add_lexer(self, alias, lexer):
+        self.debug('adding lexer: %r', (alias, lexer))
         from sphinx.highlighting import lexers
         if lexers is None:
             return
         lexers[alias] = lexer

     def add_autodocumenter(self, cls):
+        self.debug('adding autodocumenter: %r', cls)
         from sphinx.ext import autodoc
         autodoc.add_documenter(cls)
         self.add_directive('auto' + cls.objtype, autodoc.AutoDirective)

     def add_autodoc_attrgetter(self, type, getter):
+        self.debug('adding autodoc attrgetter: %r', (type, getter))
         from sphinx.ext import autodoc
         autodoc.AutoDirective._special_attrgetters[type] = getter

     def add_search_language(self, cls):
+        self.debug('adding search language: %r', cls)
         from sphinx.search import languages, SearchLanguage
         assert isinstance(cls, SearchLanguage)
         languages[cls.lang] = cls
@@ -119,9 +119,13 @@ class Builder(object):
             summary = bold(summary)
         for item in iterable:
             l += 1
-            self.info(term_width_line('%s[%3d%%] %s' %
-                      (summary, 100*l/length,
-                       colorfunc(item))), nonl=1)
+            s = '%s[%3d%%] %s' % (summary, 100*l/length,
+                                  colorfunc(item))
+            if self.app.verbosity:
+                s += '\n'
+            else:
+                s = term_width_line(s)
+            self.info(s, nonl=1)
             yield item
         if l > 0:
             self.info()
@@ -15,9 +15,11 @@ from datetime import datetime
 from collections import defaultdict

 from sphinx.builders import Builder
-from sphinx.util.nodes import extract_messages
+from sphinx.util import split_index_msg
+from sphinx.util.nodes import extract_messages, traverse_translatable_index
 from sphinx.util.osutil import SEP, safe_relpath, ensuredir, find_catalog
 from sphinx.util.console import darkgreen
+from sphinx.locale import pairindextypes

 POHEADER = ur"""
 # SOME DESCRIPTIVE TITLE.

@@ -82,6 +84,16 @@ class I18nBuilder(Builder):
         for node, msg in extract_messages(doctree):
             catalog.add(msg, node)

+        # Extract translatable messages from index entries.
+        for node, entries in traverse_translatable_index(doctree):
+            for typ, msg, tid, main in entries:
+                for m in split_index_msg(typ, msg):
+                    if typ == 'pair' and m in pairindextypes.values():
+                        # avoid incorporating built-in translated messages
+                        # (handled in 'sphinx.util.nodes.process_index_entry')
+                        continue
+                    catalog.add(m, node)
+

 class MessageCatalogBuilder(I18nBuilder):
     """
@@ -240,7 +240,8 @@ class StandaloneHTMLBuilder(Builder):
         if not lang or lang not in languages:
             lang = 'en'
         self.indexer = IndexBuilder(self.env, lang,
-                                    self.config.html_search_options)
+                                    self.config.html_search_options,
+                                    self.config.html_search_scorer)
         self.load_indexer(docnames)

         self.docwriter = HTMLWriter(self)

@@ -653,6 +654,8 @@ class StandaloneHTMLBuilder(Builder):
         self.indexer.feed(pagename, title, doctree)

     def _get_local_toctree(self, docname, collapse=True, **kwds):
+        if 'includehidden' not in kwds:
+            kwds['includehidden'] = False
         return self.render_partial(self.env.get_toctree_for(
             docname, self, collapse, **kwds))['fragment']
@@ -59,6 +59,10 @@ new and changed files
 -w <file> -- write warnings (and errors) to given file
 -W        -- turn warnings into errors
 -P        -- run Pdb on exception
+-T        -- show full traceback on exception
+-v        -- increase verbosity (can be repeated)
+--help    -- show this help and exit
+--version -- show version information and exit
 Modes:
 * without -a and without filenames, write new and changed files.
 * with -a, write all files.

@@ -71,8 +75,15 @@ def main(argv):
         nocolor()

     try:
-        opts, args = getopt.getopt(argv[1:], 'ab:t:d:c:CD:A:ng:NEqQWw:P')
+        opts, args = getopt.getopt(argv[1:], 'ab:t:d:c:CD:A:ng:NEqQWw:PThv',
+                                   ['help', 'version'])
+        allopts = set(opt[0] for opt in opts)
+        if '-h' in allopts or '--help' in allopts:
+            usage(argv)
+            return 0
+        if '--version' in allopts:
+            print 'Sphinx (sphinx-build) %s' % __version__
+            return 0
         srcdir = confdir = abspath(args[0])
         if not path.isdir(srcdir):
             print >>sys.stderr, 'Error: Cannot find source directory `%s\'.' % (

@@ -87,15 +98,18 @@ def main(argv):
         if not path.isdir(outdir):
             print >>sys.stderr, 'Making output directory...'
             os.makedirs(outdir)
-    except (IndexError, getopt.error):
-        usage(argv)
+    except getopt.error, err:
+        usage(argv, 'Error: %s' % err)
+        return 1
+    except IndexError:
+        usage(argv, 'Error: Insufficient arguments.')
         return 1

     filenames = args[2:]
     err = 0
     for filename in filenames:
         if not path.isfile(filename):
-            print >>sys.stderr, 'Cannot find file %r.' % filename
+            print >>sys.stderr, 'Error: Cannot find file %r.' % filename
             err = 1
     if err:
         return 1

@@ -109,6 +123,8 @@ def main(argv):

     buildername = None
     force_all = freshenv = warningiserror = use_pdb = False
+    show_traceback = False
+    verbosity = 0
     status = sys.stdout
     warning = sys.stderr
     error = sys.stderr

@@ -121,7 +137,7 @@ def main(argv):
             buildername = val
         elif opt == '-a':
             if filenames:
-                usage(argv, 'Cannot combine -a option and filenames.')
+                usage(argv, 'Error: Cannot combine -a option and filenames.')
                 return 1
             force_all = True
         elif opt == '-t':

@@ -185,6 +201,11 @@ def main(argv):
             warnfile = val
         elif opt == '-P':
             use_pdb = True
+        elif opt == '-T':
+            show_traceback = True
+        elif opt == '-v':
+            verbosity += 1
+            show_traceback = True

     if warning and warnfile:
         warnfp = open(warnfile, 'w')

@@ -194,17 +215,10 @@ def main(argv):
     try:
         app = Sphinx(srcdir, confdir, outdir, doctreedir, buildername,
                      confoverrides, status, warning, freshenv,
-                     warningiserror, tags)
+                     warningiserror, tags, verbosity)
         app.build(force_all, filenames)
         return app.statuscode
-    except KeyboardInterrupt:
-        if use_pdb:
-            import pdb
-            print >>error, red('Interrupted while building, starting debugger:')
-            traceback.print_exc()
-            pdb.post_mortem(sys.exc_info()[2])
-        return 1
-    except Exception, err:
+    except (Exception, KeyboardInterrupt), err:
         if use_pdb:
             import pdb
             print >>error, red('Exception occurred while building, '

@@ -213,7 +227,12 @@ def main(argv):
             pdb.post_mortem(sys.exc_info()[2])
         else:
-            print >>error
-            if isinstance(err, SystemMessage):
+            if show_traceback:
+                traceback.print_exc(None, error)
+            print >>error
+            if isinstance(err, KeyboardInterrupt):
+                print >>error, 'interrupted!'
+            elif isinstance(err, SystemMessage):
                 print >>error, red('reST markup error:')
                 print >>error, terminal_safe(err.args[0])
             elif isinstance(err, SphinxError):
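For completeness, the extended constructor signature used above can also be
driven directly from Python; a minimal sketch with hypothetical paths:

    import sys
    from sphinx.application import Sphinx

    app = Sphinx('doc', 'doc', 'doc/_build/html', 'doc/_build/doctrees',
                 'html', status=sys.stdout, warning=sys.stderr,
                 verbosity=2)  # same effect as passing -vv to sphinx-build
    app.build()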
@@ -110,6 +110,7 @@ class Config(object):
         html_secnumber_suffix = ('. ', 'html'),
         html_search_language = (None, 'html'),
         html_search_options = ({}, 'html'),
+        html_search_scorer = ('', None),

         # HTML help only options
         htmlhelp_basename = (lambda self: make_filename(self.project), None),
@@ -169,6 +169,7 @@ class Index(Directive):
         indexnode = addnodes.index()
         indexnode['entries'] = ne = []
+        indexnode['inline'] = False
         set_source_info(self, indexnode)
         for entry in arguments:
             ne.extend(process_index_entry(entry, targetid))
         return [indexnode, targetnode]
@@ -38,9 +38,9 @@ from docutils.transforms.parts import ContentsFilter

 from sphinx import addnodes
 from sphinx.util import url_re, get_matching_docs, docname_join, split_into, \
-     FilenameUniqDict
+     split_index_msg, FilenameUniqDict
 from sphinx.util.nodes import clean_astext, make_refnode, extract_messages, \
-     WarningStream
+     traverse_translatable_index, WarningStream
 from sphinx.util.osutil import movefile, SEP, ustrftime, find_catalog, \
      fs_encoding
 from sphinx.util.matching import compile_matchers

@@ -71,7 +71,7 @@ default_settings = {

 # This is increased every time an environment attribute is added
 # or changed to properly invalidate pickle files.
-ENV_VERSION = 41
+ENV_VERSION = 42


 default_substitutions = set([

@@ -303,6 +303,23 @@ class Locale(Transform):
                 child.parent = node
             node.children = patch.children

+        # Extract and translate messages for index entries.
+        for node, entries in traverse_translatable_index(self.document):
+            new_entries = []
+            for type, msg, tid, main in entries:
+                msg_parts = split_index_msg(type, msg)
+                msgstr_parts = []
+                for part in msg_parts:
+                    msgstr = catalog.gettext(part)
+                    if not msgstr:
+                        msgstr = part
+                    msgstr_parts.append(msgstr)
+
+                new_entries.append((type, ';'.join(msgstr_parts), tid, main))
+
+            node['raw_entries'] = entries
+            node['entries'] = new_entries
+

 class SphinxStandaloneReader(standalone.Reader):
     """

@@ -365,9 +382,7 @@ class BuildEnvironment:
         del self.config.values
         domains = self.domains
         del self.domains
-        # first write to a temporary file, so that if dumping fails,
-        # the existing environment won't be overwritten
-        picklefile = open(filename + '.tmp', 'wb')
+        picklefile = open(filename, 'wb')
         # remove potentially pickling-problematic values from config
         for key, val in vars(self.config).items():
             if key.startswith('_') or \

@@ -379,7 +394,6 @@ class BuildEnvironment:
             pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
         finally:
             picklefile.close()
-        movefile(filename + '.tmp', filename)
         # reset attributes
         self.domains = domains
         self.config.values = values

@@ -954,6 +968,7 @@ class BuildEnvironment:
         filterlevel = self.config.keep_warnings and 2 or 5
         for node in doctree.traverse(nodes.system_message):
             if node['level'] < filterlevel:
+                self.app.debug('%s [filtered system message]', node.astext())
                 node.parent.remove(node)
@@ -1340,46 +1355,56 @@ class BuildEnvironment:
         if toctree.get('hidden', False) and not includehidden:
             return None

-        def _walk_depth(node, depth, maxdepth):
+        # For reading the following two helper functions, it is useful to keep
+        # in mind the node structure of a toctree (using HTML-like node names
+        # for brevity):
+        #
+        # <ul>
+        #   <li>
+        #     <p><a></p>
+        #     <p><a></p>
+        #     ...
+        #     <ul>
+        #       ...
+        #     </ul>
+        #   </li>
+        # </ul>
+        #
+        # The transformation is made in two passes in order to avoid
+        # interactions between marking and pruning the tree (see bug #1046).
+
+        def _toctree_prune(node, depth, maxdepth):
             """Utility: Cut a TOC at a specified depth."""
-
-            # For reading this function, it is useful to keep in mind the node
-            # structure of a toctree (using HTML-like node names for brevity):
-            #
-            # <ul>
-            #   <li>
-            #     <p><a></p>
-            #     <p><a></p>
-            #     ...
-            #     <ul>
-            #       ...
-            #     </ul>
-            #   </li>
-            # </ul>
-
             for subnode in node.children[:]:
                 if isinstance(subnode, (addnodes.compact_paragraph,
                                         nodes.list_item)):
-                    # for <p> and <li>, just indicate the depth level and
-                    # recurse to children
-                    subnode['classes'].append('toctree-l%d' % (depth-1))
-                    _walk_depth(subnode, depth, maxdepth)
+                    # for <p> and <li>, just recurse
+                    _toctree_prune(subnode, depth, maxdepth)
                 elif isinstance(subnode, nodes.bullet_list):
                     # for <ul>, determine if the depth is too large or if the
                     # entry is to be collapsed
                     if maxdepth > 0 and depth > maxdepth:
                         subnode.parent.replace(subnode, [])
                     else:
-                        # to find out what to collapse, *first* walk subitems,
-                        # since that determines which children point to the
-                        # current page
-                        _walk_depth(subnode, depth+1, maxdepth)
                         # cull sub-entries whose parents aren't 'current'
                         if (collapse and depth > 1 and
                                 'iscurrent' not in subnode.parent):
                             subnode.parent.remove(subnode)
+                        else:
+                            # recurse on visible children
+                            _toctree_prune(subnode, depth+1, maxdepth)
+
+        def _toctree_add_classes(node, depth):
+            """Add 'toctree-l%d' and 'current' classes to the toctree."""
+            for subnode in node.children:
+                if isinstance(subnode, (addnodes.compact_paragraph,
+                                        nodes.list_item)):
+                    # for <p> and <li>, indicate the depth level and recurse
+                    subnode['classes'].append('toctree-l%d' % (depth-1))
+                    _toctree_add_classes(subnode, depth)
+                elif isinstance(subnode, nodes.bullet_list):
+                    # for <ul>, just recurse
+                    _toctree_add_classes(subnode, depth+1)
                 elif isinstance(subnode, nodes.reference):
                     # for <a>, identify which entries point to the current
                     # document and therefore may not be collapsed

@@ -1500,8 +1525,9 @@ class BuildEnvironment:
         newnode = addnodes.compact_paragraph('', '', *tocentries)
         newnode['toctree'] = True

-        # prune the tree to maxdepth and replace titles, also set level classes
-        _walk_depth(newnode, 1, prune and maxdepth or 0)
+        # prune the tree to maxdepth, also set toc depth and current classes
+        _toctree_add_classes(newnode, 1)
+        _toctree_prune(newnode, 1, prune and maxdepth or 0)

         # set the target paths in the toctrees (they are not known at TOC
         # generation time)
@@ -317,13 +317,20 @@ class Documenter(object):

         Returns True if successful, False if an error occurred.
         """
+        if self.objpath:
+            self.env.app.debug('autodoc: from %s import %s',
+                               self.modname, '.'.join(self.objpath))
         try:
+            self.env.app.debug('autodoc: import %s', self.modname)
             __import__(self.modname)
             parent = None
             obj = self.module = sys.modules[self.modname]
+            self.env.app.debug('autodoc: => %r', obj)
             for part in self.objpath:
                 parent = obj
+                self.env.app.debug('autodoc: getattr(_, %r)', part)
                 obj = self.get_attr(obj, part)
+                self.env.app.debug('autodoc: => %r', obj)
                 self.object_name = part
             self.parent = parent
             self.object = obj

@@ -331,12 +338,16 @@ class Documenter(object):
         # this used to only catch SyntaxError, ImportError and AttributeError,
         # but importing modules with side effects can raise all kinds of errors
         except Exception, err:
-            if self.env.app and not self.env.app.quiet:
-                self.env.app.info(traceback.format_exc().rstrip())
-            self.directive.warn(
-                'autodoc can\'t import/find %s %r, it reported error: '
-                '"%s", please check your spelling and sys.path' %
-                (self.objtype, str(self.fullname), err))
+            if self.objpath:
+                errmsg = 'autodoc: failed to import %s %r from module %r' % \
+                         (self.objtype, '.'.join(self.objpath), self.modname)
+            else:
+                errmsg = 'autodoc: failed to import %s %r' % \
+                         (self.objtype, self.fullname)
+            errmsg += '; the following exception was raised:\n%s' % \
+                      traceback.format_exc()
+            self.env.app.debug(errmsg)
+            self.directive.warn(errmsg)
             self.env.note_reread()
             return False

@@ -1294,6 +1305,13 @@ class AutoDirective(Directive):
         self.warnings = []
         self.result = ViewList()

+        try:
+            source, lineno = self.reporter.get_source_and_line(self.lineno)
+        except AttributeError:
+            source = lineno = None
+        self.env.app.debug('%s:%s: <input>\n%s',
+                           source, lineno, self.block_text)
+
         # find out what documenter to call
         objtype = self.name[4:]
         doc_class = self._registry[objtype]

@@ -1314,6 +1332,9 @@ class AutoDirective(Directive):
         if not self.result:
             return self.warnings

+        if self.env.app.verbosity >= 2:
+            self.env.app.debug('autodoc: <output>\n%s', '\n'.join(self.result))
+
         # record all filenames as dependencies -- this will at least
         # partially make automatic invalidation possible
         for fn in self.filename_set:
@@ -293,6 +293,7 @@ def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
     entries = [('single', target, targetid, main)]
     indexnode = addnodes.index()
     indexnode['entries'] = entries
+    set_role_source_info(inliner, lineno, indexnode)
     textnode = nodes.Text(title, title)
     return [indexnode, targetnode, textnode], []
@@ -8,10 +8,12 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
+from __future__ import with_statement
 import re
+import itertools
 import cPickle as pickle

-from docutils.nodes import comment, Text, NodeVisitor, SkipNode
+from docutils.nodes import comment, title, Text, NodeVisitor, SkipNode

 from sphinx.util import jsdump, rpartition

@@ -92,6 +94,7 @@ var Stemmer = function() {
         (ord(word[0]) < 256 and (len(word) < 3 or word in self.stopwords or
          word.isdigit())))
+

 from sphinx.search import en, ja

 languages = {

@@ -137,13 +140,16 @@ class WordCollector(NodeVisitor):
     def __init__(self, document, lang):
         NodeVisitor.__init__(self, document)
         self.found_words = []
+        self.found_title_words = []
         self.lang = lang

     def dispatch_visit(self, node):
         if node.__class__ is comment:
             raise SkipNode
-        if node.__class__ is Text:
+        elif node.__class__ is Text:
             self.found_words.extend(self.lang.split(node.astext()))
+        elif node.__class__ is title:
+            self.found_title_words.extend(self.lang.split(node.astext()))


 class IndexBuilder(object):

@@ -156,12 +162,14 @@ class IndexBuilder(object):
         'pickle': pickle
     }

-    def __init__(self, env, lang, options):
+    def __init__(self, env, lang, options, scoring):
         self.env = env
         # filename -> title
         self._titles = {}
         # stemmed word -> set(filenames)
         self._mapping = {}
+        # stemmed words in titles -> set(filenames)
+        self._title_mapping = {}
         # objtype -> index
         self._objtypes = {}
         # objtype index -> (domain, type, objname (localized))

@@ -169,6 +177,12 @@ class IndexBuilder(object):
         # add language-specific SearchLanguage instance
         self.lang = languages[lang](options)

+        if scoring:
+            with open(scoring, 'rb') as fp:
+                self.js_scorer_code = fp.read().decode('utf-8')
+        else:
+            self.js_scorer_code = u''
+
     def load(self, stream, format):
         """Reconstruct from frozen data."""
         if isinstance(format, basestring):

@@ -179,12 +193,18 @@ class IndexBuilder(object):
             raise ValueError('old format')
         index2fn = frozen['filenames']
         self._titles = dict(zip(index2fn, frozen['titles']))
-        self._mapping = {}
-        for k, v in frozen['terms'].iteritems():
-            if isinstance(v, int):
-                self._mapping[k] = set([index2fn[v]])
-            else:
-                self._mapping[k] = set(index2fn[i] for i in v)
+
+        def load_terms(mapping):
+            rv = {}
+            for k, v in mapping.iteritems():
+                if isinstance(v, int):
+                    rv[k] = set([index2fn[v]])
+                else:
+                    rv[k] = set(index2fn[i] for i in v)
+            return rv
+
+        self._mapping = load_terms(frozen['terms'])
+        self._title_mapping = load_terms(frozen['titleterms'])
         # no need to load keywords/objtypes

     def dump(self, stream, format):

@@ -229,28 +249,31 @@ class IndexBuilder(object):
         return rv

     def get_terms(self, fn2index):
-        rv = {}
-        for k, v in self._mapping.iteritems():
-            if len(v) == 1:
-                fn, = v
-                if fn in fn2index:
-                    rv[k] = fn2index[fn]
-            else:
-                rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
-        return rv
+        rvs = {}, {}
+        for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
+            for k, v in mapping.iteritems():
+                if len(v) == 1:
+                    fn, = v
+                    if fn in fn2index:
+                        rv[k] = fn2index[fn]
+                else:
+                    rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
+        return rvs

     def freeze(self):
         """Create a usable data structure for serializing."""
         filenames = self._titles.keys()
         titles = self._titles.values()
         fn2index = dict((f, i) for (i, f) in enumerate(filenames))
-        terms = self.get_terms(fn2index)
+        terms, title_terms = self.get_terms(fn2index)

         objects = self.get_objects(fn2index)  # populates _objtypes
         objtypes = dict((v, k[0] + ':' + k[1])
                         for (k, v) in self._objtypes.iteritems())
         objnames = self._objnames
-        return dict(filenames=filenames, titles=titles, terms=terms,
-                    objects=objects, objtypes=objtypes, objnames=objnames)
+        return dict(filenames=filenames, titles=titles, terms=terms,
+                    objects=objects, objtypes=objtypes, objnames=objnames,
+                    titleterms=title_terms)

     def prune(self, filenames):
         """Remove data for all filenames not in the list."""

@@ -261,6 +284,8 @@ class IndexBuilder(object):
         self._titles = new_titles
         for wordnames in self._mapping.itervalues():
             wordnames.intersection_update(filenames)
+        for wordnames in self._title_mapping.itervalues():
+            wordnames.intersection_update(filenames)

     def feed(self, filename, title, doctree):
         """Feed a doctree to the index."""

@@ -269,19 +294,23 @@ class IndexBuilder(object):
         visitor = WordCollector(doctree, self.lang)
         doctree.walk(visitor)

-        def add_term(word, stem=self.lang.stem):
-            word = stem(word)
-            if self.lang.word_filter(word):
-                self._mapping.setdefault(word, set()).add(filename)
+        stem = self.lang.stem
+        _filter = self.lang.word_filter

-        for word in self.lang.split(title):
-            add_term(word)
+        for word in itertools.chain(visitor.found_title_words,
+                                    self.lang.split(title)):
+            word = stem(word)
+            if _filter(word):
+                self._title_mapping.setdefault(word, set()).add(filename)

         for word in visitor.found_words:
-            add_term(word)
+            word = stem(word)
+            if word not in self._title_mapping and _filter(word):
+                self._mapping.setdefault(word, set()).add(filename)

     def context_for_searchtool(self):
         return dict(
             search_language_stemming_code = self.lang.js_stemmer_code,
             search_language_stop_words = jsdump.dumps(sorted(self.lang.stopwords)),
+            search_scorer_tool = self.js_scorer_code,
         )
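The net effect on the frozen search index is one new key; a sketch of the
structure returned by IndexBuilder.freeze(), with illustrative values (an int
denotes a single file, a list several, both indexing into 'filenames'):

    frozen = {
        'filenames': ['intro', 'api'],
        'titles': ['Introduction', 'API Reference'],
        'terms': {'builder': 0, 'search': [0, 1]},  # stemmed full-text words
        'titleterms': {'api': 1},                   # new: stemmed title words
        'objects': {}, 'objtypes': {}, 'objnames': {},
    }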
@@ -8,7 +8,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "basic/layout.html" %}
+{%- extends "basic/layout.html" %}

 {% block header %}
 <div class="header-wrapper">

@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Overview') %}
 {% block body %}
 <h1>{{ docstitle|e }}</h1>

@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = indextitle %}
 {% block extrahead %}
 {{ super() }}

@@ -28,7 +28,7 @@
 </dt>
 {% endmacro %}

-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Index') %}
 {% block body %}

@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Index') %}
 {% block body %}

@@ -28,7 +28,7 @@
 </dt>
 {% endmacro %}

-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Index') %}
 {% block body %}

@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% block body %}
 {{ body }}
 {% endblock %}

@@ -7,7 +7,7 @@
     :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 #}
-{% extends "layout.html" %}
+{%- extends "layout.html" %}
 {% set title = _('Search') %}
 {% set script_files = script_files + ['_static/searchtools.js'] %}
 {% block extrahead %}
@@ -32,7 +32,7 @@ if (!window.console || !console.firebug) {
  */
 jQuery.urldecode = function(x) {
   return decodeURIComponent(x).replace(/\+/g, ' ');
-}
+};

 /**
  * small helper function to urlencode strings
@@ -9,35 +9,42 @@
  *
  */

-/**
- * helper function to return a node containing the
- * search summary for a given text. keywords is a list
- * of stemmed words, hlwords is the list of normal, unstemmed
- * words. the first one is used to find the occurrence, the
- * latter for highlighting it.
- */
-
-jQuery.makeSearchSummary = function(text, keywords, hlwords) {
-  var textLower = text.toLowerCase();
-  var start = 0;
-  $.each(keywords, function() {
-    var i = textLower.indexOf(this.toLowerCase());
-    if (i > -1)
-      start = i;
-  });
-  start = Math.max(start - 120, 0);
-  var excerpt = ((start > 0) ? '...' : '') +
-    $.trim(text.substr(start, 240)) +
-    ((start + 240 - text.length) ? '...' : '');
-  var rv = $('<div class="context"></div>').text(excerpt);
-  $.each(hlwords, function() {
-    rv = rv.highlightText(this, 'highlighted');
-  });
-  return rv;
-}
-
 {{ search_language_stemming_code|safe }}

+{% if search_scorer_tool %}
+{{ search_scorer_tool|safe }}
+{% else %}
+/**
+ * Simple result scoring code.
+ */
+var Scorer = {
+  // Implement the following function to further tweak the score for each result
+  // The function takes a result array [filename, title, anchor, descr, score]
+  // and returns the new score.
+  /*
+  score: function(result) {
+    return result[4];
+  },
+  */
+
+  // query matches the full name of an object
+  objNameMatch: 11,
+  // or matches in the last dotted part of the object name
+  objPartialMatch: 6,
+  // Additive scores depending on the priority of the object
+  objPrio: {0:  15,  // used to be importantResults
+            1:   5,  // used to be objectResults
+            2:  -5}, // used to be unimportantResults
+  // Used when the priority is not in the mapping.
+  objPrioDefault: 0,
+
+  // query found in title
+  title: 15,
+  // query found in terms
+  term: 5
+};
+{% endif %}
+
 /**
  * Search Module
  */

@@ -86,19 +93,20 @@ var Search = {
     if (this._pulse_status >= 0)
         return;
     function pulse() {
+      var i;
       Search._pulse_status = (Search._pulse_status + 1) % 4;
       var dotString = '';
-      for (var i = 0; i < Search._pulse_status; i++)
+      for (i = 0; i < Search._pulse_status; i++)
         dotString += '.';
       Search.dots.text(dotString);
       if (Search._pulse_status > -1)
         window.setTimeout(pulse, 500);
-    };
+    }
     pulse();
   },

   /**
-   * perform a search for something
+   * perform a search for something (or wait until index is loaded)
    */
   performSearch : function(query) {
     // create the required interface elements

@@ -118,41 +126,46 @@ var Search = {
     this.deferQuery(query);
   },

+  /**
+   * execute search (requires search index to be loaded)
+   */
   query : function(query) {
+    var i;
     var stopwords = {{ search_language_stop_words }};

-    // Stem the searchterms and add them to the correct list
+    // stem the searchterms and add them to the correct list
     var stemmer = new Stemmer();
     var searchterms = [];
     var excluded = [];
     var hlterms = [];
     var tmp = query.split(/\s+/);
     var objectterms = [];
-    for (var i = 0; i < tmp.length; i++) {
-      if (tmp[i] != "") {
+    for (i = 0; i < tmp.length; i++) {
+      if (tmp[i] !== "") {
         objectterms.push(tmp[i].toLowerCase());
       }

       if ($u.indexOf(stopwords, tmp[i]) != -1 || tmp[i].match(/^\d+$/) ||
-          tmp[i] == "") {
+          tmp[i] === "") {
         // skip this "word"
         continue;
       }
       // stem the word
       var word = stemmer.stemWord(tmp[i]).toLowerCase();
+      var toAppend;
       // select the correct list
       if (word[0] == '-') {
-        var toAppend = excluded;
+        toAppend = excluded;
         word = word.substr(1);
       }
       else {
-        var toAppend = searchterms;
+        toAppend = searchterms;
         hlterms.push(tmp[i].toLowerCase());
       }
       // only add if not already in the list
       if (!$u.contains(toAppend, word))
        toAppend.push(word);
-    };
+    }
     var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));

     // console.debug('SEARCH: searching for:');

@@ -160,89 +173,51 @@ var Search = {
     // console.info('excluded: ', excluded);

     // prepare search
     var filenames = this._index.filenames;
     var titles = this._index.titles;
     var terms = this._index.terms;
-    var fileMap = {};
-    var files = null;
-    // different result priorities
-    var importantResults = [];
-    var objectResults = [];
-    var regularResults = [];
-    var unimportantResults = [];
+    var titleterms = this._index.titleterms;
+
+    // array of [filename, title, anchor, descr, score]
+    var results = [];
     $('#search-progress').empty();

     // lookup as object
-    for (var i = 0; i < objectterms.length; i++) {
-      var others = [].concat(objectterms.slice(0,i),
-                             objectterms.slice(i+1, objectterms.length))
-      var results = this.performObjectSearch(objectterms[i], others);
-      // Assume first word is most likely to be the object,
-      // other words more likely to be in description.
-      // Therefore put matches for earlier words first.
-      // (Results are eventually used in reverse order).
-      objectResults = results[0].concat(objectResults);
-      importantResults = results[1].concat(importantResults);
-      unimportantResults = results[2].concat(unimportantResults);
+    for (i = 0; i < objectterms.length; i++) {
+      var others = [].concat(objectterms.slice(0, i),
+                             objectterms.slice(i+1, objectterms.length));
+      results = results.concat(this.performObjectSearch(objectterms[i], others));
     }

-    // perform the search on the required terms
-    for (var i = 0; i < searchterms.length; i++) {
-      var word = searchterms[i];
-      // no match but word was a required one
-      if ((files = terms[word]) == null)
-        break;
-      if (files.length == undefined) {
-        files = [files];
-      }
-      // create the mapping
-      for (var j = 0; j < files.length; j++) {
-        var file = files[j];
-        if (file in fileMap)
-          fileMap[file].push(word);
-        else
-          fileMap[file] = [word];
-      }
-    }
-
-    // now check if the files don't contain excluded terms
-    for (var file in fileMap) {
-      var valid = true;
-
-      // check if all requirements are matched
-      if (fileMap[file].length != searchterms.length)
-        continue;
-
-      // ensure that none of the excluded terms is in the
-      // search result.
-      for (var i = 0; i < excluded.length; i++) {
-        if (terms[excluded[i]] == file ||
-            $u.contains(terms[excluded[i]] || [], file)) {
-          valid = false;
-          break;
-        }
-      }
-
-      // if we have still a valid result we can add it
-      // to the result list
-      if (valid)
-        regularResults.push([filenames[file], titles[file], '', null]);
-    }
-
-    // delete unused variables in order to not waste
-    // memory until list is retrieved completely
-    delete filenames, titles, terms;
-
-    // now sort the regular results descending by title
-    regularResults.sort(function(a, b) {
-      var left = a[1].toLowerCase();
-      var right = b[1].toLowerCase();
-      return (left > right) ? -1 : ((left < right) ? 1 : 0);
-    });
-
-    // combine all results
-    var results = unimportantResults.concat(regularResults)
-      .concat(objectResults).concat(importantResults);
+    // lookup as search terms in fulltext
+    results = results.concat(this.performTermsSearch(searchterms, excluded, terms, Scorer.term))
+                     .concat(this.performTermsSearch(searchterms, excluded, titleterms, Scorer.title));
+
+    // let the scorer override scores with a custom scoring function
+    if (Scorer.score) {
+      for (i = 0; i < results.length; i++)
+        results[i][4] = Scorer.score(results[i]);
+    }
+
+    // now sort the results by score (in opposite order of appearance, since the
+    // display function below uses pop() to retrieve items) and then
+    // alphabetically
+    results.sort(function(a, b) {
+      var left = a[4];
+      var right = b[4];
+      if (left > right) {
+        return 1;
+      } else if (left < right) {
+        return -1;
+      } else {
+        // same score: sort alphabetically
+        left = a[1].toLowerCase();
+        right = b[1].toLowerCase();
+        return (left > right) ? -1 : ((left < right) ? 1 : 0);
+      }
+    });

     // for debugging
     //Search.lastresults = results.slice();  // a copy
     //console.info('search results:', Search.lastresults);

     // print the results
     var resultCount = results.length;

@@ -251,7 +226,7 @@ var Search = {
       if (results.length) {
         var item = results.pop();
         var listItem = $('<li style="display:none"></li>');
-        if (DOCUMENTATION_OPTIONS.FILE_SUFFIX == '') {
+        if (DOCUMENTATION_OPTIONS.FILE_SUFFIX === '') {
           // dirhtml builder
           var dirname = item[0] + '/';
           if (dirname.match(/\/index\/$/)) {

@@ -277,8 +252,8 @@ var Search = {
       } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
         $.get(DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' +
               item[0] + '.txt', function(data) {
-          if (data != '') {
-            listItem.append($.makeSearchSummary(data, searchterms, hlterms));
+          if (data !== '') {
+            listItem.append(Search.makeSearchSummary(data, searchterms, hlterms));
             Search.output.append(listItem);
           }
           listItem.slideDown(5, function() {

@@ -307,20 +282,32 @@ var Search = {
     displayNextItem();
   },

+  /**
+   * search for object names
+   */
   performObjectSearch : function(object, otherterms) {
     var filenames = this._index.filenames;
     var objects = this._index.objects;
     var objnames = this._index.objnames;
     var titles = this._index.titles;

-    var importantResults = [];
-    var objectResults = [];
-    var unimportantResults = [];
+    var i;
+    var results = [];

     for (var prefix in objects) {
       for (var name in objects[prefix]) {
         var fullname = (prefix ? prefix + '.' : '') + name;
         if (fullname.toLowerCase().indexOf(object) > -1) {
+          var score = 0;
+          var parts = fullname.split('.');
+          // check for different match types: exact matches of full name or
+          // "last name" (i.e. last dotted part)
+          if (fullname == object || parts[parts.length - 1] == object) {
+            score += Scorer.objNameMatch;
+          // matches in last name
+          } else if (parts[parts.length - 1].indexOf(object) > -1) {
+            score += Scorer.objPartialMatch;
+          }
           var match = objects[prefix][name];
           var objname = objnames[match[1]][2];
           var title = titles[match[0]];

@@ -330,7 +317,7 @@ var Search = {
           var haystack = (prefix + ' ' + name + ' ' +
                           objname + ' ' + title).toLowerCase();
           var allfound = true;
-          for (var i = 0; i < otherterms.length; i++) {
+          for (i = 0; i < otherterms.length; i++) {
             if (haystack.indexOf(otherterms[i]) == -1) {
               allfound = false;
               break;

@@ -341,37 +328,107 @@ var Search = {
             }
           }
           var descr = objname + _(', in ') + title;
-          anchor = match[3];
-          if (anchor == '')
+
+          var anchor = match[3];
+          if (anchor === '')
             anchor = fullname;
           else if (anchor == '-')
             anchor = objnames[match[1]][1] + '-' + fullname;
-          result = [filenames[match[0]], fullname, '#'+anchor, descr];
-          switch (match[2]) {
-          case 1: objectResults.push(result); break;
-          case 0: importantResults.push(result); break;
-          case 2: unimportantResults.push(result); break;
-          }
+          // add custom score for some objects according to scorer
+          if (Scorer.objPrio.hasOwnProperty(match[2])) {
+            score += Scorer.objPrio[match[2]];
+          } else {
+            score += Scorer.objPrioDefault;
+          }
+          results.push([filenames[match[0]], fullname, '#'+anchor, descr, score]);
         }
       }
     }

-    // sort results descending
-    objectResults.sort(function(a, b) {
-      return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
-    });
-
-    importantResults.sort(function(a, b) {
-      return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
-    });
-
-    unimportantResults.sort(function(a, b) {
-      return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
-    });
-
-    return [importantResults, objectResults, unimportantResults]
+    return results;
   },

+  /**
+   * search for full-text terms in the index
+   */
+  performTermsSearch : function(searchterms, excluded, terms, score) {
+    var filenames = this._index.filenames;
+    var titles = this._index.titles;
+
+    var i, j, file, files;
+    var fileMap = {};
+    var results = [];
+
+    // perform the search on the required terms
+    for (i = 0; i < searchterms.length; i++) {
+      var word = searchterms[i];
+      // no match but word was a required one
+      if ((files = terms[word]) === null)
+        break;
+      if (files.length === undefined) {
+        files = [files];
+      }
+      // create the mapping
+      for (j = 0; j < files.length; j++) {
+        file = files[j];
+        if (file in fileMap)
+          fileMap[file].push(word);
+        else
+          fileMap[file] = [word];
+      }
+    }
+
+    // now check if the files don't contain excluded terms
+    for (file in fileMap) {
+      var valid = true;
+
+      // check if all requirements are matched
+      if (fileMap[file].length != searchterms.length)
+        continue;
+
+      // ensure that none of the excluded terms is in the search result
+      for (i = 0; i < excluded.length; i++) {
+        if (terms[excluded[i]] == file ||
+            $u.contains(terms[excluded[i]] || [], file)) {
+          valid = false;
+          break;
+        }
+      }
+
+      // if we have still a valid result we can add it to the result list
+      if (valid) {
+        results.push([filenames[file], titles[file], '', null, score]);
+      }
+    }
+    return results;
+  },
+
+  /**
+   * helper function to return a node containing the
+   * search summary for a given text. keywords is a list
+   * of stemmed words, hlwords is the list of normal, unstemmed
+   * words. the first one is used to find the occurrence, the
+   * latter for highlighting it.
+   */
+  makeSearchSummary : function(text, keywords, hlwords) {
+    var textLower = text.toLowerCase();
+    var start = 0;
+    $.each(keywords, function() {
+      var i = textLower.indexOf(this.toLowerCase());
+      if (i > -1)
+        start = i;
+    });
+    start = Math.max(start - 120, 0);
+    var excerpt = ((start > 0) ? '...' : '') +
+      $.trim(text.substr(start, 240)) +
+      ((start + 240 - text.length) ? '...' : '');
+    var rv = $('<div class="context"></div>').text(excerpt);
+    $.each(hlwords, function() {
+      rv = rv.highlightText(this, 'highlighted');
+    });
+    return rv;
+  }
-}
+};

 $(document).ready(function() {
   Search.init();
@ -7,7 +7,7 @@
    :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
#}
{% extends "basic/layout.html" %}
{%- extends "basic/layout.html" %}

{% if theme_collapsiblesidebar|tobool %}
{% set script_files = script_files + ['_static/sidebar.js'] %}

@ -7,7 +7,7 @@
    :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
#}
{% extends "layout.html" %}
{%- extends "layout.html" %}
{%- block header %}{% endblock %}
{%- block rootrellink %}{% endblock %}
{%- block relbaritems %}{% endblock %}

@ -7,7 +7,7 @@
    :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
#}
{% extends "basic/layout.html" %}
{%- extends "basic/layout.html" %}

{# add only basic navigation links #}
{% block sidebar1 %}{% endblock %}

@ -7,7 +7,7 @@
    :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
#}
{% extends "basic/layout.html" %}
{%- extends "basic/layout.html" %}
{% set script_files = script_files + ['_static/theme_extras.js'] %}
{% set css_files = css_files + ['_static/print.css'] %}

@ -1,4 +1,4 @@
{% extends "basic/layout.html" %}
{%- extends "basic/layout.html" %}

{%- block extrahead %}
<link rel="stylesheet" href="http://fonts.googleapis.com/css?family=Neuton&subset=latin" type="text/css" media="screen" charset="utf-8" />

@ -8,7 +8,7 @@
    :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
#}
{% extends "basic/layout.html" %}
{%- extends "basic/layout.html" %}
{% set script_files = script_files + ['_static/theme_extras.js'] %}
{% set css_files = css_files + ['_static/print.css'] %}
{# do not display relbars #}

@ -7,7 +7,7 @@
    :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
#}
{% extends "basic/layout.html" %}
{%- extends "basic/layout.html" %}

{# put the sidebar before the body #}
{% block sidebar1 %}{{ sidebar() }}{% endblock %}
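
A note on the template hunks above: each pair shows the old line followed by
the new one. The added "-" in "{%- extends ... %}" is Jinja2 whitespace
control; it strips the whitespace (including the newline) before the tag,
presumably so the rendered pages no longer begin with a stray blank line.
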
@ -291,6 +291,12 @@ class Tee(object):
        self.stream1.write(text)
        self.stream2.write(text)

    def flush(self):
        if hasattr(self.stream1, 'flush'):
            self.stream1.flush()
        if hasattr(self.stream2, 'flush'):
            self.stream2.flush()


def parselinenos(spec, total):
    """Parse a line number spec (such as "1,2,4-6") and return a list of
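
The docstring is cut off by the hunk, but the spec format is visible; a quick
sketch of the values parselinenos() yields (the results are zero-based
indices, as the helper is used for slicing a file's line list):

    from sphinx.util import parselinenos

    parselinenos('1,2,4-6', 10)   # -> [0, 1, 3, 4, 5]
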
@ -354,6 +360,29 @@ def split_into(n, type, value):
    return parts


def split_index_msg(type, value):
    # new entry types must be listed in directives/other.py!
    result = []
    try:
        if type == 'single':
            try:
                result = split_into(2, 'single', value)
            except ValueError:
                result = split_into(1, 'single', value)
        elif type == 'pair':
            result = split_into(2, 'pair', value)
        elif type == 'triple':
            result = split_into(3, 'triple', value)
        elif type == 'see':
            result = split_into(2, 'see', value)
        elif type == 'seealso':
            result = split_into(2, 'see', value)
    except ValueError:
        pass

    return result


def format_exception_cut_frames(x=1):
    """Format an exception with traceback, but only the last x frames."""
    typ, val, tb = sys.exc_info()
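
For context, split_into() divides an entry at semicolons into the requested
number of parts, so the new split_index_msg() helper behaves roughly like
this (values are illustrative):

    split_index_msg('single', 'docutils')          # -> ['docutils']
    split_index_msg('single', 'docutils; reST')    # -> ['docutils', 'reST']
    split_index_msg('pair', 'builder; gettext')    # -> ['builder', 'gettext']
    split_index_msg('triple', 'a; b; c')           # -> ['a', 'b', 'c']
    split_index_msg('see', 'Entry; Mailing List')  # -> ['Entry', 'Mailing List']
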
@ -52,6 +52,13 @@ def extract_messages(doctree):
            node.line = definition_list_item.line - 1
            node.rawsource = definition_list_item.\
                             rawsource.split("\n", 2)[0]
        # workaround: nodes.caption doesn't have source, line.
        # this issue was filed to Docutils tracker:
        # https://sourceforge.net/tracker/?func=detail&aid=3599485&group_id=38414&atid=422032
        if isinstance(node, nodes.caption) and not node.source:
            node.source = node.parent.source
            node.line = ''  # docutils needs a fix to provide node.line

        if not node.source:
            continue  # built-in message
        if isinstance(node, IGNORED_NODES):
@ -67,6 +74,19 @@ def extract_messages(doctree):
        yield node, msg


def traverse_translatable_index(doctree):
    """Traverse translatable index nodes in a document tree."""
    def is_block_index(node):
        return isinstance(node, addnodes.index) and \
               node.get('inline') == False
    for node in doctree.traverse(is_block_index):
        if 'raw_entries' in node:
            entries = node['raw_entries']
        else:
            entries = node['entries']
        yield node, entries


def nested_parse_with_titles(state, content, node):
    """Version of state.nested_parse() that allows titles and does not require
    titles to have the same decoration as the calling document.
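
A sketch of how a caller might consume the new generator; the entry tuple
shown is only illustrative of the raw index-entry format, and `handle` is a
hypothetical callback:

    for node, entries in traverse_translatable_index(doctree):
        for entry in entries:
            # e.g. ('pair', 'Newsletter; Recipients List', 'index-0', '')
            handle(entry)
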
@ -294,6 +294,11 @@ class TextTranslator(nodes.NodeVisitor):
    def visit_label(self, node):
        raise nodes.SkipNode

    def visit_legend(self, node):
        pass
    def depart_legend(self, node):
        pass

    # XXX: option list could use some better styling

    def visit_option_list(self, node):
@ -27,9 +27,7 @@ Contents:
   doctest
   extensions
   versioning/index
   only
   footnote
   i18n/index

   Python <http://python.org/>


@ -191,6 +191,10 @@ Figures

   My caption of the figure

   My description paragraph of the figure.

   Description paragraph is wrapped with legend node.


Version markup
--------------
12
tests/roots/test-intl/bom.po
Normal file
@ -0,0 +1,12 @@
#, fuzzy
msgid ""
msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"

msgid "File with UTF-8 BOM"
msgstr "Datei mit UTF-8"

msgid "This file has a UTF-8 \"BOM\"."
msgstr "This file has umlauts: äöü."

5
tests/roots/test-intl/bom.txt
Normal file
@ -0,0 +1,5 @@
File with UTF-8 BOM
===================

This file has a UTF-8 "BOM".

7
tests/roots/test-intl/conf.py
Normal file
@ -0,0 +1,7 @@
# -*- coding: utf-8 -*-

import sys, os

project = 'Sphinx intl <Tests>'
source_suffix = '.txt'
keep_warnings = True

@ -2,9 +2,13 @@
   :maxdepth: 2
   :numbered:

   subdir/contents
   bom
   footnote
   external_links
   refs_inconsistency
   literalblock
   seealso
   definition_terms
   figure_caption
   index_entries
29
tests/roots/test-intl/figure_caption.po
Normal file
@ -0,0 +1,29 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) 2012, foof
# This file is distributed under the same license as the foo package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: sphinx 1.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2013-01-04 7:00\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"

msgid "i18n with figure caption"
msgstr "I18N WITH FIGURE CAPTION"

msgid "My caption of the figure"
msgstr "MY CAPTION OF THE FIGURE"

msgid "My description paragraph1 of the figure."
msgstr "MY DESCRIPTION PARAGRAPH1 OF THE FIGURE."

msgid "My description paragraph2 of the figure."
msgstr "MY DESCRIPTION PARAGRAPH2 OF THE FIGURE."

12
tests/roots/test-intl/figure_caption.txt
Normal file
@ -0,0 +1,12 @@
:tocdepth: 2

i18n with figure caption
========================

.. figure:: i18n.png

   My caption of the figure

   My description paragraph1 of the figure.

   My description paragraph2 of the figure.

BIN
tests/roots/test-intl/i18n.png
Normal file
Binary file not shown. Size: 66 KiB.

77
tests/roots/test-intl/index_entries.po
Normal file
@ -0,0 +1,77 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) 2013, foo
# This file is distributed under the same license as the foo package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: foo foo\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2013-01-05 18:10\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"

msgid "i18n with index entries"
msgstr ""

msgid "index target section"
msgstr ""

msgid "this is :index:`Newsletter` target paragraph."
msgstr "THIS IS :index:`NEWSLETTER` TARGET PARAGRAPH."

msgid "various index entries"
msgstr ""

msgid "That's all."
msgstr ""

msgid "Mailing List"
msgstr "MAILING LIST"

msgid "Newsletter"
msgstr "NEWSLETTER"

msgid "Recipients List"
msgstr "RECIPIENTS LIST"

msgid "First"
msgstr "FIRST"

msgid "Second"
msgstr "SECOND"

msgid "Third"
msgstr "THIRD"

msgid "Entry"
msgstr "ENTRY"

msgid "See"
msgstr "SEE"

msgid "Module"
msgstr "MODULE"

msgid "Keyword"
msgstr "KEYWORD"

msgid "Operator"
msgstr "OPERATOR"

msgid "Object"
msgstr "OBJECT"

msgid "Exception"
msgstr "EXCEPTION"

msgid "Statement"
msgstr "STATEMENT"

msgid "Builtin"
msgstr "BUILTIN"

31
tests/roots/test-intl/index_entries.txt
Normal file
@ -0,0 +1,31 @@
:tocdepth: 2

i18n with index entries
=======================

.. index::
   single: Mailing List
   pair: Newsletter; Recipients List

index target section
--------------------

this is :index:`Newsletter` target paragraph.


various index entries
---------------------

.. index::
   triple: First; Second; Third
   see: Entry; Mailing List
   seealso: See; Newsletter
   module: Module
   keyword: Keyword
   operator: Operator
   object: Object
   exception: Exception
   statement: Statement
   builtin: Builtin

That's all.

2
tests/roots/test-intl/subdir/contents.txt
Normal file
@ -0,0 +1,2 @@
subdir contents
===============

2
tests/roots/test-only-directive/conf.py
Normal file
@ -0,0 +1,2 @@

project = 'test-only-directive'

6
tests/roots/test-only-directive/contents.rst
Normal file
@ -0,0 +1,6 @@
test-only-directive
===================

.. toctree::

   only

@ -386,10 +386,10 @@ def test_generate():
    assert_warns("import for autodocumenting 'foobar'",
                 'function', 'foobar', more_content=None)
    # importing
    assert_warns("import/find module 'test_foobar'",
    assert_warns("failed to import module 'test_foobar'",
                 'module', 'test_foobar', more_content=None)
    # attributes missing
    assert_warns("import/find function 'util.foobar'",
    assert_warns("failed to import function 'foobar' from module 'util'",
                 'function', 'util.foobar', more_content=None)

    # test auto and given content mixing
@ -11,6 +11,7 @@

import gettext
import os
import re
from subprocess import Popen, PIPE

from util import *
@ -19,6 +20,7 @@ from util import SkipTest

def teardown_module():
    (test_root / '_build').rmtree(True)
    (test_roots / 'test-intl' / '_build').rmtree(True)


@with_app(buildername='gettext')
@ -87,3 +89,51 @@ def test_gettext(app):

    _ = gettext.translation('test_root', app.outdir, languages=['en']).gettext
    assert _("Testing various markup") == u"Testing various markup"


@with_app(buildername='gettext',
          srcdir=(test_roots / 'test-intl'),
          doctreedir=(test_roots / 'test-intl' / '_build' / 'doctree'),
          confoverrides={'gettext_compact': False})
def test_gettext_index_entries(app):
    # regression test for #976
    app.builder.build(['index_entries'])

    _msgid_getter = re.compile(r'msgid "(.*)"').search
    def msgid_getter(msgid):
        m = _msgid_getter(msgid)
        if m:
            return m.groups()[0]
        return None

    pot = (app.outdir / 'index_entries.pot').text(encoding='utf-8')
    msgids = filter(None, map(msgid_getter, pot.splitlines()))
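
For illustration, msgid_getter() returns the quoted msgid text of a matching
line and None otherwise, so the filter(None, ...) call drops both non-msgid
lines and the empty header msgid:

    msgid_getter('msgid "index target section"')  # -> 'index target section'
    msgid_getter('msgstr ""')                     # -> None (no msgid match)
    msgid_getter('msgid ""')                      # -> '' (falsy, filtered out)
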

    expected_msgids = [
        "i18n with index entries",
        "index target section",
        "this is :index:`Newsletter` target paragraph.",
        "various index entries",
        "That's all.",
        "Mailing List",
        "Newsletter",
        "Recipients List",
        "First",
        "Second",
        "Third",
        "Entry",
        "See",
        "Module",
        "Keyword",
        "Operator",
        "Object",
        "Exception",
        "Statement",
        "Builtin",
    ]
    for expect in expected_msgids:
        assert expect in msgids
        msgids.remove(expect)

    # no unexpected msgids should remain
    assert msgids == []

@ -35,7 +35,6 @@ ENV_WARNINGS = """\
%(root)s/autodoc_fodder.py:docstring of autodoc_fodder\\.MarkupError:2: \
WARNING: Explicit markup ends without a blank line; unexpected \
unindent\\.\\n?
%(root)s/i18n/literalblock.txt:13: WARNING: Literal block expected; none found.
%(root)s/images.txt:9: WARNING: image file not readable: foo.png
%(root)s/images.txt:23: WARNING: nonlocal image URI found: \
http://www.python.org/logo.png
@ -22,19 +22,34 @@ from util import SkipTest


warnfile = StringIO()
root = test_roots / 'test-intl'
doctreedir = root / '_build' / 'doctree'


def with_intl_app(*args, **kw):
    default_kw = {
        'srcdir': root,
        'doctreedir': doctreedir,
        'confoverrides': {
            'language': 'xx', 'locale_dirs': ['.'],
            'gettext_compact': False,
        },
    }
    default_kw.update(kw)
    return with_app(*args, **default_kw)
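
In other words, the new factory merely pre-fills the intl test defaults, so a
decorated test such as:

    @with_intl_app(buildername='text')
    def test_simple(app):
        ...

is equivalent to calling @with_app with the srcdir, doctreedir and
confoverrides spelled out by hand; any keyword given to with_intl_app()
overrides the defaults via default_kw.update(kw).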


def setup_module():
    # Delete remnants left over after failed build
    (test_root / 'xx').rmtree(True)
    (test_root / 'xx' / 'LC_MESSAGES').makedirs()
    (root / 'xx').rmtree(True)
    (root / 'xx' / 'LC_MESSAGES').makedirs()
    # Compile all required catalogs into binary format (*.mo).
    for dirpath, dirs, files in os.walk(test_root):
    for dirpath, dirs, files in os.walk(root):
        dirpath = path(dirpath)
        for f in [f for f in files if f.endswith('.po')]:
            po = dirpath / f
            mo = test_root / 'xx' / 'LC_MESSAGES' / (
                relpath(po[:-3], test_root) + '.mo')
            mo = root / 'xx' / 'LC_MESSAGES' / (
                relpath(po[:-3], root) + '.mo')
            if not mo.parent.exists():
                mo.parent.makedirs()
            try:
@ -52,12 +67,11 @@ def setup_module():


def teardown_module():
    (test_root / '_build').rmtree(True)
    (test_root / 'xx').rmtree(True)
    (root / '_build').rmtree(True)
    (root / 'xx').rmtree(True)


@with_app(buildername='text',
          confoverrides={'language': 'xx', 'locale_dirs': ['.']})
@with_intl_app(buildername='text')
def test_simple(app):
    app.builder.build(['bom'])
    result = (app.outdir / 'bom.txt').text(encoding='utf-8')
@ -67,31 +81,26 @@ def test_simple(app):
    assert result == expect


@with_app(buildername='text',
          confoverrides={'language': 'xx', 'locale_dirs': ['.']})
@with_intl_app(buildername='text')
def test_subdir(app):
    app.builder.build(['subdir/includes'])
    result = (app.outdir / 'subdir' / 'includes.txt').text(encoding='utf-8')
    assert result.startswith(u"\ntranslation\n***********\n\n")
    app.builder.build(['subdir/contents'])
    result = (app.outdir / 'subdir' / 'contents.txt').text(encoding='utf-8')
    assert result.startswith(u"\nsubdir contents\n***************\n")


@with_app(buildername='html', cleanenv=True,
          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
                         'gettext_compact': False})
@with_intl_app(buildername='html', cleanenv=True)
def test_i18n_footnote_break_refid(app):
    """test for #955 cant-build-html-with-footnotes-when-using"""
    app.builder.build(['i18n/footnote'])
    result = (app.outdir / 'i18n' / 'footnote.html').text(encoding='utf-8')
    app.builder.build(['footnote'])
    result = (app.outdir / 'footnote.html').text(encoding='utf-8')
    # expect the build to raise no error


@with_app(buildername='text', cleanenv=True,
          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
                         'gettext_compact': False})
@with_intl_app(buildername='text', cleanenv=True)
def test_i18n_footnote_regression(app):
    """regression test for fix #955"""
    app.builder.build(['i18n/footnote'])
    result = (app.outdir / 'i18n' / 'footnote.txt').text(encoding='utf-8')
    app.builder.build(['footnote'])
    result = (app.outdir / 'footnote.txt').text(encoding='utf-8')
    expect = (u"\nI18N WITH FOOTNOTE"
              u"\n******************\n" # underline matches new translation
              u"\nI18N WITH FOOTNOTE INCLUDE THIS CONTENTS [ref] [1] [100]\n"
@ -101,13 +110,11 @@ def test_i18n_footnote_regression(app):
    assert result == expect


@with_app(buildername='html', cleanenv=True,
          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
                         'gettext_compact': False})
@with_intl_app(buildername='html', cleanenv=True)
def test_i18n_footnote_backlink(app):
    """i18n test for #1058"""
    app.builder.build(['i18n/footnote'])
    result = (app.outdir / 'i18n' / 'footnote.html').text(encoding='utf-8')
    app.builder.build(['footnote'])
    result = (app.outdir / 'footnote.html').text(encoding='utf-8')
    expects = [
        '<a class="footnote-reference" href="#id5" id="id1">[100]</a>',
        '<a class="footnote-reference" href="#id4" id="id2">[1]</a>',
@ -121,13 +128,11 @@ def test_i18n_footnote_backlink(app):
    assert len(matches) == 1


@with_app(buildername='text', warning=warnfile, cleanenv=True,
          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
                         'gettext_compact': False})
@with_intl_app(buildername='text', warning=warnfile, cleanenv=True)
def test_i18n_warn_for_number_of_references_inconsistency(app):
    app.builddir.rmtree(True)
    app.builder.build(['i18n/refs_inconsistency'])
    result = (app.outdir / 'i18n' / 'refs_inconsistency.txt').text(encoding='utf-8')
    app.builder.build(['refs_inconsistency'])
    result = (app.outdir / 'refs_inconsistency.txt').text(encoding='utf-8')
    expect = (u"\nI18N WITH REFS INCONSISTENCY"
              u"\n****************************\n"
              u"\n* FOR FOOTNOTE [ref2].\n"
@ -139,7 +144,7 @@ def test_i18n_warn_for_number_of_references_inconsistency(app):
    assert result == expect

    warnings = warnfile.getvalue().replace(os.sep, '/')
    warning_fmt = u'.*/i18n/refs_inconsistency.txt:\\d+: ' \
    warning_fmt = u'.*/refs_inconsistency.txt:\\d+: ' \
                  u'WARNING: inconsistent %s in translated message\n'
    expected_warning_expr = (
        warning_fmt % 'footnote references' +
@ -148,12 +153,10 @@ def test_i18n_warn_for_number_of_references_inconsistency(app):
    assert re.search(expected_warning_expr, warnings)


@with_app(buildername='html', cleanenv=True,
          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
                         'gettext_compact': False})
@with_intl_app(buildername='html', cleanenv=True)
def test_i18n_link_to_undefined_reference(app):
    app.builder.build(['i18n/refs_inconsistency'])
    result = (app.outdir / 'i18n' / 'refs_inconsistency.html').text(encoding='utf-8')
    app.builder.build(['refs_inconsistency'])
    result = (app.outdir / 'refs_inconsistency.html').text(encoding='utf-8')

    expected_expr = """<a class="reference external" href="http://www.example.com">reference</a>"""
    assert len(re.findall(expected_expr, result)) == 2
@ -165,13 +168,11 @@ def test_i18n_link_to_undefined_reference(app):
    assert len(re.findall(expected_expr, result)) == 1


@with_app(buildername='html', cleanenv=True,
          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
                         'gettext_compact': False})
@with_intl_app(buildername='html', cleanenv=True)
def test_i18n_keep_external_links(app):
    """regression test for #1044"""
    app.builder.build(['i18n/external_links'])
    result = (app.outdir / 'i18n' / 'external_links.html').text(encoding='utf-8')
    app.builder.build(['external_links'])
    result = (app.outdir / 'external_links.html').text(encoding='utf-8')

    # external link check
    expect_line = u"""<li>EXTERNAL LINK TO <a class="reference external" href="http://python.org">Python</a>.</li>"""
@ -206,35 +207,31 @@ def test_i18n_keep_external_links(app):
    assert expect_line == matched_line


@with_app(buildername='text', warning=warnfile, cleanenv=True,
          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
                         'gettext_compact': False})
@with_intl_app(buildername='text', warning=warnfile, cleanenv=True)
def test_i18n_literalblock_warning(app):
    app.builddir.rmtree(True)  # to accelerate warning collection
    app.builder.build(['i18n/literalblock'])
    result = (app.outdir / 'i18n' / 'literalblock.txt').text(encoding='utf-8')
    app.builder.build(['literalblock'])
    result = (app.outdir / 'literalblock.txt').text(encoding='utf-8')
    expect = (u"\nI18N WITH LITERAL BLOCK"
              u"\n***********************\n"
              u"\nCORRECT LITERAL BLOCK:\n"
              u"\n this is"
              u"\n literal block\n"
              u"\nMISSING LITERAL BLOCK:\n"
              u"\n<SYSTEM MESSAGE: ")
              u"\n<SYSTEM MESSAGE:")
    assert result.startswith(expect)

    warnings = warnfile.getvalue().replace(os.sep, '/')
    expected_warning_expr = u'.*/i18n/literalblock.txt:\\d+: ' \
    expected_warning_expr = u'.*/literalblock.txt:\\d+: ' \
                            u'WARNING: Literal block expected; none found.'
    assert re.search(expected_warning_expr, warnings)


@with_app(buildername='text',
          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
                         'gettext_compact': False})
@with_intl_app(buildername='text')
def test_i18n_definition_terms(app):
    # regression test for #975
    app.builder.build(['i18n/definition_terms'])
    result = (app.outdir / 'i18n' / 'definition_terms.txt').text(encoding='utf-8')
    app.builder.build(['definition_terms'])
    result = (app.outdir / 'definition_terms.txt').text(encoding='utf-8')
    expect = (u"\nI18N WITH DEFINITION TERMS"
              u"\n**************************\n"
              u"\nSOME TERM"
@ -245,12 +242,10 @@ def test_i18n_definition_terms(app):
    assert result == expect


@with_app(buildername='text', cleanenv=True,
          confoverrides={'language': 'xx', 'locale_dirs': ['.'],
                         'gettext_compact': False})
@with_intl_app(buildername='text')
def test_seealso(app):
    app.builder.build(['i18n/seealso'])
    result = (app.outdir / 'i18n' / 'seealso.txt').text(encoding='utf-8')
    app.builder.build(['seealso'])
    result = (app.outdir / 'seealso.txt').text(encoding='utf-8')
    expect = (u"\nI18N WITH SEEALSO"
              u"\n*****************\n"
              u"\nSee also: SHORT TEXT 1\n"
@ -259,3 +254,48 @@ def test_seealso(app):
              u"\n LONG TEXT 2\n")
    assert result == expect


@with_intl_app(buildername='text')
def test_i18n_figure_caption(app):
    # regression test for #940
    app.builder.build(['figure_caption'])
    result = (app.outdir / 'figure_caption.txt').text(encoding='utf-8')
    expect = (u"\nI18N WITH FIGURE CAPTION"
              u"\n************************\n"
              u"\n [image]MY CAPTION OF THE FIGURE\n"
              u"\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n"
              u"\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n")

    assert result == expect


@with_intl_app(buildername='html')
def test_i18n_index_entries(app):
    # regression test for #976
    app.builder.build(['index_entries'])
    result = (app.outdir / 'genindex.html').text(encoding='utf-8')

    def wrap(tag, keyword):
        start_tag = "<%s[^>]*>" % tag
        end_tag = "</%s>" % tag
        return r"%s\s*%s\s*%s" % (start_tag, keyword, end_tag)

    expected_exprs = [
        wrap('a', 'NEWSLETTER'),
        wrap('a', 'MAILING LIST'),
        wrap('a', 'RECIPIENTS LIST'),
        wrap('a', 'FIRST SECOND'),
        wrap('a', 'SECOND THIRD'),
        wrap('a', 'THIRD, FIRST'),
        wrap('dt', 'ENTRY'),
        wrap('dt', 'SEE'),
        wrap('a', 'MODULE'),
        wrap('a', 'KEYWORD'),
        wrap('a', 'OPERATOR'),
        wrap('a', 'OBJECT'),
        wrap('a', 'EXCEPTION'),
        wrap('a', 'STATEMENT'),
        wrap('a', 'BUILTIN'),
    ]
    for expr in expected_exprs:
        assert re.search(expr, result, re.M)
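
The wrap() helper above builds a whitespace-tolerant regex for a keyword
inside an HTML tag, for example:

    wrap('a', 'NEWSLETTER')   # -> r"<a[^>]*>\s*NEWSLETTER\s*</a>"
    wrap('dt', 'ENTRY')       # -> r"<dt[^>]*>\s*ENTRY\s*</dt>"
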
@ -17,10 +17,10 @@ from util import *


def teardown_module():
    (test_root / '_build').rmtree(True)
    (test_roots / 'test-only-directive' / '_build').rmtree(True)


@with_app(buildername='text')
@with_app(buildername='text', srcdir=(test_roots / 'test-only-directive'))
def test_sectioning(app):

    def getsects(section):
@ -36,7 +36,7 @@ def test_wordcollector():
    doc['file'] = 'dummy'
    parser.parse(FILE_CONTENTS, doc)

    ix = IndexBuilder(None, 'en', {})
    ix = IndexBuilder(None, 'en', {}, None)
    ix.feed('filename', 'title', doc)
    assert 'boson' not in ix._mapping
    assert 'fermion' in ix._mapping
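
The extra argument here is presumably the pluggable search scorer added in
this release (compare the new html_search_scorer confval); passing None keeps
the default scoring. A sketch of the assumed call shape, with hypothetical
names:

    # scorer_source would hold the contents of the html_search_scorer file;
    # None, as in the test above, selects the built-in scoring.
    ix = IndexBuilder(env, 'en', options, scorer_source)
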
@ -30,7 +30,7 @@ from nose import tools, SkipTest


__all__ = [
    'test_root', 'raises', 'raises_msg',
    'test_root', 'test_roots', 'raises', 'raises_msg',
    'skip_if', 'skip_unless', 'skip_unless_importable', 'Struct',
    'ListOutput', 'TestApp', 'with_app', 'gen_with_app',
    'path', 'with_tempdir', 'write_file',
@ -39,6 +39,7 @@ __all__ = [


test_root = path(__file__).parent.joinpath('root').abspath()
test_roots = path(__file__).parent.joinpath('roots').abspath()


def _excstr(exc):
@ -153,6 +154,8 @@ class TestApp(application.Sphinx):
        self.cleanup_trees.insert(0, outdir)
        if doctreedir is None:
            doctreedir = srcdir.joinpath(srcdir, self.builddir, 'doctrees')
        if not doctreedir.isdir():
            doctreedir.makedirs()
        if cleanenv:
            self.cleanup_trees.insert(0, doctreedir)
        if confoverrides is None: