#586: Implemented improved glossary markup which allows multiple terms per definition.

Georg Brandl
2011-01-06 20:34:41 +01:00
parent 0d6c738b02
commit f925de6aa0
11 changed files with 143 additions and 28 deletions

View File

@@ -51,6 +51,9 @@ Release 1.1 (in development)
* #526: Added Iranian translation.
* #586: Implemented improved glossary markup which allows multiple terms per
definition.
* #559: :confval:`html_add_permalinks` is now a string giving the
text to display in permalinks.

View File

@@ -153,9 +153,9 @@ Glossary
.. rst:directive:: .. glossary::
This directive must contain a reST definition list with terms and
definitions. The definitions will then be referenceable with the :rst:role:`term`
role. Example::
This directive must contain reST definition-list-like markup with terms and
definitions. The definitions will then be referenceable with the
:rst:role:`term` role. Example::
.. glossary::
@@ -169,10 +169,25 @@ Glossary
The directory which, including its subdirectories, contains all
source files for one Sphinx project.
In contrast to regular definition lists, *multiple* terms per entry are
allowed, and inline markup is allowed in terms. You can link to any of the
terms. For example::
.. glossary::
term 1
term 2
Definition of both terms.
(When the glossary is sorted, the first term determines the sort order.)
.. versionadded:: 0.6
You can now give the glossary directive a ``:sorted:`` flag that will
automatically sort the entries alphabetically.
.. versionchanged:: 1.1
Now supports multiple terms and inline markup in terms.
Grammar production displays
---------------------------

View File

@@ -171,6 +171,9 @@ class literal_emphasis(nodes.emphasis):
class abbreviation(nodes.Inline, nodes.TextElement):
"""Node for abbreviations with explanations."""
class termsep(nodes.Structural, nodes.Element):
"""Separates two terms within a <term> node."""
# make the new nodes known to docutils; needed because the HTML writer will
# choke at some point if these are not added
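
For context, the registration that this comment refers to happens once at
module level in the same file; the snippet below is a minimal sketch of the
pattern, using docutils' private ``_add_node_class_names`` helper (the exact
call Sphinx uses is an assumption here)::

   from docutils import nodes

   class termsep(nodes.Structural, nodes.Element):
       """Separates two terms within a <term> node."""

   # Tell docutils about the new node class name so that writers can
   # dispatch to visit_termsep/depart_termsep; without this, the HTML
   # writer fails when it encounters an unknown node type.
   nodes._add_node_class_names(['termsep'])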

View File

@@ -14,6 +14,7 @@ import unicodedata
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from sphinx import addnodes
from sphinx.roles import XRefRole
@@ -206,8 +207,8 @@ class OptionXRefRole(XRefRole):
class Glossary(Directive):
"""
Directive to create a glossary with cross-reference targets
for :term: roles.
Directive to create a glossary with cross-reference targets for :term:
roles.
"""
has_content = True
@@ -224,37 +225,100 @@ class Glossary(Directive):
gloss_entries = env.temp_data.setdefault('gloss_entries', set())
node = addnodes.glossary()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
# the content should be definition lists
dls = [child for child in node
if isinstance(child, nodes.definition_list)]
# now, extract definition terms to enable cross-reference creation
new_dl = nodes.definition_list()
new_dl['classes'].append('glossary')
# This directive implements a custom format of the reST definition list
# that allows multiple lines of terms before the definition. This is
# easy to parse since we know that the contents of the glossary *must
# be* a definition list.
# first, collect single entries
entries = []
in_definition = True
was_empty = True
messages = []
for (source, lineno, line) in self.content.xitems():
# empty line -> add to last definition
if not line:
if in_definition and entries:
entries[-1][1].append('', source, lineno)
was_empty = True
continue
# unindented line -> a term
if line and not line[0].isspace():
# first term of definition
if in_definition:
if not was_empty:
messages.append(self.state.reporter.system_message(
2, 'glossary term must be preceded by empty line',
source=source, line=lineno))
entries.append(([(line, source, lineno)], ViewList()))
in_definition = False
# second term and following
else:
if was_empty:
messages.append(self.state.reporter.system_message(
2, 'glossary terms must not be separated by empty '
'lines', source=source, line=lineno))
entries[-1][0].append((line, source, lineno))
else:
if not in_definition:
# first line of definition, determines indentation
in_definition = True
indent_len = len(line) - len(line.lstrip())
entries[-1][1].append(line[indent_len:], source, lineno)
was_empty = False
# now, parse all the entries into a big definition list
items = []
for dl in dls:
for li in dl.children:
if not li.children or not isinstance(li[0], nodes.term):
continue
termtext = li.children[0].astext()
for terms, definition in entries:
termtexts = []
termnodes = []
system_messages = []
ids = []
for line, source, lineno in terms:
# parse the term with inline markup
res = self.state.inline_text(line, lineno)
system_messages.extend(res[1])
# get a text-only representation of the term and register it
# as a cross-reference target
tmp = nodes.paragraph('', '', *res[0])
termtext = tmp.astext()
new_id = 'term-' + nodes.make_id(termtext)
if new_id in gloss_entries:
new_id = 'term-' + str(len(gloss_entries))
gloss_entries.add(new_id)
li[0]['names'].append(new_id)
li[0]['ids'].append(new_id)
ids.append(new_id)
objects['term', termtext.lower()] = env.docname, new_id
termtexts.append(termtext)
# add an index entry too
indexnode = addnodes.index()
indexnode['entries'] = [('single', termtext, new_id, termtext)]
li.insert(0, indexnode)
items.append((termtext, li))
termnodes.append(indexnode)
termnodes.extend(res[0])
termnodes.append(addnodes.termsep())
# make a single "term" node with all the terms, separated by termsep
# nodes (remove the dangling trailing separator)
term = nodes.term('', '', *termnodes[:-1])
term['ids'].extend(ids)
term['names'].extend(ids)
term += system_messages
defnode = nodes.definition()
self.state.nested_parse(definition, definition.items[0][1], defnode)
items.append((termtexts,
nodes.definition_list_item('', term, defnode)))
if 'sorted' in self.options:
items.sort(key=lambda x: unicodedata.normalize('NFD', x[0].lower()))
new_dl.extend(item[1] for item in items)
node.children = [new_dl]
return [node]
items.sort(key=lambda x:
unicodedata.normalize('NFD', x[0][0].lower()))
dlist = nodes.definition_list()
dlist['classes'].append('glossary')
dlist.extend(item[1] for item in items)
node += dlist
return messages + [node]
token_re = re.compile('`([a-z_][a-z0-9_]*)`')
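
The collection loop above is easier to follow in isolation. Below is a
self-contained sketch of the same classification idea, simplified to plain
strings (no ``ViewList`` source/offset bookkeeping and no reporter warnings;
the helper name is made up for illustration)::

   def group_glossary(lines):
       """Group glossary source lines into (terms, definition_lines) pairs."""
       entries = []
       in_definition = True
       for line in lines:
           if not line.strip():
               continue                    # blank lines just delimit entries here
           if not line[0].isspace():       # unindented line -> a term
               if in_definition:           # first term starts a new entry
                   entries.append(([line], []))
                   in_definition = False
               else:                       # consecutive terms share one entry
                   entries[-1][0].append(line)
           else:                           # indented line -> definition body
               in_definition = True
               entries[-1][1].append(line.strip())
       return entries

   sample = [
       "term 1",
       "term 2",
       "   Definition of both terms.",
       "",
       "boson",
       "   Particle with integer spin.",
   ]
   print(group_glossary(sample))
   # [(['term 1', 'term 2'], ['Definition of both terms.']),
   #  (['boson'], ['Particle with integer spin.'])]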

View File

@@ -285,7 +285,6 @@ def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
entries = [('single', target, targetid, target)]
indexnode = addnodes.index()
indexnode['entries'] = entries
indexnode['inline'] = True
textnode = nodes.Text(title, title)
return [indexnode, targetnode, textnode], []

View File

@@ -483,6 +483,10 @@ class HTMLTranslator(BaseTranslator):
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_termsep(self, node):
self.body.append('<br />')
raise nodes.SkipNode
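
Each writer gets the same treatment below: append a backend-specific
separator, then raise ``SkipNode`` so docutils neither descends into the
childless ``termsep`` node nor calls a ``depart_`` method for it. For the
two-term example from the documentation, the resulting HTML looks roughly
like this (a sketch; the exact ids and classes docutils generates are an
assumption)::

   # Hypothetical rendered output for a two-term glossary entry:
   expected_html = (
       '<dl class="glossary docutils">\n'
       '  <dt id="term-term-1">term 1<br />term 2</dt>\n'
       '  <dd>Definition of both terms.</dd>\n'
       '</dl>\n'
   )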
def depart_title(self, node):
close_tag = self.context[-1]
if (self.permalink_text and self.builder.add_permalinks and

View File

@@ -788,6 +788,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
def depart_term(self, node):
self.body.append(self.context.pop())
def visit_termsep(self, node):
self.body.append(', ')
raise nodes.SkipNode
def visit_classifier(self, node):
self.body.append('{[}')
def depart_classifier(self, node):
@@ -1059,7 +1063,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\n\\end{flushright}\n')
def visit_index(self, node, scre=re.compile(r';\s*')):
if not node.get('inline'):
if not node.get('inline', True):
self.body.append('\n')
entries = node['entries']
for type, string, tid, _ in entries:
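
The two ``inline`` changes belong together: the index role (previous file)
now marks the nodes it creates as inline, and the LaTeX writer treats a
missing flag as inline by default, so only index nodes explicitly flagged
``inline=False`` get the leading newline. A small sketch of the lookup
(node contents are illustrative)::

   from sphinx import addnodes

   # node created by the index role, now explicitly inline
   role_node = addnodes.index(inline=True,
                              entries=[('single', 'boson', 'term-boson', 'boson')])
   # a node without the flag falls back to the new default
   legacy_node = addnodes.index(entries=[])

   assert role_node.get('inline', True)
   assert legacy_node.get('inline', True)   # treated as inline: no leading '\n'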

View File

@@ -161,6 +161,10 @@ class ManualPageTranslator(BaseTranslator):
def depart_versionmodified(self, node):
self.depart_paragraph(node)
def visit_termsep(self, node):
self.body.append(', ')
raise nodes.SkipNode
# overwritten -- we don't want source comments to show up
def visit_comment(self, node):
raise nodes.SkipNode

View File

@@ -762,6 +762,9 @@ class TexinfoTranslator(nodes.NodeVisitor):
def depart_term(self, node):
pass
def visit_termsep(self, node):
self.add_text(self.at_item_x + ' ', fresh=1)
def visit_classifier(self, node):
self.add_text(' : ')
def depart_classifier(self, node):

View File

@@ -483,6 +483,10 @@ class TextTranslator(nodes.NodeVisitor):
if not self._li_has_classifier:
self.end_state(end=None)
def visit_termsep(self, node):
self.add_text(', ')
raise nodes.SkipNode
def visit_classifier(self, node):
self.add_text(' : ')
def depart_classifier(self, node):

View File

@@ -239,13 +239,25 @@ This tests :CLASS:`role names in uppercase`.
* Monty Python
.. glossary::
:sorted:
boson
Particle with integer spin.
fermion
*fermion*
Particle with half-integer spin.
tauon
myon
electron
Examples for fermions.
über
Gewisse
änhlich
Dinge
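
The non-ASCII terms at the end exercise the ``NFD`` normalization in the
sort key: decomposing accented characters makes them sort by their base
letter instead of by raw code point. A quick illustration::

   import unicodedata

   terms = ['zeta', 'über']
   print(sorted(terms))
   # ['zeta', 'über']   -- 'ü' (U+00FC) sorts after 'z' by code point
   print(sorted(terms, key=lambda t: unicodedata.normalize('NFD', t.lower())))
   # ['über', 'zeta']   -- decomposed 'u' + combining diaeresis sorts before 'z'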
.. productionlist::
try_stmt: `try1_stmt` | `try2_stmt`
try1_stmt: "try" ":" `suite`