Term nodes in a glossary directive are now wrapped with a termset node to handle multiple terms correctly.

Fix #2251: line breaks in .rst files are transferred to .pot files in the wrong way.
shimizukawa 2016-02-13 17:51:44 +09:00
parent b0897a47c4
commit 804e866404
10 changed files with 61 additions and 61 deletions
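
For orientation before the file-by-file diff: a minimal sketch of how the doctree for a glossary entry with two terms changes. This is illustrative only; the node names come from the diffs below, while the terms "foo" and "bar" are made up:

    # Before this commit: a single <term> node holds every term, with
    # <termsep> nodes as separators between them:
    #   nodes.term('', '', Text('foo'), addnodes.termsep(), Text('bar'))
    #
    # After this commit: each term is its own <term> node, and the group
    # is wrapped in a single <termset> node:
    #   addnodes.termset('',
    #                    nodes.term('', '', Text('foo')),
    #                    nodes.term('', '', Text('bar')))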

View File

@@ -21,6 +21,9 @@ Incompatible changes
   refers to :confval:`exclude_patterns` to exclude extra files and directories.
 * #2300: enhance autoclass:: to use the docstring of __new__ if the __init__
   method's docstring is missing or empty
+* #2251: term nodes in a glossary directive are wrapped with a ``termset`` node to handle
+  multiple terms correctly. ``termsep`` node is removed and ``termset`` is added. With this
+  change, every writer must have visit_termset and depart_termset methods (sketch below).
 
 Features added
 --------------
@@ -84,6 +87,7 @@ Bugs fixed
 * #2074: make gettext should use canonical relative paths for .pot. Thanks to
   anatoly techtonik.
 * #2311: Fix sphinx.ext.inheritance_diagram raises AttributeError
+* #2251: line breaks in .rst files were transferred to .pot files in the wrong way.
 
 Documentation
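
As the incompatible-change entry above notes, a translator that does not inherit from one of the builtin writers patched below must now implement two extra methods. A minimal sketch, assuming a hypothetical docutils-based translator class (the no-op bodies mirror the builtin writers in this commit):

    from docutils import nodes

    class MyTranslator(nodes.NodeVisitor):
        # ... visit_*/depart_* methods for the other node types ...

        # <termset> only groups the per-term <term> nodes, so the wrapper
        # itself produces no output; visiting its children is enough.
        def visit_termset(self, node):
            pass

        def depart_termset(self, node):
            pass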

View File

@@ -54,4 +54,4 @@ You should not need to generate the nodes below in extensions.
 .. autoclass:: start_of_file
 .. autoclass:: productionlist
 .. autoclass:: production
-.. autoclass:: termsep
+.. autoclass:: termset

View File

@@ -208,8 +208,8 @@ class abbreviation(nodes.Inline, nodes.TextElement):
     """Node for abbreviations with explanations."""
 
-class termsep(nodes.Structural, nodes.Element):
-    """Separates two terms within a <term> node."""
+class termset(nodes.Structural, nodes.Element):
+    """A set of <term> nodes."""
 
 class manpage(nodes.Inline, nodes.TextElement):

View File

@@ -214,7 +214,7 @@ class OptionXRefRole(XRefRole):
         return title, target
 
-def make_termnodes_from_paragraph_node(env, node, new_id=None):
+def register_term_to_glossary(env, node, new_id=None):
     gloss_entries = env.temp_data.setdefault('gloss_entries', set())
     objects = env.domaindata['std']['objects']
@@ -229,25 +229,18 @@ def make_termnodes_from_paragraph_node(env, node, new_id=None):
     # add an index entry too
     indexnode = addnodes.index()
     indexnode['entries'] = [('single', termtext, new_id, 'main')]
-    new_termnodes = []
-    new_termnodes.append(indexnode)
-    new_termnodes.extend(node.children)
-    new_termnodes.append(addnodes.termsep())
-    for termnode in new_termnodes:
-        termnode.source, termnode.line = node.source, node.line
-    return new_id, termtext, new_termnodes
+    indexnode.source, indexnode.line = node.source, node.line
+    node.append(indexnode)
+    node['ids'].append(new_id)
+    node['names'].append(new_id)
 
-def make_term_from_paragraph_node(termnodes, ids):
-    # make a single "term" node with all the terms, separated by termsep
-    # nodes (remove the dangling trailing separator)
-    term = nodes.term('', '', *termnodes[:-1])
-    term.source, term.line = termnodes[0].source, termnodes[0].line
-    term.rawsource = term.astext()
-    term['ids'].extend(ids)
-    term['names'].extend(ids)
-    return term
+def make_termset_from_termnodes(termnodes):
+    # make a single "termset" node with all the terms
+    termset = addnodes.termset('', *termnodes)
+    termset.source, termset.line = termnodes[0].source, termnodes[0].line
+    termset.rawsource = termset.astext()
+    return termset
 
 class Glossary(Directive):
@@ -330,7 +323,6 @@ class Glossary(Directive):
             termtexts = []
             termnodes = []
             system_messages = []
-            ids = []
            for line, source, lineno in terms:
                 # parse the term with inline markup
                 res = self.state.inline_text(line, lineno)
@@ -338,17 +330,15 @@ class Glossary(Directive):
                 # get a text-only representation of the term and register it
                 # as a cross-reference target
-                tmp = nodes.paragraph('', '', *res[0])
-                tmp.source = source
-                tmp.line = lineno
-                new_id, termtext, new_termnodes = \
-                    make_termnodes_from_paragraph_node(env, tmp)
-                ids.append(new_id)
-                termtexts.append(termtext)
-                termnodes.extend(new_termnodes)
+                term = nodes.term('', '', *res[0])
+                term.source = source
+                term.line = lineno
+                register_term_to_glossary(env, term)
+                termtexts.append(term.astext())
+                termnodes.append(term)
 
-            term = make_term_from_paragraph_node(termnodes, ids)
-            term += system_messages
+            termset = make_termset_from_termnodes(termnodes)
+            termset += system_messages
 
             defnode = nodes.definition()
             if definition:
@@ -356,7 +346,7 @@ class Glossary(Directive):
                                                  defnode)
 
             items.append((termtexts,
-                          nodes.definition_list_item('', term, defnode)))
+                          nodes.definition_list_item('', termset, defnode)))
 
         if 'sorted' in self.options:
             items.sort(key=lambda x:

View File

@@ -27,10 +27,7 @@ from sphinx.util.nodes import (
 from sphinx.util.osutil import ustrftime
 from sphinx.util.i18n import find_catalog
 from sphinx.util.pycompat import indent
-from sphinx.domains.std import (
-    make_term_from_paragraph_node,
-    make_termnodes_from_paragraph_node,
-)
+from sphinx.domains.std import register_term_to_glossary
 
 default_substitutions = set([
@@ -340,18 +337,10 @@ class Locale(Transform):
             # glossary terms update refid
             if isinstance(node, nodes.term):
                 gloss_entries = env.temp_data.setdefault('gloss_entries', set())
-                ids = []
-                termnodes = []
                 for _id in node['names']:
                     if _id in gloss_entries:
                         gloss_entries.remove(_id)
-                    _id, _, new_termnodes = \
-                        make_termnodes_from_paragraph_node(env, patch, _id)
-                    ids.append(_id)
-                    termnodes.extend(new_termnodes)
-
-                if termnodes and ids:
-                    patch = make_term_from_paragraph_node(termnodes, ids)
+                    register_term_to_glossary(env, patch, _id)
                     node['ids'] = patch['ids']
                     node['names'] = patch['names']
                 processed = True

View File

@@ -629,9 +629,11 @@ class HTMLTranslator(BaseTranslator):
     def depart_abbreviation(self, node):
         self.body.append('</abbr>')
 
-    def visit_termsep(self, node):
-        self.body.append('<br />')
-        raise nodes.SkipNode
+    def visit_termset(self, node):
+        pass
+
+    def depart_termset(self, node):
+        pass
 
     def visit_manpage(self, node):
         return self.visit_literal_emphasis(node)
@@ -692,6 +694,15 @@ class HTMLTranslator(BaseTranslator):
                           (self.builder.current_docname, node.line))
         raise nodes.SkipNode
 
+    # overwritten to avoid adding '</dt>' in the 'visit_definition' state
+    def visit_definition(self, node):
+        self.body.append(self.starttag(node, 'dd', ''))
+        self.set_first_last(node)
+
+    # overwritten to add '</dt>' in the 'depart_term' state instead
+    def depart_term(self, node):
+        self.body.append('</dt>\n')
+
     def unknown_visit(self, node):
         raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
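
Why the two overrides above: the base docutils HTML writer opens '<dt>' in visit_term and only closes it at the start of visit_definition, which works when there is exactly one <term> per definition list item. With one <term> node per term inside a <termset>, each term must close its own '</dt>' in depart_term. A rough sketch (assumed, not taken from the commit) of the intended markup for a two-term entry:

    # <dl class="glossary">
    #   <dt id="term-foo">foo</dt>
    #   <dt id="term-bar">bar</dt>
    #   <dd>the shared definition ...</dd>
    # </dl>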

View File

@@ -1222,9 +1222,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
         self.unrestrict_footnote(node)
         self.in_term -= 1
 
-    def visit_termsep(self, node):
-        self.body.append(', ')
-        raise nodes.SkipNode
+    def visit_termset(self, node):
+        pass
+
+    def depart_termset(self, node):
+        pass
 
     def visit_classifier(self, node):
         self.body.append('{[}')

View File

@@ -200,9 +200,11 @@ class ManualPageTranslator(BaseTranslator):
     def depart_versionmodified(self, node):
         self.depart_paragraph(node)
 
-    def visit_termsep(self, node):
-        self.body.append(', ')
-        raise nodes.SkipNode
+    def visit_termset(self, node):
+        pass
+
+    def depart_termset(self, node):
+        pass
 
     # overwritten -- we don't want source comments to show up
     def visit_comment(self, node):

View File

@@ -952,10 +952,10 @@ class TexinfoTranslator(nodes.NodeVisitor):
     def depart_term(self, node):
         pass
 
-    def visit_termsep(self, node):
-        self.body.append('\n%s ' % self.at_item_x)
+    def visit_termset(self, node):
+        pass
 
-    def depart_termsep(self, node):
+    def depart_termset(self, node):
         pass
 
     def visit_classifier(self, node):

View File

@@ -640,9 +640,11 @@ class TextTranslator(nodes.NodeVisitor):
         if not self._classifier_count_in_li:
             self.end_state(end=None)
 
-    def visit_termsep(self, node):
-        self.add_text(', ')
-        raise nodes.SkipNode
+    def visit_termset(self, node):
+        pass
+
+    def depart_termset(self, node):
+        pass
 
     def visit_classifier(self, node):
         self.add_text(' : ')