Fix #6559: Wrong node-ids are generated in glossary directive

Takeshi KOMIYA 2019-07-08 02:11:01 +09:00
parent efe18663b2
commit 7123f4038a
5 changed files with 53 additions and 24 deletions

CHANGES

@@ -12,6 +12,7 @@ Deprecated
 * The ``decode`` argument of ``sphinx.pycode.ModuleAnalyzer()``
 * ``sphinx.directives.other.Index``
+* ``sphinx.environment.temp_data['gloss_entries']``
 * ``sphinx.environment.BuildEnvironment.indexentries``
 * ``sphinx.environment.collectors.indexentries.IndexEntriesCollector``
 * ``sphinx.io.FiletypeNotFoundError``
@@ -36,6 +37,7 @@ Bugs fixed
 * #6925: html: Remove redundant type="text/javascript" from <script> elements
 * #6906, #6907: autodoc: failed to read the source codes encoded in cp1251
 * #6961: latex: warning for babel shown twice
+* #6559: Wrong node-ids are generated in glossary directive

 Testing
 --------

doc/extdev/deprecated.rst

@@ -36,6 +36,11 @@ The following is a list of deprecated interfaces.
      - 4.0
      - ``sphinx.domains.index.IndexDirective``

+   * - ``sphinx.environment.temp_data['gloss_entries']``
+     - 2.4
+     - 4.0
+     - ``documents.nameids``
+
    * - ``sphinx.environment.BuildEnvironment.indexentries``
      - 2.4
      - 4.0
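
The new row records the migration path: code that tracked glossary ids in ``env.temp_data['gloss_entries']`` should consult the docutils document instead. A minimal sketch of the replacement check (``is_term_id_taken`` is an illustrative helper, not a Sphinx API):

    from docutils import nodes

    def is_term_id_taken(document: nodes.document, node_id: str) -> bool:
        # document.ids maps every registered node-id to its node, and
        # document.nameids maps reference names to node-ids; together they
        # cover what the per-build 'gloss_entries' set used to track.
        return node_id in document.ids or node_id in document.nameids.values()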

sphinx/domains/std.py

@@ -243,34 +243,50 @@ def split_term_classifiers(line: str) -> List[Optional[str]]:
 def make_glossary_term(env: "BuildEnvironment", textnodes: Iterable[Node], index_key: str,
-                       source: str, lineno: int, new_id: str = None) -> nodes.term:
+                       source: str, lineno: int, node_id: str = None,
+                       document: nodes.document = None) -> nodes.term:
     # get a text-only representation of the term and register it
     # as a cross-reference target
     term = nodes.term('', '', *textnodes)
     term.source = source
     term.line = lineno
-    gloss_entries = env.temp_data.setdefault('gloss_entries', set())
     termtext = term.astext()
-    if new_id is None:
-        new_id = nodes.make_id('term-' + termtext)
-        if new_id == 'term':
-            # the term is not good for node_id. Generate it by sequence number instead.
-            new_id = 'term-%d' % env.new_serialno('glossary')
-        while new_id in gloss_entries:
-            new_id = 'term-%d' % env.new_serialno('glossary')
-    gloss_entries.add(new_id)
+
+    if node_id:
+        # node_id is given from outside (mainly i18n module), use it forcedly
+        pass
+    elif document:
+        node_id = nodes.make_id('term-' + termtext)
+        if node_id == 'term':
+            # "term" is not good for node_id. Generate it by sequence number instead.
+            node_id = 'term-%d' % env.new_serialno('glossary')
+        while node_id in document.ids:
+            node_id = 'term-%d' % env.new_serialno('glossary')
+        document.note_explicit_target(term)
+    else:
+        warnings.warn('make_glossary_term() expects document is passed as an argument.',
+                      RemovedInSphinx40Warning)
+        gloss_entries = env.temp_data.setdefault('gloss_entries', set())
+        node_id = nodes.make_id('term-' + termtext)
+        if node_id == 'term':
+            # "term" is not good for node_id. Generate it by sequence number instead.
+            node_id = 'term-%d' % env.new_serialno('glossary')
+        while node_id in gloss_entries:
+            node_id = 'term-%d' % env.new_serialno('glossary')
+        gloss_entries.add(node_id)
+
+    term['ids'].append(node_id)
     std = cast(StandardDomain, env.get_domain('std'))
-    std.add_object('term', termtext.lower(), env.docname, new_id)
+    std.add_object('term', termtext.lower(), env.docname, node_id)

     # add an index entry too
     indexnode = addnodes.index()
-    indexnode['entries'] = [('single', termtext, new_id, 'main', index_key)]
+    indexnode['entries'] = [('single', termtext, node_id, 'main', index_key)]
     indexnode.source, indexnode.line = term.source, term.line
     term.append(indexnode)
-    term['ids'].append(new_id)
-    term['names'].append(new_id)

     return term
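
The heart of the fix is visible in the hunk above: candidate node-ids are now checked against ``document.ids``, the registry of every id already claimed in the current document, instead of the glossary-local ``gloss_entries`` set, and the chosen target is registered through ``document.note_explicit_target()``. A standalone sketch of that allocation strategy (illustrative only, with ``used_ids`` standing in for ``document.ids``):

    from docutils import nodes

    def generate_term_id(termtext: str, used_ids: set, serial: int = 0) -> str:
        # Derive a candidate node-id from the term text, e.g. "foo" -> "term-foo".
        node_id = nodes.make_id('term-' + termtext)
        # Fall back to serial ids ("term-0", "term-1", ...) while the candidate
        # is degenerate or already taken, e.g. by an explicit ".. _term-foo:" label.
        while node_id in ('term', '') or node_id in used_ids:
            node_id = 'term-%d' % serial
            serial += 1
        used_ids.add(node_id)
        return node_id

    # The #6559 scenario: an explicit label has already claimed "term-foo", so
    # the glossary term "foo" falls back to "term-0" instead of duplicating it.
    used = {'term-foo'}
    assert generate_term_id('foo', used) == 'term-0'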
@@ -368,7 +384,8 @@ class Glossary(SphinxDirective):
                 textnodes, sysmsg = self.state.inline_text(parts[0], lineno)

                 # use first classifier as a index key
-                term = make_glossary_term(self.env, textnodes, parts[1], source, lineno)
+                term = make_glossary_term(self.env, textnodes, parts[1], source, lineno,
+                                          document=self.state.document)
                 term.rawsource = line
                 system_messages.extend(sysmsg)
                 termtexts.append(term.astext())
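
For extensions that create glossary terms programmatically, the call-site change above is the template: pass the current document so id deduplication can see every target already defined. A hedged sketch of such a directive (``SingleTerm`` is hypothetical; ``make_glossary_term`` and ``SphinxDirective`` are the real APIs touched by this commit):

    from sphinx.domains.std import make_glossary_term
    from sphinx.util.docutils import SphinxDirective

    class SingleTerm(SphinxDirective):
        """Hypothetical directive registering its argument as a glossary term."""
        required_arguments = 1

        def run(self):
            source, lineno = self.get_source_info()
            textnodes, messages = self.state.inline_text(self.arguments[0], lineno)
            # Passing document= selects the new document.ids code path; omitting
            # it falls back to the deprecated gloss_entries bookkeeping and warns.
            term = make_glossary_term(self.env, textnodes, None, source, lineno,
                                      document=self.state.document)
            return [term] + messages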

sphinx/transforms/i18n.py

@@ -202,18 +202,13 @@ class Locale(SphinxTransform):
             # glossary terms update refid
             if isinstance(node, nodes.term):
-                gloss_entries = self.env.temp_data.setdefault('gloss_entries', set())
-                for _id in node['names']:
-                    if _id in gloss_entries:
-                        gloss_entries.remove(_id)
-
+                for _id in node['ids']:
                     parts = split_term_classifiers(msgstr)
                     patch = publish_msgstr(self.app, parts[0], source,
                                            node.line, self.config, settings)
                     patch = make_glossary_term(self.env, patch, parts[1],
-                                               source, node.line, _id)
-                    node['ids'] = patch['ids']
-                    node['names'] = patch['names']
+                                               source, node.line, _id,
+                                               self.document)
                     processed = True

             # update leaves with processed nodes
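
Two details motivate the i18n change: the old lookup keyed on ``node['names']``, which the new ``make_glossary_term()`` no longer populates, and the reused id must be registered on the current document. Iterating ``node['ids']`` and passing ``self.document`` keeps the ids assigned at parse time stable across translation, so ``:term:`` references from other documents continue to resolve. A quick way to inspect the result (a sketch assuming a built application ``app`` and a document named ``index``; ``traverse`` is the docutils 0.x API):

    from docutils import nodes

    doctree = app.env.get_doctree('index')
    for term in doctree.traverse(nodes.term):
        # Each glossary term keeps exactly one stable node-id, even after the
        # Locale transform swaps its children for translated text.
        print(term['ids'], term.astext())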

tests/test_domain_std.py

@@ -255,6 +255,16 @@ def test_glossary_alphanumeric(app):
     assert ("/", "/", "term", "index", "term-0", -1) in objects


+def test_glossary_conflicted_labels(app):
+    text = (".. _term-foo:\n"
+            ".. glossary::\n"
+            "\n"
+            "   foo\n")
+    restructuredtext.parse(app, text)
+    objects = list(app.env.get_domain("std").get_objects())
+    assert ("foo", "foo", "term", "index", "term-0", -1) in objects
+
+
 def test_cmdoption(app):
     text = (".. program:: ls\n"
             "\n"