Fix annotations

Takeshi KOMIYA 2018-12-10 22:46:54 +09:00
parent 63af636fd9
commit f48ccd029d
5 changed files with 17 additions and 12 deletions


@@ -280,7 +280,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
                 if ':' in node_id:
                     target['ids'][i] = self.fix_fragment('', node_id)
 
-            next_node = target.next_node(siblings=True)
+            next_node = target.next_node(siblings=True)  # type: nodes.Node
             if isinstance(next_node, nodes.Element):
                 for i, node_id in enumerate(next_node['ids']):
                     if ':' in node_id:
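The only change in the epub builder is the trailing comment: Node.next_node() may return any node type (or None), so the annotation together with the isinstance check narrows it for mypy. A minimal sketch of that narrowing, using a small hand-built doctree rather than Sphinx's (the paragraph/reference nodes below are illustrative):

    from docutils import nodes

    # a tiny hand-built doctree: a paragraph holding a single reference
    para = nodes.paragraph()
    para += nodes.reference('', '', refuri='https://example.org')

    next_node = para.next_node(siblings=True)  # type: nodes.Node
    if isinstance(next_node, nodes.Element):
        # only Element subclasses carry attributes such as 'ids' or 'refuri'
        print(next_node.get('refuri'))  # https://example.org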


@@ -57,7 +57,7 @@ default_settings = {
     'halt_level': 5,
     'file_insertion_enabled': True,
     'smartquotes_locales': [],
-}
+}  # type: Dict[str, Any]
 
 # This is increased every time an environment attribute is added
 # or changed to properly invalidate pickle files.
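The environment hunk annotates the module-level settings dict as Dict[str, Any], which documents the intended value type for the type checker; the typing import is only needed by mypy, not at runtime. A standalone sketch of the same idiom (the dict below is trimmed, not the full Sphinx default_settings):

    if False:
        # For type annotation
        from typing import Any, Dict  # NOQA

    default_settings = {
        'halt_level': 5,
        'file_insertion_enabled': True,
        'smartquotes_locales': [],
    }  # type: Dict[str, Any]

    # later additions with differently typed values still satisfy the annotation
    default_settings['language_code'] = 'en'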


@@ -19,7 +19,8 @@ from sphinx.util.nodes import nested_parse_with_titles
 if False:
     # For type annotation
     from typing import Any, Callable, Dict, List, Set, Type  # NOQA
-    from docutils.statemachine import State, StateMachine, StringList  # NOQA
+    from docutils.parsers.rst.state import RSTState  # NOQA
+    from docutils.statemachine import StateMachine, StringList  # NOQA
     from docutils.utils import Reporter  # NOQA
     from sphinx.config import Config  # NOQA
     from sphinx.environment import BuildEnvironment  # NOQA
@@ -80,7 +81,7 @@ def process_documenter_options(documenter, config, options):
 
 
 def parse_generated_content(state, content, documenter):
-    # type: (State, StringList, Documenter) -> List[nodes.Node]
+    # type: (RSTState, StringList, Documenter) -> List[nodes.Node]
     """Parse a generated content by Documenter."""
     with switch_source_input(state, content):
         if documenter.titles_allowed:
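Both autodoc hunks adjust Python 2 style signature comments; because the imports sit under "if False:", they are read by the type checker but never executed at runtime. A small self-contained sketch of the comment-annotation idiom (join_lines is illustrative, not part of Sphinx):

    if False:
        # For type annotation -- evaluated by mypy, skipped at runtime
        from typing import List  # NOQA

    from docutils.statemachine import StringList


    def join_lines(content):
        # type: (StringList) -> List[str]
        """Return the plain lines held by a docutils StringList."""
        return list(content)


    print(join_lines(StringList(['Hello', 'world'], source='<sketch>')))
    # ['Hello', 'world']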


@@ -2015,8 +2015,9 @@ class LaTeXTranslator(SphinxTranslator):
             # reference to a label
             id = uri[1:].replace('#', ':')
             self.body.append(self.hyperlink(id))
-            if len(node) and hasattr(node[0], 'attributes') and \
-               'std-term' in node[0].get('classes', []):
+            if (len(node) and
+                    isinstance(node[0], nodes.Element) and
+                    'std-term' in node[0].get('classes', [])):
                 # don't add a pageref for glossary terms
                 self.context.append('}}}')
                 # mark up as termreference
@@ -2150,7 +2151,9 @@ class LaTeXTranslator(SphinxTranslator):
 
     def visit_thebibliography(self, node):
         # type: (thebibliography) -> None
-        longest_label = max((subnode[0].astext() for subnode in node), key=len)
+        citations = cast(Iterable[nodes.citation], node)
+        labels = (cast(nodes.label, citation[0]) for citation in citations)
+        longest_label = max((label.astext() for label in labels), key=len)
         if len(longest_label) > MAX_CITATION_LABEL_LENGTH:
             # adjust max width of citation labels not to break the layout
             longest_label = longest_label[:MAX_CITATION_LABEL_LENGTH]
@@ -2164,9 +2167,9 @@ class LaTeXTranslator(SphinxTranslator):
 
     def visit_citation(self, node):
         # type: (nodes.citation) -> None
-        label = node[0].astext()
-        self.body.append(u'\\bibitem[%s]{%s:%s}' %
-                         (self.encode(label), node['docname'], node['ids'][0]))
+        label = cast(nodes.label, node[0])
+        self.body.append(u'\\bibitem[%s]{%s:%s}' % (self.encode(label.astext()),
+                                                    node['docname'], node['ids'][0]))
 
     def depart_citation(self, node):
         # type: (nodes.citation) -> None
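All three latex hunks lean on typing.cast (plus an isinstance check) instead of hasattr and bare indexing: cast tells mypy what the first child of a citation or reference node is, and it is a no-op at runtime. A minimal sketch of the pattern (citation_key and the 'Knuth84' label are illustrative):

    from typing import cast

    from docutils import nodes


    def citation_key(citation):
        # type: (nodes.citation) -> str
        """Return the label text of a citation node."""
        label = cast(nodes.label, citation[0])  # first child is the label
        return label.astext()


    citation = nodes.citation('', nodes.label('', 'Knuth84'))
    print(citation_key(citation))  # Knuth84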


@@ -308,8 +308,9 @@ class TexinfoTranslator(SphinxTranslator):
         # type: () -> None
         """Collect the menu entries for each "node" section."""
         node_menus = self.node_menus
-        for node in ([self.document] +
-                     self.document.traverse(nodes.section)):
+        targets = [self.document]  # type: List[nodes.Element]
+        targets.extend(self.document.traverse(nodes.section))
+        for node in targets:
             assert 'node_name' in node and node['node_name']
             entries = [s['node_name'] for s in find_subsections(node)]
             node_menus[node['node_name']] = entries
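The texinfo hunk splits the list construction so the intermediate variable can be annotated as List[nodes.Element]; document.traverse(nodes.section) (spelled findall() in newer docutils) yields every section in document order. A standalone sketch of that traversal (the reST snippet and collect_section_titles helper are illustrative):

    from docutils import nodes
    from docutils.frontend import OptionParser
    from docutils.parsers.rst import Parser
    from docutils.utils import new_document

    if False:
        # For type annotation
        from typing import List  # NOQA


    def collect_section_titles(document):
        # type: (nodes.document) -> List[str]
        """Return the title of every section, in document order."""
        targets = [document]  # type: List[nodes.Element]
        targets.extend(document.traverse(nodes.section))
        titles = []
        for node in targets:
            if isinstance(node, nodes.section):
                titles.append(node[0].astext())  # a section's first child is its title
        return titles


    settings = OptionParser(components=(Parser,)).get_default_values()
    document = new_document('<sketch>', settings)
    Parser().parse('One\n===\n\nTwo\n===\n', document)
    print(collect_section_titles(document))  # ['One', 'Two']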