mirror of https://github.com/sphinx-doc/sphinx.git
synced 2025-02-25 18:55:22 -06:00

Merge pull request #5539 from tk0miya/master

Fix mypy violations (and merge 1.8 branch to master)

commit c97441e2af
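
Most hunks below either delete `# type: ignore` comments that newer mypy releases no longer need, or move the suppression onto the one expression that still requires it. Sphinx 1.8 used comment-style annotations so the code stayed importable on Python 2; a minimal sketch of that style (the function below is illustrative, not taken from the diff):

    from typing import List  # comment-style hints still need real imports

    def render(docname, suffixes):
        # type: (str, List[str]) -> str
        """Comment annotations type-check under mypy but are inert at runtime."""
        return docname + suffixes[0]

    # A '# type: ignore' trailer silences mypy for a single line only, e.g.:
    # m = some_pattern.match(sig)  # type: ignore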
CHANGES
@@ -68,6 +68,15 @@ Features added
 Bugs fixed
 ----------
 
+* #5490: latex: enumerated list causes a crash with recommonmark
+* #5492: sphinx-build fails to build docs w/ Python < 3.5.2
+* #3704: latex: wrong ``\label`` positioning for figures with a legend
+* #5496: C++, fix assertion when a symbol is declared more than twice.
+* #5493: gettext: crashed with broken template
+* #5495: csv-table directive with file option in included file is broken (refs:
+  #4821)
+* #5498: autodoc: unable to find type hints for a ``functools.partial``
+
 Testing
 --------
 
@@ -104,7 +104,13 @@ But, sometimes, the change of interface are needed for some reasons. In such
 cases, we've marked them as deprecated. And they are kept during the two
 major versions (for more details, please see :ref:`deprecation-policy`).
 
-The following is a list of deprecated interface.
+The following is a list of deprecated interfaces.
 
+.. tabularcolumns:: |>{\raggedright}\Y{.4}|>{\centering}\Y{.1}|>{\centering}\Y{.12}|>{\raggedright\arraybackslash}\Y{.38}|
+
+.. |LaTeXHyphenate| raw:: latex
+
+   \hspace{0pt}
+
 .. list-table:: deprecated APIs
    :header-rows: 1
@@ -112,8 +118,8 @@ The following is a list of deprecated interface.
    :widths: 40, 10, 10, 40
 
    * - Target
-     - Deprecated
+     - |LaTeXHyphenate|\ Deprecated
      - (will be) Removed
      - Alternatives
 
   * - ``suffix`` argument of ``BuildEnvironment.doc2path()``
@@ -22,6 +22,7 @@ from six import StringIO
 
 from sphinx.builders import Builder
 from sphinx.domains.python import pairindextypes
+from sphinx.errors import ThemeError
 from sphinx.locale import __
 from sphinx.util import split_index_msg, logging, status_iterator
 from sphinx.util.console import bold  # type: ignore
@@ -247,11 +248,14 @@ class MessageCatalogBuilder(I18nBuilder):
 
         for template in status_iterator(files, __('reading templates... '), "purple",  # type: ignore # NOQA
                                         len(files), self.app.verbosity):
-            with open(template, 'r', encoding='utf-8') as f:  # type: ignore
-                context = f.read()
-                for line, meth, msg in extract_translations(context):
-                    origin = MsgOrigin(template, line)
-                    self.catalogs['sphinx'].add(msg, origin)
+            try:
+                with open(template, 'r', encoding='utf-8') as f:  # type: ignore
+                    context = f.read()
+                    for line, meth, msg in extract_translations(context):
+                        origin = MsgOrigin(template, line)
+                        self.catalogs['sphinx'].add(msg, origin)
+            except Exception as exc:
+                raise ThemeError('%s: %r' % (template, exc))
 
     def build(self, docnames, summary=None, method='update'):
         # type: (Iterable[unicode], unicode, unicode) -> None
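This hunk is the fix behind the CHANGES entry "#5493: gettext: crashed with broken template": a failure while reading or parsing a template now surfaces as a ThemeError naming the file instead of a bare traceback. A self-contained sketch of the same pattern (the helper name is illustrative):

    from sphinx.errors import ThemeError

    def read_template(path):
        # Re-raise any failure while reading a template as a ThemeError
        # that names the offending file, instead of crashing the builder.
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return f.read()
        except Exception as exc:
            raise ThemeError('%s: %r' % (path, exc))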
@@ -374,7 +374,7 @@ class StandaloneHTMLBuilder(Builder):
         if filename and '://' not in filename:
             filename = posixpath.join('_static', filename)
 
-        self.script_files.append(JavaScript(filename, **kwargs))  # type: ignore
+        self.script_files.append(JavaScript(filename, **kwargs))
 
     @property
     def default_translator_class(self):
@@ -214,7 +214,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
 
     def keyword_item(self, name, ref):
         # type: (unicode, Any) -> unicode
-        matchobj = _idpattern.match(name)  # type: ignore
+        matchobj = _idpattern.match(name)
         if matchobj:
             groupdict = matchobj.groupdict()
             shortname = groupdict['title']
@@ -492,7 +492,7 @@ def valid_dir(d):
     if not path.isdir(dir):
         return False
 
-    if set(['Makefile', 'make.bat']) & set(os.listdir(dir)):  # type: ignore
+    if set(['Makefile', 'make.bat']) & set(os.listdir(dir)):
        return False
 
    if d['sep']:
@@ -508,7 +508,7 @@ def valid_dir(d):
         d['dot'] + 'templates',
         d['master'] + d['suffix'],
     ]
-    if set(reserved_names) & set(os.listdir(dir)):  # type: ignore
+    if set(reserved_names) & set(os.listdir(dir)):
         return False
 
     return True
@@ -424,7 +424,7 @@ def correct_copyright_year(app, config):
     for k in ('copyright', 'epub_copyright'):
         if k in config:
             replace = r'\g<1>%s' % format_date('%Y')
-            config[k] = copyright_year_re.sub(replace, config[k])  # type: ignore
+            config[k] = copyright_year_re.sub(replace, config[k])
 
 
 def check_confval_types(app, config):
@@ -8,7 +8,6 @@
 """
 
 import re
-from contextlib import contextmanager
 
 from docutils import nodes
 from docutils.parsers.rst import directives
@@ -380,7 +379,6 @@ class Include(BaseInclude, SphinxDirective):
 
     def run(self):
         # type: () -> List[nodes.Node]
-        current_filename = self.env.doc2path(self.env.docname)
         if self.arguments[0].startswith('<') and \
            self.arguments[0].endswith('>'):
             # docutils "standard" includes, do not do path processing
@@ -388,27 +386,7 @@ class Include(BaseInclude, SphinxDirective):
         rel_filename, filename = self.env.relfn2path(self.arguments[0])
         self.arguments[0] = filename
         self.env.note_included(filename)
-        with patched_warnings(self, current_filename):
-            return BaseInclude.run(self)
-
-
-@contextmanager
-def patched_warnings(directive, parent_filename):
-    # type: (BaseInclude, unicode) -> Generator[None, None, None]
-    """Add includee filename to the warnings during inclusion."""
-    try:
-        original = directive.state_machine.insert_input
-
-        def insert_input(input_lines, source):
-            # type: (Any, unicode) -> None
-            source += ' <included from %s>' % parent_filename
-            original(input_lines, source)
-
-        # patch insert_input() temporarily
-        directive.state_machine.insert_input = insert_input
-        yield
-    finally:
-        directive.state_machine.insert_input = original
+        return BaseInclude.run(self)
 
 
 def setup(app):
@@ -83,7 +83,7 @@ class CObject(ObjectDescription):
     def _parse_type(self, node, ctype):
         # type: (nodes.Node, unicode) -> None
         # add cross-ref nodes for all words
-        for part in [_f for _f in wsplit_re.split(ctype) if _f]:  # type: ignore
+        for part in [_f for _f in wsplit_re.split(ctype) if _f]:
             tnode = nodes.Text(part, part)
             if part[0] in string.ascii_letters + '_' and \
                part not in self.stopwords:
@@ -98,10 +98,10 @@ class CObject(ObjectDescription):
     def _parse_arglist(self, arglist):
         # type: (unicode) -> Iterator[unicode]
         while True:
-            m = c_funcptr_arg_sig_re.match(arglist)  # type: ignore
+            m = c_funcptr_arg_sig_re.match(arglist)
             if m:
                 yield m.group()
-                arglist = c_funcptr_arg_sig_re.sub('', arglist)  # type: ignore
+                arglist = c_funcptr_arg_sig_re.sub('', arglist)
                 if ',' in arglist:
                     _, arglist = arglist.split(',', 1)
                 else:
@@ -118,9 +118,9 @@ class CObject(ObjectDescription):
         # type: (unicode, addnodes.desc_signature) -> unicode
         """Transform a C signature into RST nodes."""
         # first try the function pointer signature regex, it's more specific
-        m = c_funcptr_sig_re.match(sig)  # type: ignore
+        m = c_funcptr_sig_re.match(sig)
         if m is None:
-            m = c_sig_re.match(sig)  # type: ignore
+            m = c_sig_re.match(sig)
         if m is None:
             raise ValueError('no match')
         rettype, name, arglist, const = m.groups()
@@ -162,7 +162,7 @@ class CObject(ObjectDescription):
             arg = arg.strip()
             param = addnodes.desc_parameter('', '', noemph=True)
             try:
-                m = c_funcptr_arg_sig_re.match(arg)  # type: ignore
+                m = c_funcptr_arg_sig_re.match(arg)
                 if m:
                     self._parse_type(param, m.group(1) + '(')
                     param += nodes.emphasis(m.group(2), m.group(2))
@@ -4010,14 +4010,20 @@ class Symbol:
 
         noDecl = []
         withDecl = []
+        dupDecl = []
         for s in symbols:
             if s.declaration is None:
                 noDecl.append(s)
+            elif s.isRedeclaration:
+                dupDecl.append(s)
             else:
                 withDecl.append(s)
         if Symbol.debug_lookup:
             print(" #noDecl: ", len(noDecl))
             print(" #withDecl:", len(withDecl))
+            print(" #dupDecl: ", len(dupDecl))
+        if len(dupDecl) > 0:
+            assert len(withDecl) > 0
         # assert len(noDecl) <= 1  # we should fill in symbols when they are there
         # TODO: enable assertion when we at some point find out how to do cleanup
         # With partial builds we may start with a large symbol tree stripped of declarations.
@@ -157,7 +157,7 @@ class PyXrefMixin:
             if split_contnode:
                 contnode = nodes.Text(sub_target)
 
-            if delims_re.match(sub_target):  # type: ignore
+            if delims_re.match(sub_target):
                 results.append(contnode or innernode(sub_target, sub_target))
             else:
                 results.append(self.make_xref(rolename, domain, sub_target,
@@ -252,7 +252,7 @@ class PyObject(ObjectDescription):
         * it is stripped from the displayed name if present
         * it is added to the full name (return value) if not present
         """
-        m = py_sig_re.match(sig)  # type: ignore
+        m = py_sig_re.match(sig)
         if m is None:
             raise ValueError
         name_prefix, name, arglist, retann = m.groups()
@@ -77,7 +77,7 @@ def parse_directive(d):
     if not dir.startswith('.'):
         # Assume it is a directive without syntax
         return (dir, '')
-    m = dir_sig_re.match(dir)  # type: ignore
+    m = dir_sig_re.match(dir)
     if not m:
         return (dir, '')
     parsed_dir, parsed_args = m.groups()
@@ -155,10 +155,10 @@ class Cmdoption(ObjectDescription):
         # type: (unicode, addnodes.desc_signature) -> unicode
         """Transform an option description into RST nodes."""
         count = 0
-        firstname = ''
+        firstname = ''  # type: unicode
         for potential_option in sig.split(', '):
             potential_option = potential_option.strip()
-            m = option_desc_re.match(potential_option)  # type: ignore
+            m = option_desc_re.match(potential_option)
             if not m:
                 logger.warning(__('Malformed option description %r, should '
                                   'look like "opt", "-opt args", "--opt args", '
@@ -387,7 +387,7 @@ def token_xrefs(text):
     # type: (unicode) -> List[nodes.Node]
     retnodes = []
     pos = 0
-    for m in token_re.finditer(text):  # type: ignore
+    for m in token_re.finditer(text):
         if m.start() > pos:
             txt = text[pos:m.start()]
             retnodes.append(nodes.Text(txt, txt))
@@ -300,7 +300,7 @@ class Documenter:
             # an autogenerated one
             try:
                 explicit_modname, path, base, args, retann = \
-                    py_ext_sig_re.match(self.name).groups()  # type: ignore
+                    py_ext_sig_re.match(self.name).groups()
             except AttributeError:
                 logger.warning(__('invalid signature for auto%s (%r)') % (self.objtype, self.name),
                                type='autodoc')
@@ -314,7 +314,7 @@ class Documenter:
             modname = None
             parents = []
 
-        self.modname, self.objpath = self.resolve_name(modname, parents, path, base)
+        self.modname, self.objpath = self.resolve_name(modname, parents, path, base)  # type: ignore # NOQA
 
         if not self.modname:
             return False
@@ -934,7 +934,7 @@ class DocstringSignatureMixin:
             if not doclines:
                 continue
             # match first line of docstring against signature RE
-            match = py_ext_sig_re.match(doclines[0])  # type: ignore
+            match = py_ext_sig_re.match(doclines[0])
             if not match:
                 continue
             exmod, path, base, args, retann = match.groups()
@@ -951,7 +951,7 @@ class DocstringSignatureMixin:
             result = args, retann
             # don't look any further
             break
-        return result
+        return result  # type: ignore
 
     def get_doc(self, encoding=None, ignore=1):
         # type: (unicode, int) -> List[List[unicode]]
@@ -423,7 +423,7 @@ def mangle_signature(sig, max_chars=30):
 
     opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
     while s:
-        m = opt_re.search(s)  # type: ignore
+        m = opt_re.search(s)
         if not m:
             # The rest are arguments
             args = s.split(', ')
@@ -481,7 +481,7 @@ def extract_summary(doc, document):
         summary = doc[0].strip()
     else:
         # Try to find the "first sentence", which may span multiple lines
-        sentences = periods_re.split(" ".join(doc))  # type: ignore
+        sentences = periods_re.split(" ".join(doc))
         if len(sentences) == 1:
             summary = sentences[0].strip()
         else:
@@ -302,11 +302,11 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
     template = None
     current_module = module
     in_autosummary = False
-    base_indent = ""
+    base_indent = ""  # type: unicode
 
     for line in lines:
         if in_autosummary:
-            m = toctree_arg_re.match(line)  # type: ignore
+            m = toctree_arg_re.match(line)
             if m:
                 toctree = m.group(1)
                 if filename:
@@ -314,7 +314,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
                                            toctree)
                 continue
 
-            m = template_arg_re.match(line)  # type: ignore
+            m = template_arg_re.match(line)
             if m:
                 template = m.group(1).strip()
                 continue
@@ -322,7 +322,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
             if line.strip().startswith(':'):
                 continue  # skip options
 
-            m = autosummary_item_re.match(line)  # type: ignore
+            m = autosummary_item_re.match(line)
             if m:
                 name = m.group(1).strip()
                 if name.startswith('~'):
@@ -338,7 +338,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
 
             in_autosummary = False
 
-        m = autosummary_re.match(line)  # type: ignore
+        m = autosummary_re.match(line)
         if m:
             in_autosummary = True
             base_indent = m.group(1)
@@ -346,7 +346,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
             template = None
             continue
 
-        m = automodule_re.search(line)  # type: ignore
+        m = automodule_re.search(line)
         if m:
             current_module = m.group(1).strip()
             # recurse into the automodule docstring
@@ -354,7 +354,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
                                                        current_module, filename=filename))
             continue
 
-        m = module_re.match(line)  # type: ignore
+        m = module_re.match(line)
         if m:
             current_module = m.group(2)
             continue
@@ -100,7 +100,7 @@ class CoverageBuilder(Builder):
         # Fetch all the info from the header files
         c_objects = self.env.domaindata['c']['objects']
         for filename in self.c_sourcefiles:
-            undoc = set()
+            undoc = set()  # type: Set[Tuple[unicode, unicode]]
             with open(filename, 'r') as f:
                 for line in f:
                     for key, regex in self.c_regexes:
@@ -59,7 +59,7 @@ class ClickableMapDefinition:
 
     def parse(self, dot=None):
         # type: (unicode) -> None
-        matched = self.maptag_re.match(self.content[0])  # type: ignore
+        matched = self.maptag_re.match(self.content[0])
         if not matched:
             raise GraphvizError('Invalid clickable map file found: %s' % self.filename)
 
@@ -72,7 +72,7 @@ class ClickableMapDefinition:
         self.content[0] = self.content[0].replace('%3', self.id)
 
         for line in self.content:
-            if self.href_re.search(line):  # type: ignore
+            if self.href_re.search(line):
                 self.clickable.append(line)
 
     def generate_clickable_map(self):
@@ -189,7 +189,7 @@ def convert_dvi_to_png(dvipath, builder):
     depth = None
     if builder.config.imgmath_use_preview:
         for line in stdout.splitlines():
-            matched = depth_re.match(line)  # type: ignore
+            matched = depth_re.match(line)
             if matched:
                 depth = int(matched.group(1))
                 write_png_depth(filename, depth)
@@ -77,7 +77,7 @@ def try_import(objname):
         __import__(objname)
         return sys.modules.get(objname)  # type: ignore
     except (ImportError, ValueError):  # ValueError,py27 -> ImportError,py3
-        matched = module_sig_re.match(objname)  # type: ignore
+        matched = module_sig_re.match(objname)
 
         if not matched:
             return None
@@ -88,7 +88,7 @@ def try_import(objname):
             return None
         try:
             __import__(modname)
-            return getattr(sys.modules.get(modname), attrname, None)
+            return getattr(sys.modules.get(modname), attrname, None)  # type: ignore
        except (ImportError, ValueError):  # ValueError,py27 -> ImportError,py3
            return None
 
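For readers following along, `try_import` accepts either a module path or a dotted attribute path, and `module_sig_re` is what splits the latter into module and attribute. A usage sketch, assuming these hunks belong to sphinx.ext.inheritance_diagram where `try_import` is defined (the dotted names are examples only):

    from sphinx.ext.inheritance_diagram import try_import

    try_import('collections')              # returns the module itself
    try_import('collections.OrderedDict')  # falls back to attribute lookup on the module
    try_import('no.such.name')             # ImportError is swallowed; returns None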
@@ -239,7 +239,7 @@ class GoogleDocstring(UnicodeMixin):
         _name, _type, _desc = before, '', after  # type: unicode, unicode, unicode
 
         if parse_type:
-            match = _google_typed_arg_regex.match(before)  # type: ignore
+            match = _google_typed_arg_regex.match(before)
             if match:
                 _name = match.group(1)
                 _type = match.group(2)
@@ -500,9 +500,9 @@ class GoogleDocstring(UnicodeMixin):
         # type: (List[unicode]) -> bool
         if not lines:
             return False
-        if _bullet_list_regex.match(lines[0]):  # type: ignore
+        if _bullet_list_regex.match(lines[0]):
             return True
-        if _enumerated_list_regex.match(lines[0]):  # type: ignore
+        if _enumerated_list_regex.match(lines[0]):
             return True
         if len(lines) < 2 or lines[0].endswith('::'):
             return False
@@ -576,7 +576,7 @@ class GoogleDocstring(UnicodeMixin):
                 section = self._consume_section_header()
                 self._is_in_section = True
                 self._section_indent = self._get_current_indent()
-                if _directive_regex.match(section):  # type: ignore
+                if _directive_regex.match(section):
                     lines = [section] + self._consume_to_next_section()
                 else:
                     lines = self._sections[section.lower()](section)
@@ -704,7 +704,7 @@ class GoogleDocstring(UnicodeMixin):
         fields = self._consume_fields(parse_type=False, prefer_type=True)
         lines = []  # type: List[unicode]
         for _name, _type, _desc in fields:
-            m = self._name_rgx.match(_type).groupdict()  # type: ignore
+            m = self._name_rgx.match(_type).groupdict()
             if m['role']:
                 _type = m['name']
             _type = ' ' + _type if _type else ''
@@ -766,9 +766,9 @@ class GoogleDocstring(UnicodeMixin):
         # type: (unicode) -> Tuple[unicode, unicode, unicode]
         before_colon = []
         after_colon = []
-        colon = ''
+        colon = ''  # type: unicode
         found_colon = False
-        for i, source in enumerate(_xref_regex.split(line)):  # type: ignore
+        for i, source in enumerate(_xref_regex.split(line)):
             if found_colon:
                 after_colon.append(source)
             else:
@@ -962,7 +962,7 @@ class NumpyDocstring(GoogleDocstring):
         section, underline = self._line_iter.peek(2)
         section = section.lower()
         if section in self._sections and isinstance(underline, string_types):
-            return bool(_numpy_section_regex.match(underline))  # type: ignore
+            return bool(_numpy_section_regex.match(underline))
         elif self._directive_sections:
             if _directive_regex.match(section):
                 for directive_section in self._directive_sections:
@@ -996,7 +996,7 @@ class NumpyDocstring(GoogleDocstring):
         def parse_item_name(text):
             # type: (unicode) -> Tuple[unicode, unicode]
             """Match ':role:`name`' or 'name'"""
-            m = self._name_rgx.match(text)  # type: ignore
+            m = self._name_rgx.match(text)
             if m:
                 g = m.groups()
                 if g[1] is None:
@@ -1020,7 +1020,7 @@ class NumpyDocstring(GoogleDocstring):
             if not line.strip():
                 continue
 
-            m = self._name_rgx.match(line)  # type: ignore
+            m = self._name_rgx.match(line)
             if m and line[m.end():].strip().startswith(':'):
                 push_item(current_func, rest)
                 current_func, line = line[:m.end()], line[m.end():]
@@ -149,8 +149,8 @@ class PygmentsBridge:
 
         # trim doctest options if wanted
         if isinstance(lexer, PythonConsoleLexer) and self.trim_doctest_flags:
-            source = doctest.blankline_re.sub('', source)  # type: ignore
-            source = doctest.doctestopt_re.sub('', source)  # type: ignore
+            source = doctest.blankline_re.sub('', source)
+            source = doctest.doctestopt_re.sub('', source)
 
         # highlight via Pygments
         formatter = self.get_formatter(**kwargs)
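The two regexes come from sphinx.ext.doctest and strip doctest bookkeeping out of highlighted console snippets. A rough, self-contained sketch of the trimming, with approximated patterns (the real definitions live in sphinx.ext.doctest):

    import re

    blankline_re = re.compile(r'^\s*<BLANKLINE>', re.M)   # approximation
    doctestopt_re = re.compile(r'#\s*doctest:.+$', re.M)  # approximation

    source = ">>> print('x')  # doctest: +SKIP\nx\n<BLANKLINE>\n"
    source = blankline_re.sub('', source)   # drop <BLANKLINE> markers
    source = doctestopt_re.sub('', source)  # drop '# doctest: ...' options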
@@ -44,7 +44,7 @@ class _TranslationProxy(UserString):
         if not args:
             # not called with "function" and "arguments", but a plain string
             return text_type(func)
-        return object.__new__(cls)  # type: ignore
+        return object.__new__(cls)
 
     def __getnewargs__(self):
         # type: () -> Tuple
@@ -33,8 +33,8 @@ class ModuleAnalyzer:
     def for_string(cls, string, modname, srcname='<string>'):
         # type: (unicode, unicode, unicode) -> ModuleAnalyzer
         if isinstance(string, bytes):
-            return cls(BytesIO(string), modname, srcname)  # type: ignore
-        return cls(StringIO(string), modname, srcname, decoded=True)  # type: ignore
+            return cls(BytesIO(string), modname, srcname)
+        return cls(StringIO(string), modname, srcname, decoded=True)
 
     @classmethod
     def for_file(cls, filename, modname):
@@ -43,7 +43,7 @@ class ModuleAnalyzer:
             return cls.cache['file', filename]
         try:
             with open(filename, 'rb') as f:
-                obj = cls(f, modname, filename)  # type: ignore
+                obj = cls(f, modname, filename)
                 cls.cache['file', filename] = obj
         except Exception as err:
             if '.egg/' in filename:
@@ -358,7 +358,7 @@ class SphinxComponentRegistry:
         # type: (unicode, Type[nodes.NodeVisitor], bool) -> None
         logger.debug('[app] Change of translator for the %s builder.' % name)
         if name in self.translators and not override:
-            raise ExtensionError(__('Translatoro for %r already exists') % name)
+            raise ExtensionError(__('Translator for %r already exists') % name)
         self.translators[name] = translator
 
     def add_translation_handlers(self, node, **kwargs):
@@ -260,7 +260,7 @@ def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
     text = utils.unescape(text)
     if typ == 'menuselection':
         text = text.replace('-->', u'\N{TRIANGULAR BULLET}')
-    spans = _amp_re.split(text)  # type: ignore
+    spans = _amp_re.split(text)
 
     node = nodes.inline(rawtext=rawtext)
     for i, span in enumerate(spans):
@@ -342,7 +342,7 @@ _abbr_re = re.compile(r'\((.*)\)$', re.S)
 def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
     # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]]  # NOQA
     text = utils.unescape(text)
-    m = _abbr_re.search(text)  # type: ignore
+    m = _abbr_re.search(text)
     if m is None:
         return [addnodes.abbreviation(text, text, **options)], []
     abbr = text[:m.start()].strip()
@@ -85,7 +85,7 @@ var Stemmer = function() {
         at white spaces, which should be enough for most languages except CJK
         languages.
         """
-        return self._word_re.findall(input)  # type: ignore
+        return self._word_re.findall(input)
 
     def stem(self, word):
         # type: (unicode) -> unicode
@@ -194,7 +194,7 @@ def remove_unicode_literals(s):
     # type: (unicode) -> unicode
     warnings.warn('remove_unicode_literals() is deprecated.',
                   RemovedInSphinx40Warning)
-    return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)  # type: ignore
+    return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)
 
 
 def find_files(root, suffix=None):
@@ -10,7 +10,6 @@
 """
 
 import warnings
-from typing import TYPE_CHECKING
 
 from docutils import nodes
 from docutils.writers.docutils_xml import XMLTranslator
@@ -20,7 +19,8 @@ from sphinx.deprecation import RemovedInSphinx30Warning
 from sphinx.transforms import SphinxTransform
 from sphinx.util import logging
 
-if TYPE_CHECKING:
+if False:
+    # For type annotation
     from typing import Any, Callable, Dict, Iterable, List, Tuple  # NOQA
     from docutils.parsers.rst.states import Inliner  # NOQA
     from docutils.writers.html4css1 import Writer  # NOQA
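`if False:` is the guard for typing-only imports on interpreters whose typing module predates `typing.TYPE_CHECKING`: the block never executes at runtime, but mypy still reads the imports, which is all comment-style annotations need. A minimal sketch of the pattern (the function is illustrative):

    if False:
        # For type annotation -- never imported at runtime
        from typing import Any, Dict  # NOQA

    def merge(a, b):
        # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
        result = dict(a)
        result.update(b)
        return result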
@@ -377,7 +377,7 @@ def detect_encoding(readline):
         except UnicodeDecodeError:
             return None
 
-        matches = _coding_re.findall(line_string)  # type: ignore
+        matches = _coding_re.findall(line_string)
         if not matches:
             return None
         return get_normal_name(matches[0])
@@ -225,13 +225,13 @@ class sphinx_domains:
 class WarningStream:
     def write(self, text):
         # type: (unicode) -> None
-        matched = report_re.search(text)  # type: ignore
+        matched = report_re.search(text)
         if not matched:
             logger.warning(text.rstrip("\r\n"))
         else:
             location, type, level = matched.groups()
-            message = report_re.sub('', text).rstrip()  # type: ignore
-            logger.log(type, message, location=location)
+            message = report_re.sub('', text).rstrip()
+            logger.log(type, message, location=location)  # type: ignore
 
 
 class LoggingReporter(Reporter):
@@ -322,10 +322,14 @@ class Signature:
             raise
 
         try:
-            self.annotations = typing.get_type_hints(subject)  # type: ignore
+            if ispartial(subject):
+                # get_type_hints() does not support partial objects
+                self.annotations = {}  # type: Dict[str, Any]
+            else:
+                self.annotations = typing.get_type_hints(subject)  # type: ignore
         except Exception as exc:
             if (3, 5, 0) <= sys.version_info < (3, 5, 3) and isinstance(exc, AttributeError):
-                # python 3.5.2 raises ValueError for partial objects.
+                # python 3.5.2 raises ValueError for classmethod-ized partial objects.
                 self.annotations = {}
             else:
                 logger.warning('Invalid type annotation found on %r. Ignored: %r',
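This is the autodoc fix behind CHANGES entry "#5498: autodoc: unable to find type hints for a ``functools.partial``": `typing.get_type_hints()` rejects partials because a partial is not a module, class, method, or function. A minimal reproduction of the guard, with illustrative names (the real code uses sphinx's `ispartial()` helper, which also covers `partialmethod`):

    import functools
    import typing

    def add(x: int, y: int) -> int:
        return x + y

    add_one = functools.partial(add, 1)

    def annotations_of(obj):
        # Mirror the guard above: partials get no hints instead of an error.
        if isinstance(obj, functools.partial):
            return {}
        return typing.get_type_hints(obj)

    print(annotations_of(add))      # hints for x, y and the return value
    print(annotations_of(add_one))  # {}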
@@ -153,7 +153,7 @@ class SphinxLoggerAdapter(logging.LoggerAdapter):
 
     def handle(self, record):
         # type: (logging.LogRecord) -> None
-        self.logger.handle(record)  # type: ignore
+        self.logger.handle(record)
 
 
 class WarningStreamHandler(logging.StreamHandler):
@@ -411,7 +411,8 @@ class SphinxLogRecordTranslator(logging.Filter):
     def filter(self, record):  # type: ignore
         # type: (SphinxWarningLogRecord) -> bool
         if isinstance(record, logging.LogRecord):
-            record.__class__ = self.LogRecordClass  # force subclassing to handle location
+            # force subclassing to handle location
+            record.__class__ = self.LogRecordClass  # type: ignore
 
         location = getattr(record, 'location', None)
         if isinstance(location, tuple):
@@ -335,7 +335,7 @@ def clean_astext(node):
 def split_explicit_title(text):
     # type: (unicode) -> Tuple[bool, unicode, unicode]
     """Split role content into title and target, if given."""
-    match = explicit_title_re.match(text)  # type: ignore
+    match = explicit_title_re.match(text)
     if match:
         return True, match.group(1), match.group(2)
     return False, text, text
@@ -30,7 +30,7 @@ logger = logging.getLogger(__name__)
 
 def escape(text):
     # type: (unicode) -> unicode
-    text = symbols_re.sub(r'\\\1', text)  # type: ignore
+    text = symbols_re.sub(r'\\\1', text)
     text = re.sub(r'^\.', r'\.', text)  # escape a dot at top
     return text
 
@@ -1679,7 +1679,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
 
     def visit_figure(self, node):
         # type: (nodes.Node) -> None
-        labels = self.hypertarget_to(node)
         if self.table:
             # TODO: support align option
             if 'width' in node:
@@ -1691,7 +1690,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
             self.body.append('\\begin{sphinxfigure-in-table}\n\\centering\n')
             if any(isinstance(child, nodes.caption) for child in node):
                 self.body.append('\\capstart')
-            self.context.append(labels + '\\end{sphinxfigure-in-table}\\relax\n')
+            self.context.append('\\end{sphinxfigure-in-table}\\relax\n')
         elif node.get('align', '') in ('left', 'right'):
             length = None
             if 'width' in node:
@@ -1700,7 +1699,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
                 length = self.latex_image_length(node[0]['width'])
             self.body.append('\\begin{wrapfigure}{%s}{%s}\n\\centering' %
                              (node['align'] == 'right' and 'r' or 'l', length or '0pt'))
-            self.context.append(labels + '\\end{wrapfigure}\n')
+            self.context.append('\\end{wrapfigure}\n')
         elif self.in_minipage:
             self.body.append('\n\\begin{center}')
             self.context.append('\\end{center}\n')
@@ -1709,7 +1708,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
                                  self.elements['figure_align'])
             if any(isinstance(child, nodes.caption) for child in node):
                 self.body.append('\\capstart\n')
-            self.context.append(labels + '\\end{figure}\n')
+            self.context.append('\\end{figure}\n')
 
     def depart_figure(self, node):
         # type: (nodes.Node) -> None
@@ -1730,6 +1729,9 @@ class LaTeXTranslator(nodes.NodeVisitor):
     def depart_caption(self, node):
         # type: (nodes.Node) -> None
         self.body.append('}')
+        if isinstance(node.parent, nodes.figure):
+            labels = self.hypertarget_to(node.parent)
+            self.body.append(labels)
         self.in_caption -= 1
 
     def visit_legend(self, node):
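Together with the visit_figure hunks above, this moves the figure's `\label` from `\end{figure}` to just behind `\caption`, which is the fix for CHANGES entry "#3704: latex: wrong ``\label`` positioning for figures with a legend": with a legend present, the label used to land after the legend, so references resolved to the wrong anchor. Per the test hunk below, the emitted LaTeX now looks roughly like this (image name is a placeholder):

    \begin{figure}[htbp]
    \centering
    \includegraphics{example.png}
    \caption{labeled figure}\label{\detokenize{index:figure3}}
    \begin{sphinxlegend}
    with a legend
    \end{sphinxlegend}
    \end{figure}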
@@ -16,6 +16,8 @@ figures
 
    labeled figure
 
+   with a legend
+
 code-blocks
 -----------
 
@@ -1296,7 +1296,8 @@ def test_latex_labels(app, status, warning):
             r'\label{\detokenize{index:figure1}}'
             r'\end{figure}' in result)
     assert (r'\caption{labeled figure}'
-            r'\label{\detokenize{index:figure3}}'
+            '\\label{\detokenize{index:figure3}}\n'
+            '\\begin{sphinxlegend}\nwith a legend\n\\end{sphinxlegend}\n'
             r'\end{figure}' in result)
 
     # code-blocks