Mirror of https://github.com/sphinx-doc/sphinx.git (synced 2025-02-25 18:55:22 -06:00)

Merged birkenfeld/sphinx into default

commit 409e29023b
CHANGES (25 changes)
@@ -6,11 +6,36 @@ Features added

 * Builders: rebuild i18n target document when catalog updated.

+Incompatible changes
+--------------------
+
+* PR#144, #1182: Force timezone offset to LocalTimeZone on the
+  POT-Creation-Date generated by the gettext builder. Thanks to masklinn
+  and Jakub Wilk.
+
 Bugs fixed
 ----------

+* #1173: Newest Jinja2 2.7 breaks Python version compatibility because
+  Jinja2 2.7 drops support for Python 2.5, 3.1 and 3.2. Thanks to
+  Alexander Dupuy.
+* #1160: A missing citation target causes an AssertionError.
+* #1157: Combination of 'globaltoc.html' and a hidden toctree causes an
+  exception.
 * Fix: 'make gettext' causes a UnicodeDecodeError when templates contain a
   utf-8 encoded string.
+* #1162, PR#139: singlehtml builder doesn't copy images to _images/.
+* PR#141, #982: Avoid crash when writing a PNG file using Python 3. Thanks
+  to Marcin Wojdyr.
+* PR#145: In parallel builds, Sphinx drops the second document file to
+  write. Thanks to tychoish.
+* #1188: sphinx-quickstart raises a UnicodeEncodeError if "Project version"
+  includes non-ASCII characters.
+* #1189: The "Title underline is short" WARNING is given when using
+  fullwidth characters for "Project name" on quickstart.
+* #1190: The output TeX/texinfo/man filename has no basename (only an
+  extension) when using multibyte characters for "Project name" on
+  quickstart.
+* #1090: Fix multiple cross references (term, ref, doc) in the same line
+  returning the same link with i18n.
+* #1193: Fix multiple link references in the same line returning the same
+  link with i18n.


 Release 1.2 (beta1 released Mar 31, 2013)
EXAMPLES (1 change)
@@ -80,6 +80,7 @@ Documentation using a customized version of the default theme

 * Mayavi: http://code.enthought.com/projects/mayavi/docs/development/html/mayavi
 * NOC: http://redmine.nocproject.org/projects/noc
 * NumPy: http://docs.scipy.org/doc/numpy/reference/
+* OpenCV: http://docs.opencv.org/
 * Peach^3: http://peach3.nl/doc/latest/userdoc/
 * PyLit: http://pylit.berlios.de/
 * Sage: http://sagemath.org/doc/
setup.py (9 changes)
@@ -44,10 +44,15 @@ A development egg can be found `here
 <http://bitbucket.org/birkenfeld/sphinx/get/tip.gz#egg=Sphinx-dev>`_.
 '''

-requires = ['Pygments>=1.2', 'Jinja2>=2.3', 'docutils>=0.7']
+requires = ['Pygments>=1.2', 'docutils>=0.7']

 if sys.version_info[:3] >= (3, 3, 0):
-    requires[2] = 'docutils>=0.10'
+    requires[1] = 'docutils>=0.10'

+if sys.version_info < (2, 6) or (3, 0) <= sys.version_info < (3, 3):
+    requires.append('Jinja2>=2.3,<2.7')
+else:
+    requires.append('Jinja2>=2.3')
+
 if sys.version_info < (2, 5):
     print('ERROR: Sphinx requires at least Python 2.5 to run.')
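Context for the setup.py change: Jinja2 2.7 dropped Python 2.5, 3.1 and 3.2 (#1173), so the requirement is now chosen per interpreter. A standalone sanity check of that pin logic — the helper function is ours, for illustration only:

    import sys

    def jinja2_requirement(version_info):
        # cap Jinja2 below 2.7 on interpreters it no longer supports
        if version_info < (2, 6) or (3, 0) <= version_info < (3, 3):
            return 'Jinja2>=2.3,<2.7'
        return 'Jinja2>=2.3'

    assert jinja2_requirement((2, 5)) == 'Jinja2>=2.3,<2.7'
    assert jinja2_requirement((3, 2)) == 'Jinja2>=2.3,<2.7'
    assert jinja2_requirement((2, 7)) == 'Jinja2>=2.3'
    assert jinja2_requirement((3, 3)) == 'Jinja2>=2.3'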
@@ -355,7 +355,6 @@ class Builder(object):
         self.write_doc_serialized(firstname, doctree)
         self.write_doc(firstname, doctree)
         # for the rest, determine how many documents to write in one go
-        docnames = docnames[1:]
         ndocs = len(docnames)
         chunksize = min(ndocs // nproc, 10)
         nchunks, rest = divmod(ndocs, chunksize)
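This one-line removal is the PR#145 fix: by this point `docnames` evidently no longer contains `firstname` (which was just written serially), so slicing again discarded one extra document per parallel build. A minimal standalone illustration, with invented names:

    docnames = ['first', 'second', 'third']
    firstname, docnames = docnames[0], docnames[1:]  # split off the serial doc
    # the deleted line sliced a second time, silently dropping 'second':
    # docnames = docnames[1:]
    assert docnames == ['second', 'third']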
@@ -11,7 +11,8 @@

 from os import path, walk
 from codecs import open
-from datetime import datetime
+from time import time
+from datetime import datetime, tzinfo, timedelta
 from collections import defaultdict
 from uuid import uuid4

@@ -107,6 +108,25 @@ class I18nBuilder(Builder):
                 catalog.add(m, node)


+timestamp = time()
+
+class LocalTimeZone(tzinfo):
+
+    def __init__(self, *args, **kw):
+        super(LocalTimeZone, self).__init__(*args, **kw)
+        tzdelta = datetime.fromtimestamp(timestamp) - \
+            datetime.utcfromtimestamp(timestamp)
+        self.tzdelta = tzdelta
+
+    def utcoffset(self, dt):
+        return self.tzdelta
+
+    def dst(self, dt):
+        return timedelta(0)
+
+ltz = LocalTimeZone()
+
+
 class MessageCatalogBuilder(I18nBuilder):
     """
     Builds gettext-style message catalogs (.pot files).
@@ -154,8 +174,8 @@ class MessageCatalogBuilder(I18nBuilder):
             version = self.config.version,
             copyright = self.config.copyright,
             project = self.config.project,
-            # XXX should supply tz
-            ctime = datetime.now().strftime('%Y-%m-%d %H:%M%z'),
+            ctime = datetime.fromtimestamp(
+                timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),
         )
         for textdomain, catalog in self.status_iterator(
             self.catalogs.iteritems(), "writing message catalogs... ",
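Why this matters for PR#144/#1182: `datetime.now()` is naive, so `%z` renders as an empty string and the POT-Creation-Date carried no UTC offset; attaching a fixed-offset tzinfo makes `%z` concrete. A self-contained sketch — it uses Python 3's `timezone` class for brevity where the diff above hand-rolls the equivalent tzinfo subclass for Python 2, and the +09:00 offset is an assumption:

    from datetime import datetime, timedelta, timezone

    ltz = timezone(timedelta(hours=9))           # assume a +09:00 local offset
    naive = datetime.now().strftime('%Y-%m-%d %H:%M%z')   # '%z' stays empty
    aware = datetime.fromtimestamp(1371600000, ltz) \
        .strftime('%Y-%m-%d %H:%M%z')
    print(aware)  # '2013-06-19 09:00+0900'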
@@ -938,6 +938,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
         doctree = self.assemble_doctree()
         self.info()
         self.info(bold('writing... '), nonl=True)
+        self.write_doc_serialized(self.config.master_doc, doctree)
         self.write_doc(self.config.master_doc, doctree)
         self.info('done')
@@ -53,7 +53,7 @@ class XMLBuilder(Builder):
         pass

     def get_target_uri(self, docname, typ=None):
-        return ''
+        return docname

     def prepare_writing(self, docnames):
         self.writer = self._writer_class(self)
@@ -205,6 +205,42 @@ class OptionXRefRole(XRefRole):
         return title, target


+def make_termnodes_from_paragraph_node(env, node, new_id=None):
+    gloss_entries = env.temp_data.setdefault('gloss_entries', set())
+    objects = env.domaindata['std']['objects']
+
+    termtext = node.astext()
+    if new_id is None:
+        new_id = 'term-' + nodes.make_id(termtext)
+        if new_id in gloss_entries:
+            new_id = 'term-' + str(len(gloss_entries))
+    gloss_entries.add(new_id)
+    objects['term', termtext.lower()] = env.docname, new_id
+
+    # add an index entry too
+    indexnode = addnodes.index()
+    indexnode['entries'] = [('single', termtext, new_id, 'main')]
+    new_termnodes = []
+    new_termnodes.append(indexnode)
+    new_termnodes.extend(node.children)
+    new_termnodes.append(addnodes.termsep())
+    for termnode in new_termnodes:
+        termnode.source, termnode.line = node.source, node.line
+
+    return new_id, termtext, new_termnodes
+
+
+def make_term_from_paragraph_node(termnodes, ids):
+    # make a single "term" node with all the terms, separated by termsep
+    # nodes (remove the dangling trailing separator)
+    term = nodes.term('', '', *termnodes[:-1])
+    term.source, term.line = termnodes[0].source, termnodes[0].line
+    term.rawsource = term.astext()
+    term['ids'].extend(ids)
+    term['names'].extend(ids)
+    return term
+
+
 class Glossary(Directive):
     """
     Directive to create a glossary with cross-reference targets for :term:
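For reference, the `term-...` ids registered by the new helper come straight from docutils' `make_id`, which is also what the new tests key on (e.g. `glossary_terms#term-some-term`). A quick illustration:

    from docutils import nodes

    print('term-' + nodes.make_id('Some term'))        # 'term-some-term'
    print('term-' + nodes.make_id('Some other term'))  # 'term-some-other-term'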
@@ -221,8 +257,6 @@ class Glossary(Directive):

     def run(self):
         env = self.state.document.settings.env
-        objects = env.domaindata['std']['objects']
-        gloss_entries = env.temp_data.setdefault('gloss_entries', set())
         node = addnodes.glossary()
         node.document = self.state.document
@@ -296,31 +330,15 @@ class Glossary(Directive):
             # get a text-only representation of the term and register it
             # as a cross-reference target
             tmp = nodes.paragraph('', '', *res[0])
-            termtext = tmp.astext()
-            new_id = 'term-' + nodes.make_id(termtext)
-            if new_id in gloss_entries:
-                new_id = 'term-' + str(len(gloss_entries))
-            gloss_entries.add(new_id)
+            tmp.source = source
+            tmp.line = lineno
+            new_id, termtext, new_termnodes = \
+                make_termnodes_from_paragraph_node(env, tmp)
             ids.append(new_id)
-            objects['term', termtext.lower()] = env.docname, new_id
             termtexts.append(termtext)
-            # add an index entry too
-            indexnode = addnodes.index()
-            indexnode['entries'] = [('single', termtext, new_id, 'main')]
-            new_termnodes = []
-            new_termnodes.append(indexnode)
-            new_termnodes.extend(res[0])
-            new_termnodes.append(addnodes.termsep())
-            for termnode in new_termnodes:
-                termnode.source, termnode.line = source, lineno
             termnodes.extend(new_termnodes)
-        # make a single "term" node with all the terms, separated by termsep
-        # nodes (remove the dangling trailing separator)
-        term = nodes.term('', '', *termnodes[:-1])
-        term.source, term.line = termnodes[0].source, termnodes[0].line
-        term.rawsource = term.astext()
-        term['ids'].extend(ids)
-        term['names'].extend(ids)
+        term = make_term_from_paragraph_node(termnodes, ids)
         term += system_messages

         defnode = nodes.definition()
@@ -1049,7 +1049,8 @@ class BuildEnvironment:
         for toctreenode in doctree.traverse(addnodes.toctree):
             toctree = self.resolve_toctree(docname, builder, toctreenode,
                                            prune=True, **kwds)
-            toctrees.append(toctree)
+            if toctree:
+                toctrees.append(toctree)
         if not toctrees:
             return None
         result = toctrees[0]
@@ -1353,6 +1354,10 @@ class BuildEnvironment:
                     if not isinstance(contnode, nodes.Element):
                         del node['ids'][:]
                         raise
+            elif 'ids' in node:
+                # remove the ids attribute annotated by
+                # transforms.CitationReference.apply
+                del node['ids'][:]
             # no new node found? try the missing-reference event
             if newnode is None:
                 newnode = builder.app.emit_firstresult(
@@ -14,6 +14,8 @@ from os import path

 TERM_ENCODING = getattr(sys.stdin, 'encoding', None)

+from docutils.utils import column_width
+
 from sphinx import __version__
 from sphinx.util.osutil import make_filename
 from sphinx.util.console import purple, bold, red, turquoise, \
@@ -43,7 +45,8 @@ QUICKSTART_CONF += u'''\
 # %(project)s documentation build configuration file, created by
 # sphinx-quickstart on %(now)s.
 #
-# This file is execfile()d with the current directory set to its containing dir.
+# This file is execfile()d with the current directory set to its
+# containing dir.
 #
 # Note that not all possible configuration values are present in this
 # autogenerated file.
@@ -51,20 +54,22 @@ QUICKSTART_CONF += u'''\
 # All configuration values have a default; values that are commented out
 # serve to show the default.

-import sys, os
+import sys
+import os

 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #sys.path.insert(0, os.path.abspath('.'))

-# -- General configuration -----------------------------------------------------
+# -- General configuration ------------------------------------------------

 # If your documentation needs a minimal Sphinx version, state it here.
 #needs_sphinx = '1.0'

-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
 extensions = [%(extensions)s]

 # Add any paths that contain templates here, relative to this directory.
@@ -106,7 +111,8 @@ release = '%(release_str)s'
 # directories to ignore when looking for source files.
 exclude_patterns = [%(exclude_patterns)s]

-# The reST default role (used for this markup: `text`) to use for all documents.
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
 #default_role = None

 # If true, '()' will be appended to :func: etc. cross-reference text.
@@ -130,7 +136,7 @@ pygments_style = 'sphinx'
 #keep_warnings = False


-# -- Options for HTML output ---------------------------------------------------
+# -- Options for HTML output ----------------------------------------------

 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
@@ -210,7 +216,7 @@ html_static_path = ['%(dot)sstatic']
 htmlhelp_basename = '%(project_fn)sdoc'


-# -- Options for LaTeX output --------------------------------------------------
+# -- Options for LaTeX output ---------------------------------------------

 latex_elements = {
 # The paper size ('letterpaper' or 'a4paper').
@@ -224,7 +230,8 @@ latex_elements = {
 }

 # Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
+# (source start file, target name, title,
+#  author, documentclass [howto/manual]).
 latex_documents = [
   ('%(master_str)s', '%(project_fn)s.tex', u'%(project_doc_texescaped_str)s',
    u'%(author_texescaped_str)s', 'manual'),
@@ -251,7 +258,7 @@ latex_documents = [
 #latex_domain_indices = True


-# -- Options for manual page output --------------------------------------------
+# -- Options for manual page output ---------------------------------------

 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
@@ -264,7 +271,7 @@ man_pages = [
 #man_show_urls = False


-# -- Options for Texinfo output ------------------------------------------------
+# -- Options for Texinfo output -------------------------------------------

 # Grouping the document tree into Texinfo files. List of tuples
 # (source start file, target name, title, author,
@@ -290,7 +297,7 @@ texinfo_documents = [

 EPUB_CONFIG = u'''

-# -- Options for Epub output ---------------------------------------------------
+# -- Options for Epub output ----------------------------------------------

 # Bibliographic Dublin Core info.
 epub_title = u'%(project_str)s'
@@ -861,9 +868,24 @@ def ok(x):
 def do_prompt(d, key, text, default=None, validator=nonempty):
     while True:
         if default:
-            prompt = purple(PROMPT_PREFIX + '%s [%s]: ' % (text, default))
+            prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default)
         else:
-            prompt = purple(PROMPT_PREFIX + text + ': ')
+            prompt = PROMPT_PREFIX + text + ': '
+        if sys.version_info < (3, 0):
+            # for Python 2.x, try to get a Unicode string out of it
+            if prompt.encode('ascii', 'replace').decode('ascii', 'replace') \
+                    != prompt:
+                if TERM_ENCODING:
+                    prompt = prompt.encode(TERM_ENCODING)
+                else:
+                    print turquoise('* Note: non-ASCII default value provided '
+                                    'and terminal encoding unknown -- assuming '
+                                    'UTF-8 or Latin-1.')
+                    try:
+                        prompt = prompt.encode('utf-8')
+                    except UnicodeEncodeError:
+                        prompt = prompt.encode('latin1')
+        prompt = purple(prompt)
         x = term_input(prompt).strip()
         if default and not x:
             x = default
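The new branch encodes a non-ASCII prompt before Python 2's `raw_input` prints it, preferring the terminal encoding and falling back to UTF-8, then Latin-1 (#1188). The same fallback in isolation (Python 2; the prompt value is invented):

    # -*- coding: utf-8 -*-
    import sys

    prompt = u'> Project name [\u65e5\u672c]: '     # non-ASCII default
    if sys.version_info < (3, 0):
        term_encoding = getattr(sys.stdin, 'encoding', None)
        if prompt.encode('ascii', 'replace').decode('ascii', 'replace') != prompt:
            try:
                prompt = prompt.encode(term_encoding or 'utf-8')
            except UnicodeEncodeError:
                prompt = prompt.encode('latin1')   # last-resort guess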
@@ -1058,6 +1080,7 @@ def generate(d, overwrite=True, silent=False):
     """Generate project based on values in *d*."""

     texescape.init()
+    indent = ' ' * 4

     if 'mastertoctree' not in d:
         d['mastertoctree'] = ''
@@ -1067,12 +1090,16 @@ def generate(d, overwrite=True, silent=False):
     d['project_fn'] = make_filename(d['project'])
     d['project_manpage'] = d['project_fn'].lower()
     d['now'] = time.asctime()
-    d['project_underline'] = len(d['project']) * '='
-    d['extensions'] = ', '.join(
+    d['project_underline'] = column_width(d['project']) * '='
+    extensions = (',\n' + indent).join(
         repr('sphinx.ext.' + name)
         for name in ('autodoc', 'doctest', 'intersphinx', 'todo', 'coverage',
                      'pngmath', 'mathjax', 'ifconfig', 'viewcode')
         if d.get('ext_' + name))
+    if extensions:
+        d['extensions'] = '\n' + indent + extensions + ',\n'
+    else:
+        d['extensions'] = extensions
     d['copyright'] = time.strftime('%Y') + ', ' + d['author']
     d['author_texescaped'] = unicode(d['author']).\
                              translate(texescape.tex_escape_map)
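`column_width` is the actual #1189 fix: `len()` counts characters, but fullwidth characters occupy two terminal columns, so `len`-based `=` underlines came out too short. A small demonstration:

    from docutils.utils import column_width

    project = u'\u30c9\u30a4\u30c4'     # three fullwidth characters
    print(len(project))                 # 3
    print(column_width(project))        # 6
    print(column_width(project) * '=')  # '======' -- long enough to avoid
                                        # "Title underline is short"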
@@ -24,6 +24,10 @@ from sphinx.util.nodes import traverse_translatable_index, extract_messages
 from sphinx.util.osutil import ustrftime, find_catalog
 from sphinx.util.compat import docutils_version
 from sphinx.util.pycompat import all
+from sphinx.domains.std import (
+    make_term_from_paragraph_node,
+    make_termnodes_from_paragraph_node,
+)


 default_substitutions = set([
@@ -173,6 +177,7 @@ class Locale(Transform):

         parser = RSTParser()

+        # phase 1: replace reference ids with translated names
         for node, msg in extract_messages(self.document):
             msgstr = catalog.gettext(msg)
             # XXX add marker to untranslated parts
@@ -195,6 +200,102 @@ class Locale(Transform):
             if not isinstance(patch, nodes.paragraph):
                 continue  # skip for now

+            processed = False  # skip flag
+
+            # update title (section) target name-id mapping
+            if isinstance(node, nodes.title):
+                section_node = node.parent
+                new_name = nodes.fully_normalize_name(patch.astext())
+                old_name = nodes.fully_normalize_name(node.astext())
+
+                if old_name != new_name:
+                    # if the name changes, replace the node names and the
+                    # document nameids mapping with the new name.
+                    names = section_node.setdefault('names', [])
+                    names.append(new_name)
+                    if old_name in names:
+                        names.remove(old_name)
+
+                    _id = self.document.nameids.get(old_name, None)
+                    explicit = self.document.nametypes.get(old_name, None)
+
+                    # * if explicit: _id is a label; the title node needs
+                    #   another id.
+                    # * if not explicit:
+                    #
+                    #   * _id is None:
+                    #
+                    #     _id is None means _id was duplicated.
+                    #     The old_name entry still exists in nameids and
+                    #     nametypes for another duplicated entry.
+                    #
+                    #   * _id is provided: process below.
+                    if not explicit and _id:
+                        # _id was not duplicated.
+                        # remove the old_name entry from the document ids
+                        # database to reuse the original _id.
+                        self.document.nameids.pop(old_name, None)
+                        self.document.nametypes.pop(old_name, None)
+                        self.document.ids.pop(_id, None)
+
+                    # re-enter with the newly named section node.
+                    self.document.note_implicit_target(
+                        section_node, section_node)
+
+                processed = True
+
+            # glossary terms update refid
+            if isinstance(node, nodes.term):
+                gloss_entries = env.temp_data.setdefault('gloss_entries', set())
+                ids = []
+                termnodes = []
+                for _id in node['names']:
+                    if _id in gloss_entries:
+                        gloss_entries.remove(_id)
+                    _id, _, new_termnodes = \
+                        make_termnodes_from_paragraph_node(env, patch, _id)
+                    ids.append(_id)
+                    termnodes.extend(new_termnodes)
+
+                if termnodes and ids:
+                    patch = make_term_from_paragraph_node(termnodes, ids)
+                    node['ids'] = patch['ids']
+                    node['names'] = patch['names']
+                    processed = True
+
+            # update leaves with processed nodes
+            if processed:
+                for child in patch.children:
+                    child.parent = node
+                node.children = patch.children
+                node['translated'] = True
+
+        # phase 2: translation
+        for node, msg in extract_messages(self.document):
+            if node.get('translated', False):
+                continue
+
+            msgstr = catalog.gettext(msg)
+            # XXX add marker to untranslated parts
+            if not msgstr or msgstr == msg:  # as-of-yet untranslated
+                continue
+
+            # Avoid "Literal block expected; none found." warnings.
+            # If msgstr ends with '::', parser.parse() emits that warning;
+            # it only appears in this case.
+            if msgstr.strip().endswith('::'):
+                msgstr += '\n\n   dummy literal'
+                # the dummy literal node is discarded by 'patch = patch[0]'
+
+            patch = new_document(source, settings)
+            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
+            parser.parse(msgstr, patch)
+            patch = patch[0]
+            # XXX doctest and other block markup
+            if not isinstance(patch, nodes.paragraph):
+                continue  # skip for now
+
             # auto-numbered foot note reference should use original 'ids'.
             def is_autonumber_footnote_ref(node):
                 return isinstance(node, nodes.footnote_reference) and \
@@ -211,32 +312,31 @@ class Locale(Transform):
                 self.document.autofootnote_refs.remove(old)
                 self.document.note_autofootnote_ref(new)

-            # reference should use original 'refname'.
+            # reference should use the new (translated) 'refname'.
             # * reference target ".. _Python: ..." is not translatable.
-            # * section refname is not translatable.
+            # * use the translated refname for the section refname.
             # * inline reference "`Python <...>`_" has no 'refname'.
             def is_refnamed_ref(node):
                 return isinstance(node, nodes.reference) and \
                     'refname' in node
             old_refs = node.traverse(is_refnamed_ref)
             new_refs = patch.traverse(is_refnamed_ref)
-            applied_refname_map = {}
             if len(old_refs) != len(new_refs):
                 env.warn_node('inconsistent references in '
                               'translated message', node)
+            old_ref_names = [r['refname'] for r in old_refs]
+            new_ref_names = [r['refname'] for r in new_refs]
+            orphans = list(set(old_ref_names) - set(new_ref_names))
             for new in new_refs:
-                if new['refname'] in applied_refname_map:
-                    # 2nd appearance of the reference
-                    new['refname'] = applied_refname_map[new['refname']]
-                elif old_refs:
-                    # 1st appearance of the reference in old_refs
-                    old = old_refs.pop(0)
-                    refname = old['refname']
-                    new['refname'] = refname
-                    applied_refname_map[new['refname']] = refname
-                else:
-                    # the reference is not found in old_refs
-                    applied_refname_map[new['refname']] = new['refname']
+                if not self.document.has_name(new['refname']):
+                    # Maybe the refname is translated but the target is not.
+                    # Note: multiple translated refnames break link ordering.
+                    if orphans:
+                        new['refname'] = orphans.pop(0)
+                    else:
+                        # the orphan refname list is already exhausted; the
+                        # reference numbering matches in new_refs and old_refs.
+                        pass

                 self.document.note_refname(new)
@@ -268,11 +368,22 @@ class Locale(Transform):
             if len(old_refs) != len(new_refs):
                 env.warn_node('inconsistent term references in '
                               'translated message', node)
+
+            def get_ref_key(node):
+                case = node["refdomain"], node["reftype"]
+                if case == ('std', 'term'):
+                    return None
+                else:
+                    return (
+                        node["refdomain"],
+                        node["reftype"],
+                        node['reftarget'],)
+
             for old in old_refs:
-                key = old["reftype"], old["refdomain"]
-                xref_reftarget_map[key] = old["reftarget"]
+                key = get_ref_key(old)
+                if key:
+                    xref_reftarget_map[key] = old["reftarget"]
             for new in new_refs:
-                key = new["reftype"], new["refdomain"]
+                key = get_ref_key(new)
                 if key in xref_reftarget_map:
                     new['reftarget'] = xref_reftarget_map[key]
@@ -280,6 +391,7 @@ class Locale(Transform):
             for child in patch.children:
                 child.parent = node
             node.children = patch.children
+            node['translated'] = True

         # Extract and translate messages for index entries.
         for node, entries in traverse_translatable_index(self.document):
@@ -134,7 +134,7 @@ def copyfile(source, dest):
 no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')

 def make_filename(string):
-    return no_fn_re.sub('', string)
+    return no_fn_re.sub('', string) or 'sphinx'

 if sys.version_info < (3, 0):
     # strftime for unicode strings
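The `or 'sphinx'` fallback is the #1190 fix: a project name consisting only of multibyte characters strips down to an empty basename. The changed function, runnable on its own:

    import re

    no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')

    def make_filename(string):
        # multibyte-only names strip to '', so fall back to 'sphinx'
        return no_fn_re.sub('', string) or 'sphinx'

    print(make_filename(u'\u30c9\u30a4\u30c4'))  # 'sphinx'
    print(make_filename(u'My Project 1.0'))      # 'MyProject10'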
@@ -51,7 +51,8 @@ def write_png_depth(filename, depth):
         # overwrite it with the depth chunk
         f.write(DEPTH_CHUNK_LEN + DEPTH_CHUNK_START + data)
         # calculate the checksum over chunk name and data
-        f.write(struct.pack('!i', binascii.crc32(DEPTH_CHUNK_START + data)))
+        crc = binascii.crc32(DEPTH_CHUNK_START + data) & 0xffffffff
+        f.write(struct.pack('!I', crc))
         # replace the IEND chunk
         f.write(IEND_CHUNK)
     finally:
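Why the mask matters (PR#141, #982): Python 2's `binascii.crc32` may return a negative int, which `'!i'` packs fine, but Python 3 returns an unsigned value in [0, 2**32), and `struct.pack('!i', ...)` raises `struct.error` once the CRC exceeds 2**31 - 1. Masking and packing unsigned works on both. A standalone check with an invented payload:

    import binascii
    import struct

    data = b'dpTh' + b'\x00\x00\x00\x2a'      # hypothetical chunk name + data
    crc = binascii.crc32(data) & 0xffffffff   # always in [0, 2**32)
    packed = struct.pack('!I', crc)           # safe on Python 2 and 3
    assert len(packed) == 4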
@@ -38,3 +38,9 @@ footenotes
 .. rubric:: Citations

 .. [bar] cite
+
+
+missing target
+--------------------
+
+[missing]_ citation
@@ -25,8 +25,23 @@ msgstr "EXTERNAL LINK TO Python_."
 msgid "Internal link to `i18n with external links`_."
 msgstr "`EXTERNAL LINKS`_ IS INTERNAL LINK."

-msgid "Inline link by `Sphinx <http://sphinx-doc.org>`_."
-msgstr "INLINE LINK BY `SPHINX <http://sphinx-doc.org>`_."
+msgid "Inline link by `Sphinx Site <http://sphinx-doc.org>`_."
+msgstr "INLINE LINK BY `THE SPHINX SITE <http://sphinx-doc.org>`_."

 msgid "Unnamed link__."
 msgstr "UNNAMED LINK__."
+
+msgid "link target swapped translation"
+msgstr "LINK TARGET SWAPPED TRANSLATION"
+
+msgid "link to external1_ and external2_."
+msgstr "LINK TO external2_ AND external1_."
+
+msgid "link to `Sphinx Site <http://sphinx-doc.org>`_ and `Python Site <http://python.org>`_."
+msgstr "LINK TO `THE PYTHON SITE <http://python.org>`_ AND `THE SPHINX SITE <http://sphinx-doc.org>`_."
+
+msgid "Multiple references in the same line"
+msgstr "MULTIPLE REFERENCES IN THE SAME LINE"
+
+msgid "Link to `Sphinx Site <http://sphinx-doc.org>`_, `Python Site <http://python.org>`_, Python_, Unnamed__ and `i18n with external links`_."
+msgstr "LINK TO `EXTERNAL LINKS`_, Python_, `THE SPHINX SITE <http://sphinx-doc.org>`_, UNNAMED__ AND `THE PYTHON SITE <http://python.org>`_."
@@ -4,10 +4,32 @@ i18n with external links
 ========================
 .. #1044 external-links-dont-work-in-localized-html

-* External link to Python_.
-* Internal link to `i18n with external links`_.
-* Inline link by `Sphinx <http://sphinx-doc.org>`_.
-* Unnamed link__.
+External link to Python_.
+
+Internal link to `i18n with external links`_.
+
+Inline link by `Sphinx Site <http://sphinx-doc.org>`_.
+
+Unnamed link__.
+
+.. _Python: http://python.org/index.html
+.. __: http://google.com
+
+
+link target swapped translation
+================================
+
+link to external1_ and external2_.
+
+link to `Sphinx Site <http://sphinx-doc.org>`_ and `Python Site <http://python.org>`_.
+
+.. _external1: http://example.com/external1
+.. _external2: http://example.com/external2
+
+
+Multiple references in the same line
+=====================================
+
+Link to `Sphinx Site <http://sphinx-doc.org>`_, `Python Site <http://python.org>`_, Python_, Unnamed__ and `i18n with external links`_.

-.. _Python: http://python.org
 .. __: http://google.com
tests/roots/test-intl/label_target.po (new file, 52 lines)
@@ -0,0 +1,52 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2013, sphinx
+# This file is distributed under the same license as the sphinx package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: 1.2\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2013-06-19 00:33+0000\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+msgid "section and label"
+msgstr "X SECTION AND LABEL"
+
+msgid ""
+":ref:`implicit-target` point to ``implicit-target`` and "
+"`section and label`_ point to ``section-and-label``."
+msgstr ""
+":ref:`implicit-target` POINT TO ``implicit-target`` AND "
+"`X SECTION AND LABEL`_ POINT TO ``section-and-label``."
+
+msgid "explicit-target"
+msgstr "X EXPLICIT-TARGET"
+
+msgid ""
+":ref:`explicit-target` point to ``explicit-target`` and `explicit-target`_"
+" point to duplicated id like ``id1``."
+msgstr ""
+":ref:`explicit-target` POINT TO ``explicit-target`` AND `X EXPLICIT-TARGET`_"
+" POINT TO DUPLICATED ID LIKE ``id1``."
+
+msgid "implicit section name"
+msgstr "X IMPLICIT SECTION NAME"
+
+msgid "`implicit section name`_ point to ``implicit-section-name``."
+msgstr "`X IMPLICIT SECTION NAME`_ POINT TO ``implicit-section-name``."
+
+msgid "duplicated sub section"
+msgstr "X DUPLICATED SUB SECTION"
+
+msgid ""
+"`duplicated sub section`_ is broken link."
+msgstr ""
+"`X DUPLICATED SUB SECTION`_ IS BROKEN LINK."
tests/roots/test-intl/label_target.txt (new file, 54 lines)
@@ -0,0 +1,54 @@
+:tocdepth: 2
+
+.. _implicit-target:
+
+section and label
+==================
+
+.. This section's label and section title are different.
+.. In this case, the section has two target ids.
+
+:ref:`implicit-target` point to ``implicit-target`` and
+`section and label`_ point to ``section-and-label``.
+
+
+.. _explicit-target:
+
+explicit-target
+================
+
+.. This section's label equals the section title.
+.. In this case, a duplicated target id is generated by docutils.
+
+:ref:`explicit-target` point to ``explicit-target`` and
+`explicit-target`_ point to duplicated id like ``id1``.
+
+
+implicit section name
+======================
+
+.. This section has no label.
+.. In this case, the section has one id.
+
+`implicit section name`_ point to ``implicit-section-name``.
+
+duplicated sub section
+------------------------
+
+.. This section has no label, but its name is duplicated by the next
+.. section. In this case, the section has one id.
+
+`duplicated sub section`_ is broken link.
+
+.. There is no way to link to this section's ``duplicated-sub-section`` by
+.. using formal reStructuredText markup.
+
+duplicated sub section
+------------------------
+
+.. This section has no label, and its name duplicates the previous one's.
+.. In this case, a duplicated target id is generated by docutils.
+
+.. There is no way to link to this section's duplicated id like ``id2`` by
+.. using formal reStructuredText markup.
@@ -21,3 +21,27 @@ msgstr "I18N ROCK'N ROLE XREF"

 msgid "link to :term:`Some term`, :ref:`i18n-role-xref`, :doc:`contents`."
 msgstr "LINK TO :ref:`i18n-role-xref`, :doc:`contents`, :term:`SOME NEW TERM`."
+
+msgid "same type links"
+msgstr "SAME TYPE LINKS"
+
+msgid "link to :term:`Some term` and :term:`Some other term`."
+msgstr "LINK TO :term:`SOME OTHER NEW TERM` AND :term:`SOME NEW TERM`."
+
+msgid "link to :ref:`i18n-role-xref` and :ref:`same-type-links`."
+msgstr "LINK TO :ref:`same-type-links` AND :ref:`i18n-role-xref`."
+
+msgid "link to :doc:`contents` and :doc:`glossary_terms`."
+msgstr "LINK TO :doc:`glossary_terms` AND :doc:`contents`."
+
+msgid "link to :option:`-m` and :option:`--module`."
+msgstr "LINK TO :option:`--module` AND :option:`-m`."
+
+msgid "link to :envvar:`env1` and :envvar:`env2`."
+msgstr "LINK TO :envvar:`env2` AND :envvar:`env1`."
+
+msgid "link to :token:`token1` and :token:`token2`."
+msgstr "LINK TO :token:`token2` AND :token:`token1`."
+
+msgid "link to :keyword:`i18n-role-xref` and :keyword:`same-type-links`."
+msgstr "LINK TO :keyword:`same-type-links` AND :keyword:`i18n-role-xref`."
@@ -7,3 +7,34 @@ i18n role xref

 link to :term:`Some term`, :ref:`i18n-role-xref`, :doc:`contents`.
+
+.. _same-type-links:
+
+same type links
+=================
+
+link to :term:`Some term` and :term:`Some other term`.
+
+link to :ref:`i18n-role-xref` and :ref:`same-type-links`.
+
+link to :doc:`contents` and :doc:`glossary_terms`.
+
+link to :option:`-m` and :option:`--module`.
+
+link to :envvar:`env1` and :envvar:`env2`.
+
+link to :token:`token1` and :token:`token2`.
+
+link to :keyword:`i18n-role-xref` and :keyword:`same-type-links`.
+
+
+.. option:: -m <module>
+
+.. option:: --module <module>
+
+.. envvar:: env1
+
+.. envvar:: env2
+
+.. productionlist::
+   token_stmt: `token1` ":" `token2`
@@ -21,7 +21,7 @@ except ImportError:
     pygments = None

 from sphinx import __version__
-from util import test_root, remove_unicode_literals, gen_with_app
+from util import test_root, remove_unicode_literals, gen_with_app, with_app
 from etree13 import ElementTree as ET
@@ -49,6 +49,7 @@ http://sphinx-doc.org/domains.html

 HTML_WARNINGS = ENV_WARNINGS + """\
 %(root)s/images.txt:20: WARNING: no matching candidate for image URI u'foo.\\*'
+None:\\d+: WARNING: citation not found: missing
 %(root)s/markup.txt:: WARNING: invalid single index entry u''
 %(root)s/markup.txt:: WARNING: invalid pair index entry u''
 %(root)s/markup.txt:: WARNING: invalid pair index entry u'keyword; '
@@ -344,3 +345,18 @@ def test_html(app):
         yield check_xpath, etree, fname, path, check

     check_static_entries(app.builder.outdir)
+
+
+@with_app(buildername='html', srcdir='(empty)',
+          confoverrides={'html_sidebars': {'*': ['globaltoc.html']}},
+          )
+def test_html_with_globaltoc_and_hidden_toctree(app):
+    # issue #1157: combination of 'globaltoc.html' and a hidden toctree
+    # caused an exception
+    (app.srcdir / 'contents.rst').write_text(
+        '\n.. toctree::'
+        '\n'
+        '\n.. toctree::'
+        '\n    :hidden:'
+        '\n')
+    app.builder.build_all()
|
@ -28,6 +28,7 @@ def teardown_module():
|
|||||||
latex_warnfile = StringIO()
|
latex_warnfile = StringIO()
|
||||||
|
|
||||||
LATEX_WARNINGS = ENV_WARNINGS + """\
|
LATEX_WARNINGS = ENV_WARNINGS + """\
|
||||||
|
None:None: WARNING: citation not found: missing
|
||||||
None:None: WARNING: no matching candidate for image URI u'foo.\\*'
|
None:None: WARNING: no matching candidate for image URI u'foo.\\*'
|
||||||
WARNING: invalid pair index entry u''
|
WARNING: invalid pair index entry u''
|
||||||
WARNING: invalid pair index entry u'keyword; '
|
WARNING: invalid pair index entry u'keyword; '
|
||||||
|
@@ -28,6 +28,7 @@ def teardown_module():
 texinfo_warnfile = StringIO()

 TEXINFO_WARNINGS = ENV_WARNINGS + """\
+None:None: WARNING: citation not found: missing
 None:None: WARNING: no matching candidate for image URI u'foo.\\*'
 None:None: WARNING: no matching candidate for image URI u'svgimg.\\*'
 """
@@ -14,6 +14,7 @@ import os
 import re
 from StringIO import StringIO
 from subprocess import Popen, PIPE
+from xml.etree import ElementTree

 from sphinx.util.pycompat import relpath
@@ -71,6 +72,34 @@ def teardown_module():
     (root / 'xx').rmtree(True)


+def elem_gettexts(elem):
+    def itertext(self):
+        # this function is copied from Python 2.7's ElementTree.itertext
+        # for compatibility with Python 2.5, 2.6 and 3.1
+        tag = self.tag
+        if not isinstance(tag, basestring) and tag is not None:
+            return
+        if self.text:
+            yield self.text
+        for e in self:
+            for s in itertext(e):
+                yield s
+            if e.tail:
+                yield e.tail
+    return filter(None, [s.strip() for s in itertext(elem)])
+
+
+def elem_getref(elem):
+    return elem.attrib.get('refid') or elem.attrib.get('refuri')
+
+
+def assert_elem_text_refs(elem, text, refs):
+    _text = elem_gettexts(elem)
+    assert _text == text
+    _refs = map(elem_getref, elem.findall('reference'))
+    assert _refs == refs
+
+
 @with_intl_app(buildername='text')
 def test_simple(app):
     app.builder.build(['bom'])
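A quick sanity check of these helpers on a hand-built tree (Python 2, matching the test suite; the XML snippet is invented):

    from xml.etree import ElementTree

    elem = ElementTree.fromstring(
        '<paragraph>LINK TO <reference refuri="http://python.org">Python'
        '</reference>.</paragraph>')
    print elem_gettexts(elem)                          # ['LINK TO', 'Python', '.']
    print map(elem_getref, elem.findall('reference'))  # ['http://python.org']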
@@ -194,48 +223,58 @@ def test_i18n_link_to_undefined_reference(app):
     assert len(re.findall(expected_expr, result)) == 1


-@with_intl_app(buildername='html', cleanenv=True)
+@with_intl_app(buildername='xml', cleanenv=True)
 def test_i18n_keep_external_links(app):
-    """regression test for #1044"""
+    # regression test for #1044
     app.builder.build(['external_links'])
-    result = (app.outdir / 'external_links.html').text(encoding='utf-8')
+    et = ElementTree.parse(app.outdir / 'external_links.xml')
+    secs = et.findall('section')
+
+    para0 = secs[0].findall('paragraph')
     # external link check
-    expect_line = (u'<li>EXTERNAL LINK TO <a class="reference external" '
-                   u'href="http://python.org">Python</a>.</li>')
-    matched = re.search('^<li>EXTERNAL LINK TO .*$', result, re.M)
-    matched_line = ''
-    if matched:
-        matched_line = matched.group()
-    assert expect_line == matched_line
+    assert_elem_text_refs(
+        para0[0],
+        ['EXTERNAL LINK TO', 'Python', '.'],
+        ['http://python.org/index.html'])

     # internal link check
-    expect_line = (u'<li><a class="reference internal" '
-                   u'href="#i18n-with-external-links">EXTERNAL '
-                   u'LINKS</a> IS INTERNAL LINK.</li>')
-    matched = re.search('^<li><a .* IS INTERNAL LINK.</li>$', result, re.M)
-    matched_line = ''
-    if matched:
-        matched_line = matched.group()
-    assert expect_line == matched_line
+    assert_elem_text_refs(
+        para0[1],
+        ['EXTERNAL LINKS', 'IS INTERNAL LINK.'],
+        ['i18n-with-external-links'])

     # inline link check
-    expect_line = (u'<li>INLINE LINK BY <a class="reference external" '
-                   u'href="http://sphinx-doc.org">SPHINX</a>.</li>')
-    matched = re.search('^<li>INLINE LINK BY .*$', result, re.M)
-    matched_line = ''
-    if matched:
-        matched_line = matched.group()
-    assert expect_line == matched_line
+    assert_elem_text_refs(
+        para0[2],
+        ['INLINE LINK BY', 'THE SPHINX SITE', '.'],
+        ['http://sphinx-doc.org'])

     # unnamed link check
-    expect_line = (u'<li>UNNAMED <a class="reference external" '
-                   u'href="http://google.com">LINK</a>.</li>')
-    matched = re.search('^<li>UNNAMED .*$', result, re.M)
-    matched_line = ''
-    if matched:
-        matched_line = matched.group()
-    assert expect_line == matched_line
+    assert_elem_text_refs(
+        para0[3],
+        ['UNNAMED', 'LINK', '.'],
+        ['http://google.com'])
+
+    # link target swapped translation
+    para1 = secs[1].findall('paragraph')
+    assert_elem_text_refs(
+        para1[0],
+        ['LINK TO', 'external2', 'AND', 'external1', '.'],
+        ['http://example.com/external2', 'http://example.com/external1'])
+    assert_elem_text_refs(
+        para1[1],
+        ['LINK TO', 'THE PYTHON SITE', 'AND', 'THE SPHINX SITE', '.'],
+        ['http://python.org', 'http://sphinx-doc.org'])
+
+    # multiple references in the same line
+    para2 = secs[2].findall('paragraph')
+    assert_elem_text_refs(
+        para2[0],
+        ['LINK TO', 'EXTERNAL LINKS', ',', 'Python', ',',
+         'THE SPHINX SITE', ',', 'UNNAMED', 'AND', 'THE PYTHON SITE', '.'],
+        ['i18n-with-external-links', 'http://python.org/index.html',
+         'http://sphinx-doc.org', 'http://google.com',
+         'http://python.org'])


 @with_intl_app(buildername='text', warning=warnfile, cleanenv=True)
@@ -292,24 +331,99 @@ def test_i18n_glossary_terms(app):
     assert 'term not in glossary' not in warnings


-@with_intl_app(buildername='text', warning=warnfile)
+@with_intl_app(buildername='xml', warning=warnfile)
 def test_i18n_role_xref(app):
-    # regression test for #1090
+    # regression test for #1090, #1193
     app.builddir.rmtree(True)  # for warnings acceleration
     app.builder.build(['role_xref'])
-    result = (app.outdir / 'role_xref.txt').text(encoding='utf-8')
-    expect = (
-        u"\nI18N ROCK'N ROLE XREF"
-        u"\n*********************\n"
-        u"\nLINK TO *I18N ROCK'N ROLE XREF*, *CONTENTS*, *SOME NEW TERM*.\n"
-    )
+    et = ElementTree.parse(app.outdir / 'role_xref.xml')
+    sec1, sec2 = et.findall('section')
+
+    para1, = sec1.findall('paragraph')
+    assert_elem_text_refs(
+        para1,
+        ['LINK TO', "I18N ROCK'N ROLE XREF", ',', 'CONTENTS', ',',
+         'SOME NEW TERM', '.'],
+        ['i18n-role-xref',
+         'contents',
+         'glossary_terms#term-some-term'])
+
+    para2 = sec2.findall('paragraph')
+    assert_elem_text_refs(
+        para2[0],
+        ['LINK TO', 'SOME OTHER NEW TERM', 'AND', 'SOME NEW TERM', '.'],
+        ['glossary_terms#term-some-other-term',
+         'glossary_terms#term-some-term'])
+    assert_elem_text_refs(
+        para2[1],
+        ['LINK TO', 'SAME TYPE LINKS', 'AND', "I18N ROCK'N ROLE XREF", '.'],
+        ['same-type-links', 'i18n-role-xref'])
+    assert_elem_text_refs(
+        para2[2],
+        ['LINK TO', 'I18N WITH GLOSSARY TERMS', 'AND', 'CONTENTS', '.'],
+        ['glossary_terms', 'contents'])
+    assert_elem_text_refs(
+        para2[3],
+        ['LINK TO', '--module', 'AND', '-m', '.'],
+        ['cmdoption--module', 'cmdoption-m'])
+    assert_elem_text_refs(
+        para2[4],
+        ['LINK TO', 'env2', 'AND', 'env1', '.'],
+        ['envvar-env2', 'envvar-env1'])
+    assert_elem_text_refs(
+        para2[5],
+        ['LINK TO', 'token2', 'AND', 'token1', '.'],
+        [])  # TODO: how do I link the token role to a productionlist?
+    assert_elem_text_refs(
+        para2[6],
+        ['LINK TO', 'same-type-links', 'AND', "i18n-role-xref", '.'],
+        ['same-type-links', 'i18n-role-xref'])
+
+    # warnings
     warnings = warnfile.getvalue().replace(os.sep, '/')
     assert 'term not in glossary' not in warnings
     assert 'undefined label' not in warnings
     assert 'unknown document' not in warnings

-    assert result == expect
+
+@with_intl_app(buildername='xml', warning=warnfile)
+def test_i18n_label_target(app):
+    # regression test for #1193
+    app.builder.build(['label_target'])
+    et = ElementTree.parse(app.outdir / 'label_target.xml')
+    secs = et.findall('section')
+
+    # debug
+    print (app.outdir / 'label_target.xml').text()
+
+    para0 = secs[0].findall('paragraph')
+    assert_elem_text_refs(
+        para0[0],
+        ['X SECTION AND LABEL', 'POINT TO', 'implicit-target', 'AND',
+         'X SECTION AND LABEL', 'POINT TO', 'section-and-label', '.'],
+        ['implicit-target', 'section-and-label'])
+
+    para1 = secs[1].findall('paragraph')
+    assert_elem_text_refs(
+        para1[0],
+        ['X EXPLICIT-TARGET', 'POINT TO', 'explicit-target', 'AND',
+         'X EXPLICIT-TARGET', 'POINT TO DUPLICATED ID LIKE', 'id1', '.'],
+        ['explicit-target', 'id1'])
+
+    para2 = secs[2].findall('paragraph')
+    assert_elem_text_refs(
+        para2[0],
+        ['X IMPLICIT SECTION NAME', 'POINT TO', 'implicit-section-name',
+         '.'],
+        ['implicit-section-name'])
+
+    sec2 = secs[2].findall('section')
+
+    para2_0 = sec2[0].findall('paragraph')
+    assert_elem_text_refs(
+        para2_0[0],
+        ['`X DUPLICATED SUB SECTION`_', 'IS BROKEN LINK.'],
+        [])


 @with_intl_app(buildername='text', warning=warnfile)
@@ -11,13 +11,20 @@

 import sys
 import time
+from StringIO import StringIO
+import tempfile

-from util import raises, with_tempdir
+from util import raises, with_tempdir, with_app

+from sphinx import application
 from sphinx import quickstart as qs
 from sphinx.util.console import nocolor, coloron
 from sphinx.util.pycompat import execfile_


+warnfile = StringIO()


 def setup_module():
     nocolor()
@@ -28,6 +35,12 @@ def mock_raw_input(answers, needanswer=False):
             raise AssertionError('answer for %r missing and no default '
                                  'present' % prompt)
         called.add(prompt)
+        if sys.version_info < (3, 0):
+            prompt = str(prompt)  # Python 2.x raw_input emulation:
+            # `raw_input` encodes the prompt with the default encoding to print it.
+        else:
+            prompt = unicode(prompt)  # Python 3.x input emulation:
+            # `input` decodes the prompt with the default encoding before printing.
         for question in answers:
             if prompt.startswith(qs.PROMPT_PREFIX + question):
                 return answers[question]
@@ -95,6 +108,16 @@ def test_do_prompt():
     raises(AssertionError, qs.do_prompt, d, 'k6', 'Q6', validator=qs.boolean)


+def test_do_prompt_with_multibyte():
+    d = {}
+    answers = {
+        'Q1': u'\u30c9\u30a4\u30c4',
+    }
+    qs.term_input = mock_raw_input(answers)
+    qs.do_prompt(d, 'k1', 'Q1', default=u'\u65e5\u672c')
+    assert d['k1'] == u'\u30c9\u30a4\u30c4'
+
+
 @with_tempdir
 def test_quickstart_defaults(tempdir):
     answers = {
@@ -214,3 +237,51 @@ def test_generated_files_eol(tempdir):

     assert_eol(tempdir / 'make.bat', '\r\n')
     assert_eol(tempdir / 'Makefile', '\n')
+
+
+@with_tempdir
+def test_quickstart_and_build(tempdir):
+    answers = {
+        'Root path': tempdir,
+        'Project name': u'Fullwidth characters: \u30c9\u30a4\u30c4',
+        'Author name': 'Georg Brandl',
+        'Project version': '0.1',
+    }
+    qs.term_input = mock_raw_input(answers)
+    d = {}
+    qs.ask_user(d)
+    qs.generate(d)
+
+    app = application.Sphinx(
+        tempdir,  # srcdir
+        tempdir,  # confdir
+        (tempdir / '_build' / 'html'),  # outdir
+        (tempdir / '_build' / '.doctree'),  # doctreedir
+        'html',  # buildername
+        status=StringIO(),
+        warning=warnfile)
+    app.builder.build_all()
+    warnings = warnfile.getvalue()
+    assert not warnings
+
+
+@with_tempdir
+def test_default_filename(tempdir):
+    answers = {
+        'Root path': tempdir,
+        'Project name': u'\u30c9\u30a4\u30c4',  # fullwidth characters only
+        'Author name': 'Georg Brandl',
+        'Project version': '0.1',
+    }
+    qs.term_input = mock_raw_input(answers)
+    d = {}
+    qs.ask_user(d)
+    qs.generate(d)
+
+    conffile = tempdir / 'conf.py'
+    assert conffile.isfile()
+    ns = {}
+    execfile_(conffile, ns)
+    assert ns['latex_documents'][0][1] == 'sphinx.tex'
+    assert ns['man_pages'][0][1] == 'sphinx'
+    assert ns['texinfo_documents'][0][1] == 'sphinx'