mirror of https://github.com/sphinx-doc/sphinx.git (synced 2025-02-25 18:55:22 -06:00)
Merge pull request #5377 from tk0miya/4034_fix_download_url2
Fix #2720, #4034: Incorrect links with ``:download:``, duplicate names, and parallel builds
This commit is contained in: commit 4abc55239a
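In short: files referenced with the ``:download:`` role are no longer copied into a flat ``_downloads/`` directory under uniquified names, which could produce wrong or colliding links when two sources shared a basename (#2720) and inconsistent names under parallel builds (#4034). Each file now lands at ``_downloads/<md5 of its source path>/<basename>``, tracked by a new ``DownloadFiles`` dictionary in ``sphinx.util``. The diff below touches the CHANGES entry, the HTML builder, the build environment, the utility module, and both HTML test suites.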
CHANGES
@@ -9,6 +9,7 @@ Incompatible changes
 * #5282: html theme: refer ``pygments_style`` settings of HTML themes
   preferentially
+* The URL of download files are changed
 
 Deprecated
 ----------
@@ -35,6 +36,8 @@ Bugs fixed
 * #4379: toctree shows confusible warning when document is excluded
 * #2401: autodoc: ``:members:`` causes ``:special-members:`` not to be shown
 * autodoc: ImportError is replaced by AttributeError for deeper module
+* #2720, #4034: Incorrect links with ``:download:``, duplicate names, and
+  parallel builds
 
 Testing
 --------
sphinx/builders/html.py
@@ -864,10 +864,10 @@ class StandaloneHTMLBuilder(Builder):
             for src in status_iterator(self.env.dlfiles, __('copying downloadable files... '),
                                        "brown", len(self.env.dlfiles), self.app.verbosity,
                                        stringify_func=to_relpath):
-                dest = self.env.dlfiles[src][1]
                 try:
-                    copyfile(path.join(self.srcdir, src),
-                             path.join(self.outdir, '_downloads', dest))
+                    dest = path.join(self.outdir, '_downloads', self.env.dlfiles[src][1])
+                    ensuredir(path.dirname(dest))
+                    copyfile(path.join(self.srcdir, src), dest)
                 except EnvironmentError as err:
                     logger.warning(__('cannot copy downloadable file %r: %s'),
                                    path.join(self.srcdir, src), err)
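The new ``ensuredir()`` call matters because each download now lives in its own digest-named subdirectory, which does not exist until the first copy. A standalone toy sketch of the resulting layout, using plain ``os``/``shutil`` rather than Sphinx's helpers (the function name and paths are invented for illustration):

    import os
    import shutil
    from hashlib import md5

    def copy_download(srcdir, outdir, source):
        # Destination follows the new scheme: _downloads/<md5(source path)>/<basename>
        digest = md5(source.encode('utf-8')).hexdigest()
        dest = os.path.join(outdir, '_downloads', digest, os.path.basename(source))
        os.makedirs(os.path.dirname(dest), exist_ok=True)  # the ensuredir() step
        shutil.copyfile(os.path.join(srcdir, source), dest)
        return dest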
sphinx/environment/__init__.py
@@ -28,7 +28,7 @@ from sphinx.environment.adapters.toctree import TocTree
 from sphinx.errors import SphinxError, BuildEnvironmentError, DocumentError, ExtensionError
 from sphinx.locale import __
 from sphinx.transforms import SphinxTransformer
-from sphinx.util import get_matching_docs, FilenameUniqDict
+from sphinx.util import get_matching_docs, DownloadFiles, FilenameUniqDict
 from sphinx.util import logging
 from sphinx.util.docutils import LoggingReporter
 from sphinx.util.i18n import find_catalog_files
@@ -190,7 +190,8 @@ class BuildEnvironment(object):
 
         # these map absolute path -> (docnames, unique filename)
         self.images = FilenameUniqDict()    # type: FilenameUniqDict
-        self.dlfiles = FilenameUniqDict()   # type: FilenameUniqDict
+        self.dlfiles = DownloadFiles()      # type: DownloadFiles
+                                            # filename -> (set of docnames, destination)
 
         # the original URI for images
         self.original_image_uri = {}        # type: Dict[unicode, unicode]
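``env.dlfiles`` keys remain absolute source paths, but the stored destination is now ``<digest>/<basename>`` instead of a flat uniquified filename. A quick check of the new behavior (this assumes a Sphinx checkout containing this commit; the docnames and paths are made up):

    from sphinx.util import DownloadFiles

    dlfiles = DownloadFiles()
    dest_a = dlfiles.add_file('includes', '/src/img.png')
    dest_b = dlfiles.add_file('subdir/includes', '/src/subdir/img.png')

    # Same basename, distinct digest directories -- the duplicate-name
    # collision from #2720 is gone:
    assert dest_a != dest_b
    assert dest_a.endswith('/img.png') and dest_b.endswith('/img.png')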
sphinx/util/__init__.py
@@ -22,6 +22,7 @@ import warnings
 from codecs import BOM_UTF8
 from collections import deque
 from datetime import datetime
+from hashlib import md5
 from os import path
 from time import mktime, strptime
 
@@ -167,6 +168,37 @@ class FilenameUniqDict(dict):
         self._existing = state
 
 
+class DownloadFiles(dict):
+    """A special dictionary for download files.
+
+    .. important:: This class would be refactored in nearly future.
+                   Hence don't hack this directly.
+    """
+
+    def add_file(self, docname, filename):
+        # type: (unicode, unicode) -> None
+        if filename not in self:
+            digest = md5(filename.encode('utf-8')).hexdigest()
+            dest = '%s/%s' % (digest, os.path.basename(filename))
+            self[filename] = (set(), dest)
+
+        self[filename][0].add(docname)
+        return self[filename][1]
+
+    def purge_doc(self, docname):
+        # type: (unicode) -> None
+        for filename, (docs, dest) in list(self.items()):
+            docs.discard(docname)
+            if not docs:
+                del self[filename]
+
+    def merge_other(self, docnames, other):
+        # type: (Set[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None
+        for filename, (docs, dest) in other.items():
+            for docname in docs & set(docnames):
+                self.add_file(docname, filename)
+
+
 def copy_static_entry(source, targetdir, builder, context={},
                       exclude_matchers=(), level=0):
     # type: (unicode, unicode, Any, Dict, Tuple[Callable, ...], int) -> None
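Note that the destination is a pure function of the source path, so independent worker processes compute identical destinations; that is what fixes the parallel-build half of #4034, and ``merge_other()`` then only needs to re-register docnames. A sketch of the merge and purge semantics (again assuming a checkout with this commit; docnames and paths are invented):

    from sphinx.util import DownloadFiles

    main = DownloadFiles()
    worker = DownloadFiles()
    worker.add_file('docA', '/src/img.png')
    worker.add_file('docB', '/src/img.png')

    # Merge back only the documents this worker actually owned:
    main.merge_other({'docA'}, worker)
    docs, dest = main['/src/img.png']
    assert docs == {'docA'}
    assert dest == worker['/src/img.png'][1]  # identical: dest derives from the path

    # purge_doc() drops a file once no document references it any more:
    main.purge_doc('docA')
    assert '/src/img.png' not in main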
tests/test_build_html.py
@@ -151,7 +151,7 @@ def test_html_warnings(app, warning):
         (".//img[@src='../_images/rimg.png']", ''),
     ],
     'subdir/includes.html': [
-        (".//a[@href='../_downloads/img.png']", ''),
+        (".//a[@class='reference download internal']", ''),
         (".//img[@src='../_images/img.png']", ''),
         (".//p", 'This is an include file.'),
         (".//pre/span", 'line 1'),
@@ -159,8 +159,7 @@ def test_html_warnings(app, warning):
     ],
     'includes.html': [
         (".//pre", u'Max Strauß'),
-        (".//a[@href='_downloads/img.png']", ''),
-        (".//a[@href='_downloads/img1.png']", ''),
+        (".//a[@class='reference download internal']", ''),
         (".//pre/span", u'"quotes"'),
         (".//pre/span", u"'included'"),
         (".//pre/span[@class='s2']", u'üöä'),
@@ -421,6 +420,31 @@ def test_html_output(app, cached_etree_parse, fname, expect):
     check_xpath(cached_etree_parse(app.outdir / fname), fname, *expect)
 
 
+@pytest.mark.sphinx('html', tags=['testtag'], confoverrides={
+    'html_context.hckey_co': 'hcval_co'})
+@pytest.mark.test_params(shared_result='test_build_html_output')
+def test_html_download(app):
+    app.build()
+
+    # subdir/includes.html
+    result = (app.outdir / 'subdir' / 'includes.html').text()
+    pattern = ('<a class="reference download internal" download="" '
+               'href="../(_downloads/.*/img.png)">')
+    matched = re.search(pattern, result)
+    assert matched
+    assert (app.outdir / matched.group(1)).exists()
+    filename = matched.group(1)
+
+    # includes.html
+    result = (app.outdir / 'includes.html').text()
+    pattern = ('<a class="reference download internal" download="" '
+               'href="(_downloads/.*/img.png)">')
+    matched = re.search(pattern, result)
+    assert matched
+    assert (app.outdir / matched.group(1)).exists()
+    assert matched.group(1) == filename
+
+
 @pytest.mark.sphinx('html', testroot='build-html-translator')
 def test_html_translator(app):
     app.build()
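The wildcard in the pattern is deliberate: the digest directory is the md5 of the absolute source path, which differs per checkout, so the test captures ``_downloads/.*/img.png`` and instead asserts that the linked file exists on disk and that both pages agree on the same destination.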
tests/test_build_html5.py
@@ -14,7 +14,9 @@
     :license: BSD, see LICENSE for details.
 """
 
+import re
 import xml.etree.cElementTree as ElementTree
+from hashlib import md5
 
 import pytest
 from html5lib import getTreeBuilder, HTMLParser
@@ -58,7 +60,7 @@ def cached_etree_parse():
         (".//img[@src='../_images/rimg.png']", ''),
     ],
     'subdir/includes.html': [
-        (".//a[@href='../_downloads/img.png']", ''),
+        (".//a[@class='reference download internal']", ''),
         (".//img[@src='../_images/img.png']", ''),
         (".//p", 'This is an include file.'),
         (".//pre/span", 'line 1'),
@@ -66,8 +68,7 @@ def cached_etree_parse():
     ],
     'includes.html': [
         (".//pre", u'Max Strauß'),
-        (".//a[@href='_downloads/img.png']", ''),
-        (".//a[@href='_downloads/img1.png']", ''),
+        (".//a[@class='reference download internal']", ''),
         (".//pre/span", u'"quotes"'),
         (".//pre/span", u"'included'"),
         (".//pre/span[@class='s2']", u'üöä'),
@@ -323,17 +324,45 @@ def test_html5_output(app, cached_etree_parse, fname, expect):
     check_xpath(cached_etree_parse(app.outdir / fname), fname, *expect)
 
 
+@pytest.mark.sphinx('html', tags=['testtag'], confoverrides={
+    'html_context.hckey_co': 'hcval_co',
+    'html_experimental_html5_writer': True})
+@pytest.mark.test_params(shared_result='test_build_html_output')
+def test_html_download(app):
+    app.build()
+
+    # subdir/includes.html
+    result = (app.outdir / 'subdir' / 'includes.html').text()
+    pattern = ('<a class="reference download internal" download="" '
+               'href="../(_downloads/.*/img.png)">')
+    matched = re.search(pattern, result)
+    assert matched
+    assert (app.outdir / matched.group(1)).exists()
+    filename = matched.group(1)
+
+    # includes.html
+    result = (app.outdir / 'includes.html').text()
+    pattern = ('<a class="reference download internal" download="" '
+               'href="(_downloads/.*/img.png)">')
+    matched = re.search(pattern, result)
+    assert matched
+    assert (app.outdir / matched.group(1)).exists()
+    assert matched.group(1) == filename
+
+
 @pytest.mark.sphinx('html', testroot='roles-download',
                     confoverrides={'html_experimental_html5_writer': True})
 def test_html_download_role(app, status, warning):
     app.build()
-    assert (app.outdir / '_downloads' / 'dummy.dat').exists()
+    digest = md5((app.srcdir / 'dummy.dat').encode('utf-8')).hexdigest()
+    assert (app.outdir / '_downloads' / digest / 'dummy.dat').exists()
+
     content = (app.outdir / 'index.html').text()
-    assert ('<li><p><a class="reference download internal" download="" '
-            'href="_downloads/dummy.dat">'
-            '<code class="xref download docutils literal notranslate">'
-            '<span class="pre">dummy.dat</span></code></a></p></li>' in content)
+    assert (('<li><p><a class="reference download internal" download="" '
+             'href="_downloads/%s/dummy.dat">'
+             '<code class="xref download docutils literal notranslate">'
+             '<span class="pre">dummy.dat</span></code></a></p></li>' % digest)
+            in content)
     assert ('<li><p><code class="xref download docutils literal notranslate">'
             '<span class="pre">not_found.dat</span></code></p></li>' in content)
     assert ('<li><p><a class="reference download external" download="" '
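A subtlety in the updated ``test_html_download_role``: the expected digest is computed from the string form of the absolute source path (``app.srcdir / 'dummy.dat'`` is a str-subclass path object, so ``.encode('utf-8')`` works on it directly), which matches what ``DownloadFiles.add_file()`` hashes.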