Mirror of https://github.com/sphinx-doc/sphinx.git, synced 2025-02-25 18:55:22 -06:00

Rebuild all HTML files in case of a template change.
Also, record image docnames in order to be able to delete image records from the env.
This commit is contained in:
parent ba47f283f6
commit 8057b0f80e
CHANGES

@@ -13,6 +13,8 @@ Changes in trunk
 * sphinx.ext.autodoc: Record files from which docstrings are included
   as dependencies.
 
+* sphinx.builder: Rebuild all HTML files in case of a template change.
+
 * sphinx.builder: Handle unavailability of TOC relations (previous/
   next chapter) more gracefully in the HTML builder.
@@ -12,7 +12,7 @@ The most important concept in Jinja is :dfn:`template inheritance`, which means
 that you can overwrite only specific blocks within a template, customizing it
 while also keeping the changes at a minimum.
 
-Inheritance is done via two directives, ``extends`` and ``block``.
+Inheritance is done via two (Jinja) directives, ``extends`` and ``block``.
 
 .. template path
    blocks
@@ -25,7 +25,8 @@ from docutils.frontend import OptionParser
 from docutils.readers.doctree import Reader as DoctreeReader
 
 from sphinx import addnodes
-from sphinx.util import (get_matching_docs, ensuredir, relative_uri, SEP, os_path)
+from sphinx.util import (get_matching_docs, mtimes_of_files,
+                         ensuredir, relative_uri, SEP, os_path)
 from sphinx.htmlhelp import build_hhx
 from sphinx.htmlwriter import HTMLWriter, HTMLTranslator, SmartyPantsHTMLTranslator
 from sphinx.latexwriter import LaTeXWriter
@@ -83,6 +84,7 @@ class Builder(object):
         base_templates_path = path.join(path.dirname(__file__), 'templates')
         ext_templates_path = [path.join(self.srcdir, dir)
                               for dir in self.config.templates_path]
+        self.templates_path = [base_templates_path] + ext_templates_path
         loader = SphinxFileSystemLoader(base_templates_path, ext_templates_path)
         self.jinja_env = Environment(loader=loader,
                                      # disable traceback, more likely that something
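
For orientation, a small standalone sketch (not part of the commit) of what the new ``self.templates_path`` attribute ends up holding; the directory names and the ``conf.py`` value are invented for illustration:

from os import path

# Invented example values -- only the list-building logic mirrors the diff above.
srcdir = '/project/doc'
config_templates_path = ['_templates']          # as set in conf.py
base_templates_path = '/opt/sphinx/templates'   # Sphinx's built-in templates (made-up path)

ext_templates_path = [path.join(srcdir, d) for d in config_templates_path]
templates_path = [base_templates_path] + ext_templates_path
print(templates_path)
# ['/opt/sphinx/templates', '/project/doc/_templates']

Keeping the list on the builder is what later lets ``get_outdated_docs()`` stat every ``.html`` template under these directories.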
@@ -277,8 +279,9 @@ class StandaloneHTMLBuilder(Builder):
     Builds standalone HTML docs.
     """
     name = 'html'
     copysource = True
     out_suffix = '.html'
+    indexer_format = 'json'
 
     def init(self):
         """Load templates."""
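
Making the index format a class attribute pays off in ``load_indexer()`` further down, which now opens ``'searchindex.' + self.indexer_format``; the pickle builder can then reuse the method by overriding a single attribute. A minimal sketch of that pattern (the class names are invented, not Sphinx's):

class BaseBuilderSketch(object):
    indexer_format = 'json'

    def index_filename(self):
        # Stand-in for the open() call in load_indexer() below.
        return 'searchindex.' + self.indexer_format


class PickleBuilderSketch(BaseBuilderSketch):
    # Overriding one attribute is enough; no method override needed.
    indexer_format = 'pickle'


assert BaseBuilderSketch().index_filename() == 'searchindex.json'
assert PickleBuilderSketch().index_filename() == 'searchindex.pickle'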
@@ -485,7 +488,7 @@ class StandaloneHTMLBuilder(Builder):
         if self.env.images:
             self.info(bold('copying images...'), nonl=1)
             ensuredir(path.join(self.outdir, '_images'))
-            for src, dest in self.env.images.iteritems():
+            for src, (_, dest) in self.env.images.iteritems():
                 self.info(' '+src, nonl=1)
                 shutil.copyfile(path.join(self.srcdir, src),
                                 path.join(self.outdir, '_images', dest))
@@ -510,29 +513,26 @@ class StandaloneHTMLBuilder(Builder):
         # dump the search index
         self.handle_finish()
 
-    # --------- these are overwritten by the Pickle builder
-
-    def get_target_uri(self, docname, typ=None):
-        return docname + '.html'
-
     def get_outdated_docs(self):
+        template_mtime = max(mtimes_of_files(self.templates_path, '.html'))
         for docname in self.env.found_docs:
-            targetname = self.env.doc2path(docname, self.outdir, '.html')
-            try:
-                targetmtime = path.getmtime(targetname)
-            except:
-                targetmtime = 0
             if docname not in self.env.all_docs:
                 yield docname
-            elif path.getmtime(self.env.doc2path(docname)) > targetmtime:
+                continue
+            targetname = self.env.doc2path(docname, self.outdir, self.out_suffix)
+            try:
+                targetmtime = path.getmtime(targetname)
+            except Exception:
+                targetmtime = 0
+            srcmtime = max(path.getmtime(self.env.doc2path(docname)), template_mtime)
+            if srcmtime > targetmtime:
                 yield docname
-
 
     def load_indexer(self, docnames):
         try:
-            f = open(path.join(self.outdir, 'searchindex.json'), 'r')
+            f = open(path.join(self.outdir, 'searchindex.'+self.indexer_format), 'r')
             try:
-                self.indexer.load(f, 'json')
+                self.indexer.load(f, self.indexer_format)
             finally:
                 f.close()
         except (IOError, OSError):
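
In plain terms, the rewritten ``get_outdated_docs()`` marks a page as outdated when its source or any HTML template is newer than the rendered output, or when the output is missing. A rough standalone sketch of that decision; the helper name ``is_outdated`` is invented for illustration:

from os import path


def is_outdated(srcfile, targetfile, template_mtimes):
    # template_mtimes: iterable of mtimes of all *.html templates,
    # e.g. produced by the mtimes_of_files() helper added further down.
    template_mtime = max(list(template_mtimes) or [0])
    try:
        targetmtime = path.getmtime(targetfile)
    except Exception:
        targetmtime = 0          # no output yet -> always rebuild
    srcmtime = max(path.getmtime(srcfile), template_mtime)
    return srcmtime > targetmtime

Because the template mtime enters the ``max()``, touching any template makes every page outdated, which is the behaviour announced in the commit message.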
@@ -545,6 +545,11 @@ class StandaloneHTMLBuilder(Builder):
         if self.indexer is not None and title:
             self.indexer.feed(pagename, title, doctree)
 
+    # --------- these are overwritten by the Pickle builder
+
+    def get_target_uri(self, docname, typ=None):
+        return docname + '.html'
+
     def handle_page(self, pagename, addctx, templatename='page.html'):
         ctx = self.globalcontext.copy()
         ctx['current_page_name'] = pagename
@@ -593,20 +598,12 @@ class PickleHTMLBuilder(StandaloneHTMLBuilder):
     Builds HTML docs without rendering templates.
     """
     name = 'pickle'
     out_suffix = '.fpickle'
+    indexer_format = 'pickle'
 
     def init(self):
         self.init_translator_class()
 
-    def get_outdated_docs(self):
-        for docname in self.env.found_docs:
-            targetname = self.env.doc2path(docname, self.outdir, '.fpickle')
-            try:
-                targetmtime = path.getmtime(targetname)
-            except:
-                targetmtime = 0
-            if path.getmtime(self.env.doc2path(docname)) > targetmtime:
-                yield docname
-
     def get_target_uri(self, docname, typ=None):
         if docname == 'index':
             return ''
@@ -614,23 +611,6 @@ class PickleHTMLBuilder(StandaloneHTMLBuilder):
             return docname[:-5] # up to sep
         return docname + SEP
 
-    def load_indexer(self, docnames):
-        try:
-            f = open(path.join(self.outdir, 'searchindex.pickle'), 'r')
-            try:
-                self.indexer.load(f, 'pickle')
-            finally:
-                f.close()
-        except (IOError, OSError):
-            pass
-        # delete all entries for files that will be rebuilt
-        self.indexer.prune(set(self.env.all_docs) - set(docnames))
-
-    def index_page(self, pagename, doctree, title):
-        # only index pages with title
-        if self.indexer is not None and title:
-            self.indexer.feed(pagename, title, doctree)
-
     def handle_page(self, pagename, ctx, templatename='page.html'):
         ctx['current_page_name'] = pagename
         sidebarfile = self.config.html_sidebars.get(pagename, '')
@@ -815,7 +795,7 @@ class LaTeXBuilder(Builder):
         # copy image files
         if self.env.images:
             self.info(bold('copying images...'), nonl=1)
-            for src, dest in self.env.images.iteritems():
+            for src, (_, dest) in self.env.images.iteritems():
                 self.info(' '+src, nonl=1)
                 shutil.copyfile(path.join(self.srcdir, src),
                                 path.join(self.outdir, dest))
@@ -55,9 +55,9 @@ default_settings = {
     'sectsubtitle_xform': False,
 }
 
-# This is increased every time a new environment attribute is added
-# to properly invalidate pickle files.
-ENV_VERSION = 20
+# This is increased every time an environment attribute is added
+# or changed to properly invalidate pickle files.
+ENV_VERSION = 21
 
 
 def walk_depth(node, depth, maxdepth):
@@ -251,7 +251,7 @@ class BuildEnvironment:
                                     # (type, string, target, aliasname)
         self.versionchanges = {}    # version -> list of
                                     # (type, docname, lineno, module, descname, content)
-        self.images = {}            # absolute path -> unique filename
+        self.images = {}            # absolute path -> (docnames, unique filename)
 
         # These are set while parsing a file
         self.docname = None         # current document name
@@ -287,13 +287,14 @@ class BuildEnvironment:
         self.titles.pop(docname, None)
         self.tocs.pop(docname, None)
         self.toc_num_entries.pop(docname, None)
+        self.filemodules.pop(docname, None)
+        self.indexentries.pop(docname, None)
 
         for subfn, fnset in self.files_to_rebuild.iteritems():
             fnset.discard(docname)
         for fullname, (fn, _) in self.descrefs.items():
             if fn == docname:
                 del self.descrefs[fullname]
-        self.filemodules.pop(docname, None)
         for modname, (fn, _, _, _) in self.modules.items():
             if fn == docname:
                 del self.modules[modname]
@@ -303,10 +304,13 @@ class BuildEnvironment:
         for key, (fn, _) in self.reftargets.items():
             if fn == docname:
                 del self.reftargets[key]
-        self.indexentries.pop(docname, None)
         for version, changes in self.versionchanges.items():
             new = [change for change in changes if change[1] != docname]
             changes[:] = new
+        for fullpath, (docs, _) in self.images.items():
+            docs.discard(docname)
+            if not docs:
+                del self.images[fullpath]
 
     def doc2path(self, docname, base=True, suffix=None):
         """
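
A toy example (paths and docnames invented) of the new ``env.images`` record shape, absolute path -> (set of docnames using the image, unique output filename), and of the pruning loop just added to ``clear_doc()``:

def clear_doc_images(images, docname):
    # Same logic as the loop added above: forget this document everywhere,
    # and drop records that no document references any more.
    for fullpath, (docs, _) in list(images.items()):
        docs.discard(docname)
        if not docs:
            del images[fullpath]


images = {
    '/src/pics/logo.png': (set(['intro', 'install']), 'logo.png'),
    '/src/other/logo.png': (set(['intro']), 'logo1.png'),
}
clear_doc_images(images, 'intro')      # second record disappears
assert sorted(images) == ['/src/pics/logo.png']
clear_doc_images(images, 'install')    # nothing references the first one either
assert images == {}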
@@ -501,6 +505,7 @@ class BuildEnvironment:
         """
         Process and rewrite image URIs.
         """
+        existing_names = set(v[1] for v in self.images.itervalues())
         docdir = path.dirname(self.doc2path(docname, base=None))
         for node in doctree.traverse(nodes.image):
             imguri = node['uri']
@@ -513,15 +518,16 @@ class BuildEnvironment:
             if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
                 self.warn(docname, 'Image file not readable: %s' % imguri, node.line)
-            names = set(self.images.values())
+            if imgpath in self.images:
+                self.images[imgpath][0].add(docname)
+                continue
             uniquename = path.basename(imgpath)
             base, ext = path.splitext(uniquename)
             i = 0
-            while uniquename in names:
+            while uniquename in existing_names:
                 i += 1
                 uniquename = '%s%s%s' % (base, i, ext)
-            self.images[imgpath] = uniquename
+            self.images[imgpath] = (set([docname]), uniquename)
+            existing_names.add(uniquename)
 
     def process_metadata(self, docname, doctree):
         """
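
The renaming loop above only has to resolve basename collisions between different source images; a compact sketch of just that part, with the function name invented for illustration:

from os import path


def unique_image_name(imgpath, existing_names):
    # Mirrors the while-loop above: keep the basename if it is free,
    # otherwise append 1, 2, ... to the stem until it is unique.
    uniquename = path.basename(imgpath)
    base, ext = path.splitext(uniquename)
    i = 0
    while uniquename in existing_names:
        i += 1
        uniquename = '%s%s%s' % (base, i, ext)
    existing_names.add(uniquename)
    return uniquename


names = set()
print(unique_image_name('pics/logo.png', names))    # logo.png
print(unique_image_name('other/logo.png', names))   # logo1.png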
@@ -253,7 +253,7 @@ class HTMLTranslator(BaseTranslator):
         # rewrite the URI if the environment knows about it
         if olduri in self.builder.env.images:
             node['uri'] = path.join(self.builder.imgpath,
-                                    self.builder.env.images[olduri])
+                                    self.builder.env.images[olduri][1])
         BaseTranslator.visit_image(self, node)
 
     def visit_toctree(self, node):
@@ -547,7 +547,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
         pre.reverse()
         self.body.extend(pre)
         # XXX: for now, don't fiddle around with graphics formats
-        uri = self.builder.env.images.get(node['uri'], node['uri'])
+        if node['uri'] in self.builder.env.images:
+            uri = self.builder.env.images[node['uri']][1]
+        else:
+            uri = node['uri']
         self.body.append('\\includegraphics%s{%s}' % (include_graphics_options, uri))
         self.body.extend(post)
     def depart_image(self, node):
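
With the new record shape, both writers index element ``[1]`` to get the unique output filename; a one-line illustration with made-up values:

images = {'/src/pics/logo.png': (set(['intro']), 'logo.png')}
olduri = '/src/pics/logo.png'
assert images[olduri][1] == 'logo.png'   # the unique filename, not the docname set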
@@ -54,6 +54,8 @@ def get_matching_docs(dirname, suffix, exclude=(), prune=()):
     """
     Get all file names (without suffix) matching a suffix in a
     directory, recursively.
+
+    Exclude files in *exclude*, prune directories in *prune*.
     """
     pattern = '*' + suffix
     # dirname is a normalized absolute path.
@@ -75,6 +77,17 @@ def get_matching_docs(dirname, suffix, exclude=(), prune=()):
         yield qualified_name
 
 
+def mtimes_of_files(dirnames, suffix):
+    for dirname in dirnames:
+        for root, dirs, files in os.walk(dirname):
+            for sfile in files:
+                if sfile.endswith(suffix):
+                    try:
+                        yield path.getmtime(path.join(root, sfile))
+                    except EnvironmentError:
+                        pass
+
+
 def shorten_result(text='', keywords=[], maxlen=240, fuzz=60):
     if not text:
         text = ''
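
A usage sketch mirroring the call added in ``get_outdated_docs()`` above; the directory list is illustrative, and the import path matches where this diff places the helper:

from sphinx.util import mtimes_of_files

template_dirs = ['/opt/sphinx/templates', '/project/doc/_templates']  # illustrative
# Newest template modification time; max() raises ValueError on an empty
# sequence, so the directories are expected to hold at least one .html template.
template_mtime = max(mtimes_of_files(template_dirs, '.html'))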