Apply Tim Golden's patch from #1520, which resolves the confusion between file paths and relative URIs so that building on Windows works flawlessly.

Georg Brandl 2007-12-03 22:38:25 +00:00
parent 4349b88f75
commit cfa8045490
5 changed files with 89 additions and 30 deletions
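
The core of the patch is the pair of helpers added to sphinx.util, webify_filepath() and unwebify_filepath(): document names are kept internally with a fixed "/" separator and converted back to the native separator only at the moment of filesystem access. A minimal sketch of that convention (the helper names and WEB_SEP come from the patch below; the directory and file names are made up for illustration):

    import os
    from os import path

    WEB_SEP = "/"

    def webify_filepath(filepath):
        # native separators -> "/" (the form kept in the build environment)
        return filepath.replace(os.path.sep, WEB_SEP)

    def unwebify_filepath(webpath):
        # "/" -> native separators (the form handed to os.path and open())
        return webpath.replace(WEB_SEP, os.path.sep)

    srcdir = r"C:\project\doc"                      # hypothetical source directory
    filename = webify_filepath(path.join("api", "objects.rst"))
    # filename == "api/objects.rst" on Windows and on Unix alike
    src_path = path.join(srcdir, unwebify_filepath(filename))
    # on Windows: src_path == r"C:\project\doc\api\objects.rst"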

View File

@@ -27,13 +27,15 @@ from docutils.readers import doctree
from docutils.frontend import OptionParser
from .util import (get_matching_files, attrdict, status_iterator,
ensuredir, get_category, relative_uri)
from .writer import HTMLWriter
from .util.console import bold, purple, green
ensuredir, get_category, relative_uri,
webify_filepath, unwebify_filepath)
from .htmlhelp import build_hhx
from .patchlevel import get_version_info, get_sys_version_info
from .htmlwriter import HTMLWriter
#from .latexwriter import LaTeXWriter
from .environment import BuildEnvironment
from .highlighting import pygments, get_stylesheet
from .util.console import bold, purple, green
# side effect: registers roles and directives
from . import roles
@@ -234,7 +236,7 @@ class Builder(object):
# build all
filenames_set = set(self.env.all_files)
self.prepare_writing(filenames)
self.prepare_writing(filenames_set)
# write target files
with collect_env_warnings(self):
@@ -483,12 +485,12 @@ class StandaloneHTMLBuilder(Builder):
self.srcdir, '*.rst', exclude=set(self.config.get('unused_files', ()))):
try:
targetmtime = path.getmtime(path.join(self.outdir,
filename[:-4] + '.html'))
unwebify_filepath(filename)[:-4] + '.html'))
except:
targetmtime = 0
if filename not in self.env.all_files:
yield filename
elif path.getmtime(path.join(self.srcdir, filename)) > targetmtime:
elif path.getmtime(path.join(self.srcdir, unwebify_filepath(filename))) > targetmtime:
yield filename
@@ -513,7 +515,7 @@ class StandaloneHTMLBuilder(Builder):
ctx = self.globalcontext.copy()
ctx.update(context)
output = self.templates[templatename].render(ctx)
outfilename = path.join(self.outdir, filename[:-4] + '.html')
outfilename = path.join(self.outdir, unwebify_filepath(filename)[:-4] + '.html')
ensuredir(path.dirname(outfilename)) # normally different from self.outdir
try:
with codecs.open(outfilename, 'w', 'utf-8') as fp:
@@ -522,7 +524,7 @@ class StandaloneHTMLBuilder(Builder):
print >>self.warning_stream, "Error writing file %s: %s" % (outfilename, err)
if self.copysource and context.get('sourcename'):
# copy the source file for the "show source" link
shutil.copyfile(path.join(self.srcdir, filename),
shutil.copyfile(path.join(self.srcdir, unwebify_filepath(filename)),
path.join(self.outdir, context['sourcename']))
def handle_finish(self):
@@ -547,10 +549,10 @@ class WebHTMLBuilder(StandaloneHTMLBuilder):
self.srcdir, '*.rst', exclude=set(self.config.get('unused_files', ()))):
try:
targetmtime = path.getmtime(path.join(self.outdir,
filename[:-4] + '.fpickle'))
unwebify_filepath(filename)[:-4] + '.fpickle'))
except:
targetmtime = 0
if path.getmtime(path.join(self.srcdir, filename)) > targetmtime:
if path.getmtime(path.join(self.srcdir, unwebify_filepath(filename))) > targetmtime:
yield filename
def get_target_uri(self, source_filename):
@@ -577,7 +579,7 @@ class WebHTMLBuilder(StandaloneHTMLBuilder):
self.indexer.feed(filename, category, title, doctree)
def handle_file(self, filename, context, templatename='page'):
outfilename = path.join(self.outdir, filename[:-4] + '.fpickle')
outfilename = path.join(self.outdir, unwebify_filepath(filename)[:-4] + '.fpickle')
ensuredir(path.dirname(outfilename))
context.pop('pathto', None) # can't be pickled
with file(outfilename, 'wb') as fp:
@@ -587,7 +589,7 @@ class WebHTMLBuilder(StandaloneHTMLBuilder):
if context.get('sourcename'):
source_name = path.join(self.outdir, 'sources', context['sourcename'])
ensuredir(path.dirname(source_name))
shutil.copyfile(path.join(self.srcdir, filename), source_name)
shutil.copyfile(path.join(self.srcdir, unwebify_filepath(filename)), source_name)
def handle_finish(self):
# dump the global context
@@ -632,8 +634,43 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
build_hhx(self, self.outdir, self.options.get('outname') or 'pydoc')
class LaTeXBuilder(Builder):
"""
Builds LaTeX output to create PDF.
"""
name = 'latex'
def init(self):
pass
def get_outdated_files(self):
# always rebuild everything for now
return self.env.all_files
def get_target_uri(self, source_filename):
# XXX: returns nothing for now
return ''
def prepare_writing(self, filenames):
self.docwriter = LaTeXWriter(self.config, self.name)
self.docsettings = OptionParser(
defaults=self.env.settings,
components=(self.docwriter,)).get_default_values()
def write_file(self, filename, doctree):
destination = StringOutput(encoding='utf-8')
doctree.settings = self.docsettings
output = self.docwriter.write(doctree, destination)
print output
def finish(self):
pass
builders = {
'html': StandaloneHTMLBuilder,
'web': WebHTMLBuilder,
'htmlhelp': HTMLHelpBuilder,
# 'latex': LaTeXBuilder,
}

View File

@@ -19,6 +19,7 @@ from docutils.parsers.rst import directives, roles
from docutils.parsers.rst.directives import admonitions
from . import addnodes
from .util import webify_filepath, unwebify_filepath
# ------ index markup --------------------------------------------------------------
@@ -554,7 +555,8 @@ def toctree_directive(name, arguments, options, content, lineno,
subnode = addnodes.toctree()
includefiles = filter(None, content)
# absolutize filenames
includefiles = map(lambda x: path.normpath(path.join(dirname, x)), includefiles)
includefiles = [webify_filepath(path.normpath(path.join (dirname, x))) for x in includefiles]
#~ includefiles = map(lambda x: path.normpath(path.join(dirname, x)), includefiles)
subnode['includefiles'] = includefiles
subnode['maxdepth'] = options.get('maxdepth', -1)
return [subnode]
@@ -599,9 +601,9 @@ def literalinclude_directive(name, arguments, options, content, lineno,
return [state.document.reporter.warning('File insertion disabled', line=lineno)]
env = state.document.settings.env
fn = arguments[0]
source_dir = path.dirname(path.abspath(state_machine.input_lines.source(
lineno - state_machine.input_offset - 1)))
fn = path.normpath(path.join(source_dir, fn))
source_dir = webify_filepath(path.dirname(path.abspath(state_machine.input_lines.source(
lineno - state_machine.input_offset - 1))))
fn = webify_filepath(path.normpath(path.join(source_dir, fn)))
try:
with open(fn) as f:

View File

@@ -38,7 +38,7 @@ Body.enum.converters['loweralpha'] = \
Body.enum.converters['upperroman'] = lambda x: None
from . import addnodes
from .util import get_matching_files
from .util import get_matching_files, unwebify_filepath, WEB_SEP
from .refcounting import Refcounts
default_settings = {
@@ -278,11 +278,11 @@ class BuildEnvironment:
else:
# if the doctree file is not there, rebuild
if not path.isfile(path.join(self.doctreedir,
filename[:-3] + 'doctree')):
unwebify_filepath(filename)[:-3] + 'doctree')):
changed.append(filename)
continue
mtime, md5 = self.all_files[filename]
newmtime = path.getmtime(path.join(self.srcdir, filename))
newmtime = path.getmtime(path.join(self.srcdir, unwebify_filepath(filename)))
if newmtime == mtime:
continue
# check the MD5
@@ -297,6 +297,8 @@
"""
(Re-)read all files new or changed since last update.
Yields a summary and then filenames as it processes them.
Store all environment filenames as webified (ie using "/"
as a separator in place of os.path.sep).
"""
added, changed, removed = self.get_outdated_files(config)
msg = '%s added, %s changed, %s removed' % (len(added), len(changed),
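
The docstring in the hunk above states the invariant the rest of the patch depends on: every filename recorded in the environment (for example the keys of all_files) uses "/" regardless of platform, and the native form is produced only at the point of filesystem access. A small illustration of what that buys, with made-up mtime/md5 values:

    # keys are webified, so lookups behave identically on Windows and Unix
    all_files = {"api/objects.rst": (1196719105.0, "d41d8cd98f00b204e9800998ecf8427e")}
    assert "api/objects.rst" in all_files
    # the native form only appears at filesystem access, e.g.
    #   path.getmtime(path.join(srcdir, unwebify_filepath("api/objects.rst")))
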
@@ -329,7 +331,7 @@
self.clear_file(filename)
if src_path is None:
src_path = path.join(self.srcdir, filename)
src_path = path.join(self.srcdir, unwebify_filepath(filename))
self.filename = filename
doctree = publish_doctree(None, src_path, FileInput,
@@ -360,7 +362,7 @@
if save_parsed:
# save the parsed doctree
doctree_filename = path.join(self.doctreedir, filename[:-3] + 'doctree')
doctree_filename = path.join(self.doctreedir, unwebify_filepath(filename)[:-3] + 'doctree')
dirname = path.dirname(doctree_filename)
if not path.isdir(dirname):
os.makedirs(dirname)
@@ -516,7 +518,7 @@
def get_doctree(self, filename):
"""Read the doctree for a file from the pickle and return it."""
doctree_filename = path.join(self.doctreedir, filename[:-3] + 'doctree')
doctree_filename = path.join(self.doctreedir, unwebify_filepath(filename)[:-3] + 'doctree')
with file(doctree_filename, 'rb') as f:
doctree = pickle.load(f)
doctree.reporter = Reporter(filename, 2, 4, stream=self.warning_stream)
@@ -862,6 +864,6 @@ class BuildEnvironment:
filename. This also resolves the special `index.rst` files. If the file
does not exist the return value will be `None`.
"""
for rstname in filename + '.rst', filename + path.sep + 'index.rst':
for rstname in filename + '.rst', filename + WEB_SEP + 'index.rst':
if rstname in self.all_files:
return rstname

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
sphinx.writer
~~~~~~~~~~~~~
sphinx.htmlwriter
~~~~~~~~~~~~~~~~~
docutils writers handling Sphinx' custom nodes.

View File

@@ -15,17 +15,35 @@ import fnmatch
from os import path
#
# Define WEB_SEP as a manifest constant, not
# so much because we expect it to change in
# the future as to avoid the suspicion that
# a stray "/" in the code is a hangover from
# more *nix-oriented origins.
#
WEB_SEP = "/"
def webify_filepath(filepath):
return filepath.replace(os.path.sep, WEB_SEP)
def unwebify_filepath(webpath):
return webpath.replace(WEB_SEP, os.path.sep)
def relative_uri(base, to):
"""Return a relative URL from ``base`` to ``to``."""
b2 = base.split('/')
t2 = to.split('/')
b2 = base.split(WEB_SEP)
t2 = to.split(WEB_SEP)
# remove common segments
for x, y in zip(b2, t2):
if x != y:
break
b2.pop(0)
t2.pop(0)
return '../' * (len(b2)-1) + '/'.join(t2)
return ('..' + WEB_SEP) * (len(b2)-1) + WEB_SEP.join(t2)
def ensuredir(path):
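
Because every name that reaches relative_uri() is already webified, the function can keep computing relative links with plain "/" (now spelled WEB_SEP) even when the build runs on Windows. A self-contained restatement of the function shown in the hunk above, with two worked checks (the example page names are made up):

    WEB_SEP = "/"

    def relative_uri(base, to):
        """Return a relative URL from ``base`` to ``to`` (both webified)."""
        b2 = base.split(WEB_SEP)
        t2 = to.split(WEB_SEP)
        # drop the common leading segments
        for x, y in zip(b2, t2):
            if x != y:
                break
            b2.pop(0)
            t2.pop(0)
        return ('..' + WEB_SEP) * (len(b2) - 1) + WEB_SEP.join(t2)

    # same directory: no '..' needed
    assert relative_uri("library/index.html", "library/os.html") == "os.html"
    # sibling directory: climb one level, then descend
    assert relative_uri("library/os.html", "reference/datamodel.html") == "../reference/datamodel.html"
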
@@ -60,12 +78,12 @@ def get_matching_files(dirname, pattern, exclude=()):
qualified_name = path.join(root[dirlen:], sfile)
if qualified_name in exclude:
continue
yield qualified_name
yield webify_filepath(qualified_name)
def get_category(filename):
"""Get the "category" part of a RST filename."""
parts = filename.split('/', 1)
parts = filename.split(WEB_SEP, 1)
if len(parts) < 2:
return
return parts[0]