- rename webify_filepath/unwebify_filepath to canonical_path/os_path

- build all LaTeX documents correctly, including the howtos
- pack the ACKS content into an ".. acks" directive, so that it can be
  rendered as a comma-separated list in LaTeX
commit ef49fadf8c
parent 6f31617900
Author: Georg Brandl
Date:   2007-12-07 20:27:52 +00:00

8 changed files with 131 additions and 78 deletions
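
For illustration only, a minimal sketch of what the renaming means in practice, assuming the SEP constant and helper bodies from the util module hunk at the end of this commit; the file name in the round trip is hypothetical.

import os

SEP = "/"   # canonical separator used in environment file names

def canonical_path(ospath):
    # OS-specific path -> canonical name (what get_matching_files yields)
    return ospath.replace(os.path.sep, SEP)

def os_path(canpath):
    # canonical name -> OS-specific path (used whenever the disk is touched)
    return canpath.replace(SEP, os.path.sep)

# hypothetical round trip; os.path.sep is '\\' on Windows and '/' on POSIX
name = canonical_path(os.path.join('library', 'os.rst'))
assert name == 'library' + SEP + 'os.rst'
assert os_path(name) == os.path.join('library', 'os.rst')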

TODO

@@ -10,6 +10,7 @@ Sphinx
- "seealso" links to external examples, see http://svn.python.org/projects/sandbox/trunk/seealso/ and http://effbot.org/zone/idea-seealso.htm
- write a "printable" builder (export to latex, most probably)
- "often used" combo box in sidebar
- link to keywords
- source file cross-references?
Web App


@@ -52,6 +52,9 @@ class pending_xref(nodes.Element): pass
# compact paragraph -- never makes a <p>
class compact_paragraph(nodes.paragraph): pass
# for the ACKS list
class acks(nodes.Element): pass
# sets the highlighting language for literal blocks
class highlightlang(nodes.Element): pass
@@ -67,4 +70,4 @@ nodes._add_node_class_names("""index desc desc_content desc_signature desc_type
desc_classname desc_name desc_parameterlist desc_parameter desc_optional
centered versionmodified seealso productionlist production toctree
pending_xref compact_paragraph highlightlang literal_emphasis
glossary""".split())
glossary acks""".split())


@@ -26,9 +26,8 @@ from docutils.utils import new_document
from docutils.readers import doctree
from docutils.frontend import OptionParser
from .util import (get_matching_files, attrdict, status_iterator,
ensuredir, get_category, relative_uri,
webify_filepath, unwebify_filepath)
from .util import (get_matching_files, attrdict, status_iterator, ensuredir,
get_category, relative_uri, os_path, SEP)
from .htmlhelp import build_hhx
from .patchlevel import get_version_info, get_sys_version_info
from .htmlwriter import HTMLWriter
@@ -491,12 +490,12 @@ class StandaloneHTMLBuilder(Builder):
self.srcdir, '*.rst', exclude=set(self.config.get('unused_files', ()))):
try:
targetmtime = path.getmtime(path.join(self.outdir,
unwebify_filepath(filename)[:-4] + '.html'))
os_path(filename)[:-4] + '.html'))
except:
targetmtime = 0
if filename not in self.env.all_files:
yield filename
elif path.getmtime(path.join(self.srcdir, unwebify_filepath(filename))) > targetmtime:
elif path.getmtime(path.join(self.srcdir, os_path(filename))) > targetmtime:
yield filename
@@ -521,7 +520,7 @@ class StandaloneHTMLBuilder(Builder):
ctx = self.globalcontext.copy()
ctx.update(context)
output = self.templates[templatename].render(ctx)
outfilename = path.join(self.outdir, unwebify_filepath(filename)[:-4] + '.html')
outfilename = path.join(self.outdir, os_path(filename)[:-4] + '.html')
ensuredir(path.dirname(outfilename)) # normally different from self.outdir
try:
with codecs.open(outfilename, 'w', 'utf-8') as fp:
@@ -530,8 +529,8 @@ class StandaloneHTMLBuilder(Builder):
print >>self.warning_stream, "Error writing file %s: %s" % (outfilename, err)
if self.copysource and context.get('sourcename'):
# copy the source file for the "show source" link
shutil.copyfile(path.join(self.srcdir, unwebify_filepath(filename)),
path.join(self.outdir, context['sourcename']))
shutil.copyfile(path.join(self.srcdir, os_path(filename)),
path.join(self.outdir, os_path(context['sourcename'])))
def handle_finish(self):
self.msg('dumping search index...')
@@ -554,19 +553,20 @@ class WebHTMLBuilder(StandaloneHTMLBuilder):
for filename in get_matching_files(
self.srcdir, '*.rst', exclude=set(self.config.get('unused_files', ()))):
try:
targetmtime = path.getmtime(path.join(self.outdir,
unwebify_filepath(filename)[:-4] + '.fpickle'))
targetmtime = path.getmtime(
path.join(self.outdir, os_path(filename)[:-4] + '.fpickle'))
except:
targetmtime = 0
if path.getmtime(path.join(self.srcdir, unwebify_filepath(filename))) > targetmtime:
if path.getmtime(path.join(self.srcdir,
os_path(filename))) > targetmtime:
yield filename
def get_target_uri(self, source_filename):
if source_filename == 'index.rst':
return ''
if source_filename.endswith('/index.rst'):
return source_filename[:-9] # up to /
return source_filename[:-4] + '/'
if source_filename.endswith(SEP+'index.rst'):
return source_filename[:-9] # up to sep
return source_filename[:-4] + SEP
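
A usage sketch (not part of the commit) of the directory-style URIs the SEP-based get_target_uri above produces; the free function mirrors the method body and the source names are hypothetical.

SEP = "/"

def get_target_uri(source_filename):
    # mirrors WebHTMLBuilder.get_target_uri as changed above
    if source_filename == 'index.rst':
        return ''
    if source_filename.endswith(SEP + 'index.rst'):
        return source_filename[:-9]       # up to and including the SEP
    return source_filename[:-4] + SEP     # drop '.rst', append trailing SEP

assert get_target_uri('index.rst') == ''
assert get_target_uri('library/index.rst') == 'library/'
assert get_target_uri('library/os.rst') == 'library/os/'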
def load_indexer(self, filenames):
try:
@@ -585,7 +585,7 @@ class WebHTMLBuilder(StandaloneHTMLBuilder):
self.indexer.feed(filename, category, title, doctree)
def handle_file(self, filename, context, templatename='page'):
outfilename = path.join(self.outdir, unwebify_filepath(filename)[:-4] + '.fpickle')
outfilename = path.join(self.outdir, os_path(filename)[:-4] + '.fpickle')
ensuredir(path.dirname(outfilename))
context.pop('pathto', None) # can't be pickled
with file(outfilename, 'wb') as fp:
@@ -593,9 +593,9 @@ class WebHTMLBuilder(StandaloneHTMLBuilder):
# if there is a source file, copy the source file for the "show source" link
if context.get('sourcename'):
source_name = path.join(self.outdir, 'sources', context['sourcename'])
source_name = path.join(self.outdir, 'sources', os_path(context['sourcename']))
ensuredir(path.dirname(source_name))
shutil.copyfile(path.join(self.srcdir, unwebify_filepath(filename)), source_name)
shutil.copyfile(path.join(self.srcdir, os_path(filename)), source_name)
def handle_finish(self):
# dump the global context
@@ -659,6 +659,17 @@ class LaTeXBuilder(Builder):
else:
return ''
def get_document_data(self):
for toplevel in ["c-api", "distutils", "documenting", "extending",
"install", "reference", "tutorial", "using", "library"]:
yield (toplevel + SEP + 'index.rst', toplevel+'.tex', 'manual')
yield ('whatsnew' + SEP + self.config['version'] + '.rst',
'whatsnew.tex', 'howto')
for howto in [fn for fn in self.env.all_files
if fn.startswith('howto'+SEP)
and not fn.endswith('index.rst')]:
yield (howto, 'howto-'+howto[6:-4]+'.tex', 'howto')
def write(self, filenames):
# "filenames" is ignored here...
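
A sketch of what the new get_document_data yields, assuming config['version'] is '2.6' and that a single howto source 'howto/regex.rst' exists in env.all_files (both hypothetical).

# hypothetical output of LaTeXBuilder.get_document_data()
expected = [
    ('c-api/index.rst', 'c-api.tex', 'manual'),
    # ... one ('<toplevel>/index.rst', '<toplevel>.tex', 'manual') per top-level dir ...
    ('library/index.rst', 'library.tex', 'manual'),
    ('whatsnew/2.6.rst', 'whatsnew.tex', 'howto'),
    ('howto/regex.rst', 'howto-regex.tex', 'howto'),   # 'howto-' + howto[6:-4] + '.tex'
]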
@@ -672,26 +683,24 @@ class LaTeXBuilder(Builder):
defaults=self.env.settings,
components=(docwriter,)).get_default_values()
# XXX get names of toplevels automatically?
for docname in ["library"]:#, "distutils", "documenting", "extending",
#"howto", "install", "library", "reference",
#"tutorial", "using"]:
# XXX whatsnew missing
for sourcename, targetname, docclass in self.get_document_data():
destination = FileOutput(
destination_path=path.join(self.outdir, docname+".tex"),
destination_path=path.join(self.outdir, targetname),
encoding='utf-8')
doctree = self.assemble_doctree(path.join(docname, "index.rst"))
doctree.extend(specials)
print "processing", targetname + "...",
doctree = self.assemble_doctree(sourcename,
specials=(docclass == 'manual') and specials or [])
print "writing...",
doctree.settings = docsettings
doctree.settings.filename = docname
doctree.settings.docclass = 'manual' # XXX howto for whatsnew
doctree.settings.filename = sourcename
doctree.settings.docclass = docclass
output = docwriter.write(doctree, destination)
print "done"
def assemble_doctree(self, indexfile):
self.filenames = [indexfile]
print "processing", indexfile
def assemble_doctree(self, indexfile, specials):
self.filenames = set([indexfile, 'glossary.rst', 'about.rst',
'license.rst', 'copyright.rst'])
print green(indexfile),
def process_tree(tree):
#tree = tree.deepcopy() XXX
for toctreenode in tree.traverse(addnodes.toctree):
@@ -701,7 +710,7 @@ class LaTeXBuilder(Builder):
try:
print green(includefile),
subtree = process_tree(self.env.get_doctree(includefile))
self.filenames.append(includefile)
self.filenames.add(includefile)
except:
print >>self.warning_stream, 'WARNING: %s: toctree contains ' \
'ref to nonexisting file %r' % (filename, includefile)
@@ -710,6 +719,7 @@ class LaTeXBuilder(Builder):
toctreenode.parent.replace(toctreenode, newnodes)
return tree
largetree = process_tree(self.env.get_doctree(indexfile))
largetree.extend(specials)
print
print "resolving references..."
self.env.resolve_references(largetree, indexfile, self)
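
One detail from the write() hunk above: (docclass == 'manual') and specials or [] is the old and/or spelling of a conditional expression. A sketch with placeholder values; the and/or form only matches the 2.5+ conditional when specials itself is truthy, which is assumed here.

docclass = 'manual'                      # hypothetical value from get_document_data
specials = ['<appendix doctree nodes>']  # placeholder; the real list is built elsewhere

extra = (docclass == 'manual') and specials or []
assert extra == (specials if docclass == 'manual' else [])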


@@ -12,6 +12,7 @@ from __future__ import with_statement
import re
import string
import posixpath
from os import path
from docutils import nodes
@@ -19,7 +20,6 @@ from docutils.parsers.rst import directives, roles
from docutils.parsers.rst.directives import admonitions
from . import addnodes
from .util import webify_filepath, unwebify_filepath
# ------ index markup --------------------------------------------------------------
@@ -552,13 +552,12 @@ directives.register_directive('moduleauthor', author_directive)
def toctree_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
dirname = path.dirname(env.filename)
dirname = posixpath.dirname(env.filename)
subnode = addnodes.toctree()
includefiles = filter(None, content)
# absolutize filenames
includefiles = [webify_filepath(path.normpath(path.join (dirname, x))) for x in includefiles]
#~ includefiles = map(lambda x: path.normpath(path.join(dirname, x)), includefiles)
includefiles = [posixpath.normpath(posixpath.join(dirname, x)) for x in includefiles]
subnode['includefiles'] = includefiles
subnode['maxdepth'] = options.get('maxdepth', -1)
return [subnode]
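
A sketch of the normalization now done with posixpath, assuming env.filename is the canonical name 'library/index.rst' and hypothetical toctree entries.

import posixpath

env_filename = 'library/index.rst'            # canonical names always use '/'
dirname = posixpath.dirname(env_filename)     # 'library'

entries = ['os.rst', 'sub/xml.rst', '../glossary.rst']   # hypothetical content lines
includefiles = [posixpath.normpath(posixpath.join(dirname, x)) for x in entries]
assert includefiles == ['library/os.rst', 'library/sub/xml.rst', 'glossary.rst']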
@@ -603,16 +602,16 @@ def literalinclude_directive(name, arguments, options, content, lineno,
return [state.document.reporter.warning('File insertion disabled', line=lineno)]
env = state.document.settings.env
fn = arguments[0]
source_dir = webify_filepath(path.dirname(path.abspath(state_machine.input_lines.source(
lineno - state_machine.input_offset - 1))))
fn = webify_filepath(path.normpath(path.join(source_dir, fn)))
source_dir = path.dirname(path.abspath(state_machine.input_lines.source(
lineno - state_machine.input_offset - 1)))
fn = path.normpath(path.join(source_dir, fn))
try:
with open(fn) as f:
text = f.read()
except (IOError, OSError):
retnode = state.document.reporter.warning('Include file %r not found' %
arguments[0], line=lineno)
retnode = state.document.reporter.warning(
'Include file %r not found or reading it failed' % arguments[0], line=lineno)
else:
retnode = nodes.literal_block(text, text, source=fn)
retnode.line = 1
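
The include path is now resolved with plain os.path relative to the directory of the reST source file; a sketch with hypothetical paths (no file is actually opened here).

from os import path

source = '/src/Doc/library/os.rst'                    # hypothetical reST source
source_dir = path.dirname(path.abspath(source))       # '/src/Doc/library' on POSIX
fn = path.normpath(path.join(source_dir, '../includes/example.py'))
# -> '/src/Doc/includes/example.py' on POSIX; backslash-separated on Windows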
@@ -654,3 +653,19 @@ def glossary_directive(name, arguments, options, content, lineno,
glossary_directive.content = 1
glossary_directive.arguments = (0, 0, 0)
directives.register_directive('glossary', glossary_directive)
# ------ acks directive -------------------------------------------------------------
def acks_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
node = addnodes.acks()
state.nested_parse(content, content_offset, node)
if len(node.children) != 1 or not isinstance(node.children[0], nodes.bullet_list):
return [state.document.reporter.warning('.. acks content is not a list',
line=lineno)]
return [node]
acks_directive.content = 1
acks_directive.arguments = (0, 0, 0)
directives.register_directive('acks', acks_directive)


@@ -38,7 +38,7 @@ Body.enum.converters['loweralpha'] = \
Body.enum.converters['upperroman'] = lambda x: None
from . import addnodes
from .util import get_matching_files, unwebify_filepath, WEB_SEP
from .util import get_matching_files, os_path, SEP
from .refcounting import Refcounts
default_settings = {
@@ -283,11 +283,11 @@ class BuildEnvironment:
else:
# if the doctree file is not there, rebuild
if not path.isfile(path.join(self.doctreedir,
unwebify_filepath(filename)[:-3] + 'doctree')):
os_path(filename)[:-3] + 'doctree')):
changed.append(filename)
continue
mtime, md5 = self.all_files[filename]
newmtime = path.getmtime(path.join(self.srcdir, unwebify_filepath(filename)))
newmtime = path.getmtime(path.join(self.srcdir, os_path(filename)))
if newmtime == mtime:
continue
# check the MD5
@@ -302,8 +302,8 @@ class BuildEnvironment:
"""
(Re-)read all files new or changed since last update.
Yields a summary and then filenames as it processes them.
Store all environment filenames as webified (ie using "/"
as a separator in place of os.path.sep).
Store all environment filenames in the canonical format
(ie using SEP as a separator in place of os.path.sep).
"""
added, changed, removed = self.get_outdated_files(config)
msg = '%s added, %s changed, %s removed' % (len(added), len(changed),
@@ -336,7 +336,7 @@ class BuildEnvironment:
self.clear_file(filename)
if src_path is None:
src_path = path.join(self.srcdir, unwebify_filepath(filename))
src_path = path.join(self.srcdir, os_path(filename))
self.filename = filename
doctree = publish_doctree(None, src_path, FileInput,
@@ -367,7 +367,7 @@ class BuildEnvironment:
if save_parsed:
# save the parsed doctree
doctree_filename = path.join(self.doctreedir, unwebify_filepath(filename)[:-3] + 'doctree')
doctree_filename = path.join(self.doctreedir, os_path(filename)[:-3] + 'doctree')
dirname = path.dirname(doctree_filename)
if not path.isdir(dirname):
os.makedirs(dirname)
@@ -523,7 +523,7 @@ class BuildEnvironment:
def get_doctree(self, filename):
"""Read the doctree for a file from the pickle and return it."""
doctree_filename = path.join(self.doctreedir, unwebify_filepath(filename)[:-3] + 'doctree')
doctree_filename = path.join(self.doctreedir, os_path(filename)[:-3] + 'doctree')
with file(doctree_filename, 'rb') as f:
doctree = pickle.load(f)
doctree.reporter = Reporter(filename, 2, 4, stream=self.warning_stream)
@@ -597,12 +597,16 @@ class BuildEnvironment:
'%s: undefined label: %s' % (docfilename, target)
else:
newnode = nodes.reference('', '')
innernode = nodes.emphasis(sectname, sectname)
if filename == docfilename:
newnode['refid'] = labelid
else:
# in case the following calls raises NoUri...
# else the final node will contain a label name
contnode = innernode
newnode['refuri'] = builder.get_relative_uri(
docfilename, filename) + '#' + labelid
newnode.append(nodes.emphasis(sectname, sectname))
newnode.append(innernode)
elif typ in ('token', 'term', 'envvar', 'option'):
filename, labelid = self.reftargets.get((typ, target), ('', ''))
if not filename:
@@ -874,6 +878,6 @@ class BuildEnvironment:
filename. This also resolves the special `index.rst` files. If the file
does not exist the return value will be `None`.
"""
for rstname in filename + '.rst', filename + WEB_SEP + 'index.rst':
for rstname in filename + '.rst', filename + SEP + 'index.rst':
if rstname in self.all_files:
return rstname
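
A sketch of the lookup above, with all_files as a hypothetical set of canonical file names; the helper mirrors the loop shown in the hunk and returns None when nothing matches.

SEP = "/"
all_files = set(['library/os.rst', 'tutorial/index.rst'])   # hypothetical

def find_file(filename):
    for rstname in (filename + '.rst', filename + SEP + 'index.rst'):
        if rstname in all_files:
            return rstname

assert find_file('library/os') == 'library/os.rst'
assert find_file('tutorial') == 'tutorial/index.rst'
assert find_file('missing') is None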


@@ -227,6 +227,11 @@ def translator_class(config, buildername):
def depart_glossary(self, node):
pass
def visit_acks(self, node):
pass
def depart_acks(self, node):
pass
# these are only handled specially in the SmartyPantsHTMLTranslator
def visit_literal_emphasis(self, node):
return self.visit_emphasis(node)


@@ -24,6 +24,7 @@ from . import highlighting
HEADER = r'''%% Generated by Sphinx.
\documentclass[%(papersize)s,%(pointsize)s]{%(docclass)s}
\usepackage[utf8]{inputenc}
\usepackage[colorlinks]{hyperref}
\title{%(title)s}
\date{%(date)s}
@@ -91,7 +92,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
def __init__(self, document, config):
nodes.NodeVisitor.__init__(self, document)
self.body = []
self.options = {'docclass': document.settings.docclass,
docclass = document.settings.docclass
self.options = {'docclass': docclass,
'papersize': 'a4paper', # XXX
'pointsize': '12pt',
'filename': document.settings.filename,
@@ -103,6 +105,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.descstack = []
self.highlightlang = 'python'
self.written_ids = set()
self.top_sectionlevel = 0 if docclass == 'manual' else 1
# flags
self.verbatim = None
self.in_title = 0
@@ -123,7 +126,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
elif self.first_document == 0:
self.body.append('\n\\appendix\n')
self.first_document = -1
self.sectionlevel = 0
self.sectionlevel = self.top_sectionlevel
def depart_document(self, node):
pass
@@ -146,6 +149,12 @@ class LaTeXTranslator(nodes.NodeVisitor):
def depart_section(self, node):
self.sectionlevel -= 1
def visit_topic(self, node):
raise nodes.SkipNode # XXX
def visit_sidebar(self, node):
raise nodes.SkipNode # XXX
def visit_glossary(self, node):
raise nodes.SkipNode # XXX
@@ -365,6 +374,14 @@ class LaTeXTranslator(nodes.NodeVisitor):
else:
self.body.append('}')
def visit_acks(self, node):
# this is a list in the source, but should be rendered as a
# comma-separated list here
self.body.append('\n\n')
self.body.append(', '.join(n.astext() for n in node.children[0].children))
self.body.append('\n\n')
raise nodes.SkipNode
def visit_bullet_list(self, node):
self.body.append('\\begin{itemize}\n' )
def depart_bullet_list(self, node):
@@ -502,7 +519,9 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('}')
def visit_title_reference(self, node):
raise RuntimeError("XXX title reference node found")
self.body.append(r'\emph{')
def depart_title_reference(self, node):
self.body.append('}')
def visit_literal(self, node):
content = self.encode(node.astext().strip())
@@ -519,13 +538,13 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_literal_block(self, node):
#self.body.append('\n\\begin{Verbatim}\n')
self.verbatim = ''
def depart_literal_block(self, node):
#self.body.append('\n\\end{Verbatim}\n')
self.body.append('\n' + highlighting.highlight_block(self.verbatim,
self.highlightlang,
'latex'))
hlcode = highlighting.highlight_block(self.verbatim.rstrip('\n'),
self.highlightlang, 'latex')
# workaround for Pygments bug
hlcode = hlcode.replace('\n\\end{Verbatim}', '\\end{Verbatim}')
self.body.append('\n' + hlcode)
self.verbatim = None
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
@@ -600,7 +619,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
def visit_Text(self, node):
if self.verbatim is not None:
self.verbatim += node.astext().replace('@', '@@')
self.verbatim += node.astext()
else:
self.body.append(self.encode(node.astext()))
def depart_Text(self, node):
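
A minimal sketch, assuming docutils is installed, of the comma-separated rendering that the new visit_acks above performs; the contributor names are made up.

from docutils import nodes

# hypothetical .. acks content: a single bullet list of contributor names
blist = nodes.bullet_list(
    '',
    nodes.list_item('', nodes.paragraph(text='First Contributor')),
    nodes.list_item('', nodes.paragraph(text='Second Contributor')),
)
# visit_acks joins the list items into one comma-separated run of text
text = ', '.join(n.astext() for n in blist.children)
assert text == 'First Contributor, Second Contributor'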


@@ -15,35 +15,31 @@ import fnmatch
from os import path
# SEP separates path elements in the canonical file names
#
# Define WEB_SEP as a manifest constant, not
# so much because we expect it to change in
# the future as to avoid the suspicion that
# a stray "/" in the code is a hangover from
# more *nix-oriented origins.
#
WEB_SEP = "/"
# Define SEP as a manifest constant, not so much because we expect it to change
# in the future as to avoid the suspicion that a stray "/" in the code is a
# hangover from more *nix-oriented origins.
SEP = "/"
def canonical_path(ospath):
return ospath.replace(os.path.sep, SEP)
def webify_filepath(filepath):
return filepath.replace(os.path.sep, WEB_SEP)
def unwebify_filepath(webpath):
return webpath.replace(WEB_SEP, os.path.sep)
def os_path(canpath):
return canpath.replace(SEP, os.path.sep)
def relative_uri(base, to):
"""Return a relative URL from ``base`` to ``to``."""
b2 = base.split(WEB_SEP)
t2 = to.split(WEB_SEP)
b2 = base.split(SEP)
t2 = to.split(SEP)
# remove common segments
for x, y in zip(b2, t2):
if x != y:
break
b2.pop(0)
t2.pop(0)
return ('..' + WEB_SEP) * (len(b2)-1) + WEB_SEP.join(t2)
return ('..' + SEP) * (len(b2)-1) + SEP.join(t2)
def ensuredir(path):
@@ -78,12 +74,12 @@ def get_matching_files(dirname, pattern, exclude=()):
qualified_name = path.join(root[dirlen:], sfile)
if qualified_name in exclude:
continue
yield webify_filepath(qualified_name)
yield canonical_path(qualified_name)
def get_category(filename):
"""Get the "category" part of a RST filename."""
parts = filename.split(WEB_SEP, 1)
parts = filename.split(SEP, 1)
if len(parts) < 2:
return
return parts[0]
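
Finally, a usage sketch of the two helpers above, re-declared here so the example is self-contained; the canonical file names are hypothetical.

SEP = "/"

def relative_uri(base, to):
    # copy of the helper above, for the sake of a runnable example
    b2 = base.split(SEP)
    t2 = to.split(SEP)
    for x, y in zip(b2, t2):
        if x != y:
            break
        b2.pop(0)
        t2.pop(0)
    return ('..' + SEP) * (len(b2) - 1) + SEP.join(t2)

def get_category(filename):
    parts = filename.split(SEP, 1)
    if len(parts) < 2:
        return None
    return parts[0]

assert relative_uri('library/os.rst', 'library/sys.rst') == 'sys.rst'
assert relative_uri('library/os.rst', 'tutorial/index.rst') == '../tutorial/index.rst'
assert get_category('library/os.rst') == 'library'
assert get_category('glossary.rst') is None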