diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..f1f52a878 --- /dev/null +++ b/Makefile @@ -0,0 +1,24 @@ +PYTHON ?= python + +export PYTHONPATH = $(shell echo "$$PYTHONPATH"):./sphinx + +.PHONY: all check clean clean-pyc pylint reindent testserver + +all: clean-pyc check + +check: + @$(PYTHON) utils/check_sources.py -i sphinx/style/jquery.js sphinx + @$(PYTHON) utils/check_sources.py converter + +clean: clean-pyc + +clean-pyc: + find . -name '*.pyc' -exec rm -f {} + + find . -name '*.pyo' -exec rm -f {} + + find . -name '*~' -exec rm -f {} + + +pylint: + @pylint --rcfile utils/pylintrc sphinx converter + +reindent: + @$(PYTHON) utils/reindent.py -r -B . diff --git a/README b/README new file mode 100644 index 000000000..9cf2e75c8 --- /dev/null +++ b/README @@ -0,0 +1,79 @@ +py-rest-doc +=========== + +This sandbox project is about moving the official Python documentation +to reStructuredText. + + +What you need to know +--------------------- + +This project uses Python 2.5 features, so you'll need a working Python +2.5 setup. + +If you want code highlighting, you need Pygments >= 0.8, easily +installable from PyPI. Jinja, the template engine, is included as a +SVN external. + +For the rest of this document, let's assume that you have a Python +checkout (you need the 2.6 line, i.e. the trunk) in ~/devel/python and +this checkout in the current directory. + +To convert the LaTeX doc to reST, you first have to apply the patch in +``etc/inst.diff`` to the ``inst/inst.tex`` LaTeX file in the Python +checkout:: + + patch -d ~/devel/python/Doc -p0 < etc/inst.diff + +Then, create a target directory for the reST sources and run the +converter script:: + + mkdir sources + python convert.py ~/devel/python/Doc sources + +This will convert all LaTeX sources to reST files in the ``sources`` +directory. 
+ +The ``sources`` directory contains a ``conf.py`` file which contains +general configuration for the build process, such as the Python +version that should be shown, or the date format for "last updated on" +notes. + + +Building the HTML version +------------------------- + +Then, create a target directory and run :: + + mkdir build-html + python sphinx-build.py -b html sources build-html + +This will create HTML files in the ``build-html`` directory. + +The ``build-html`` directory will also contain a ``.doctrees`` +directory, which caches pickles containing the docutils doctrees for +all source files, as well as an ``environment.pickle`` file that +collects all meta-information and data that's needed to +cross-reference the sources and generate indices. + + +Running the online (web) version +-------------------------------- + +First, you need to build the source with the "web" builder:: + + mkdir build-web + python sphinx-build.py -b web sources build-web + +This will create files with pickled contents for the web application +in the target directory. + +Then, you can run :: + + python sphinx-web.py build-web + +which will start a webserver using wsgiref on ``localhost:3000``. The +web application has a configuration file ``build-web/webconf.py``, +where you can configure the server and port for the application as +well as different other settings specific to the web app. + diff --git a/TODO b/TODO new file mode 100644 index 000000000..0ff5081e1 --- /dev/null +++ b/TODO @@ -0,0 +1,13 @@ +Global TODO +=========== + +- discuss and debug comments system +- write new Makefile, handle automatic version info and checkout +- write a "printable" builder (export to latex, most probably) +- discuss the default role +- discuss lib -> ref section move +- prepare for databases other than sqlite for comments +- look at the old tools/ scripts, what functionality should be rewritten +- add search via Xapian? +- optionally have a contents tree view in the sidebar (AJAX based)? 
+ diff --git a/convert.py b/convert.py new file mode 100644 index 000000000..3654a1fca --- /dev/null +++ b/convert.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +""" + Convert the Python documentation to Sphinx + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +import sys +import os + +from converter import convert_dir + +if __name__ == '__main__': + try: + rootdir = sys.argv[1] + destdir = os.path.abspath(sys.argv[2]) + except IndexError: + print "usage: convert.py docrootdir destdir" + sys.exit() + + assert os.path.isdir(os.path.join(rootdir, 'texinputs')) + os.chdir(rootdir) + convert_dir(destdir, *sys.argv[3:]) diff --git a/converter/__init__.py b/converter/__init__.py new file mode 100644 index 000000000..82b90e5e0 --- /dev/null +++ b/converter/__init__.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +""" + Documentation converter - high level functions + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: 2007 by Georg Brandl. + :license: Python license. 
+""" + +import sys +import os +import glob +import shutil +import codecs +from os import path + +from .tokenizer import Tokenizer +from .latexparser import DocParser +from .restwriter import RestWriter +from .filenamemap import (fn_mapping, copyfiles_mapping, newfiles_mapping, + rename_mapping, dirs_to_make, toctree_mapping, + amendments_mapping) +from .console import red, green + +def convert_file(infile, outfile, doraise=True, splitchap=False, + toctree=None, deflang=None, labelprefix=''): + inf = codecs.open(infile, 'r', 'latin1') + p = DocParser(Tokenizer(inf.read()).tokenize(), infile) + if not splitchap: + outf = codecs.open(outfile, 'w', 'utf-8') + else: + outf = None + r = RestWriter(outf, splitchap, toctree, deflang, labelprefix) + try: + r.write_document(p.parse()) + if splitchap: + for i, chapter in enumerate(r.chapters[1:]): + coutf = codecs.open('%s/%d_%s' % ( + path.dirname(outfile), i+1, path.basename(outfile)), + 'w', 'utf-8') + coutf.write(chapter.getvalue()) + coutf.close() + else: + outf.close() + return 1, r.warnings + except Exception, err: + if doraise: + raise + return 0, str(err) + + +def convert_dir(outdirname, *args): + # make directories + for dirname in dirs_to_make: + try: + os.mkdir(path.join(outdirname, dirname)) + except OSError: + pass + + # copy files (currently only non-tex includes) + for oldfn, newfn in copyfiles_mapping.iteritems(): + newpathfn = path.join(outdirname, newfn) + globfns = glob.glob(oldfn) + if len(globfns) == 1 and not path.isdir(newpathfn): + shutil.copyfile(globfns[0], newpathfn) + else: + for globfn in globfns: + shutil.copyfile(globfn, path.join(newpathfn, + path.basename(globfn))) + + # convert tex files + # "doc" is not converted. It must be rewritten anyway. 
+ for subdir in ('api', 'dist', 'ext', 'inst', 'commontex', + 'lib', 'mac', 'ref', 'tut', 'whatsnew'): + if args and subdir not in args: + continue + if subdir not in fn_mapping: + continue + newsubdir = fn_mapping[subdir]['__newname__'] + deflang = fn_mapping[subdir].get('__defaulthighlightlang__') + labelprefix = fn_mapping[subdir].get('__labelprefix__', '') + for filename in sorted(os.listdir(subdir)): + if not filename.endswith('.tex'): + continue + filename = filename[:-4] # strip extension + newname = fn_mapping[subdir][filename] + if newname is None: + continue + if newname.endswith(':split'): + newname = newname[:-6] + splitchap = True + else: + splitchap = False + if '/' not in newname: + outfilename = path.join(outdirname, newsubdir, newname + '.rst') + else: + outfilename = path.join(outdirname, newname + '.rst') + toctree = toctree_mapping.get(path.join(subdir, filename)) + infilename = path.join(subdir, filename + '.tex') + print green(infilename), + success, state = convert_file(infilename, outfilename, False, + splitchap, toctree, deflang, labelprefix) + if not success: + print red("ERROR:") + print red(" " + state) + else: + if state: + print "warnings:" + for warning in state: + print " " + warning + + # rename files, e.g. 
splitted ones + for oldfn, newfn in rename_mapping.iteritems(): + try: + if newfn is None: + os.unlink(path.join(outdirname, oldfn)) + else: + os.rename(path.join(outdirname, oldfn), + path.join(outdirname, newfn)) + except OSError, err: + if err.errno == 2: + continue + raise + + # copy new files + srcdirname = path.join(path.dirname(__file__), 'newfiles') + for fn, newfn in newfiles_mapping.iteritems(): + shutil.copyfile(path.join(srcdirname, fn), + path.join(outdirname, newfn)) + + # make amendments + for newfn, (pre, post) in amendments_mapping.iteritems(): + fn = path.join(outdirname, newfn) + try: + ft = open(fn).read() + except Exception, err: + print "Error making amendments to %s: %s" % (newfn, err) + continue + else: + fw = open(fn, 'w') + fw.write(pre) + fw.write(ft) + fw.write(post) + fw.close() diff --git a/converter/console.py b/converter/console.py new file mode 100644 index 000000000..a5e201a7e --- /dev/null +++ b/converter/console.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +""" + Console utils + ~~~~~~~~~~~~~ + + Format colored console output. + + :copyright: 1998-2004 by the Gentoo Foundation. + :copyright: 2006-2007 by Georg Brandl. + :license: GNU GPL. +""" + +esc_seq = "\x1b[" + +codes = {} +codes["reset"] = esc_seq + "39;49;00m" + +codes["bold"] = esc_seq + "01m" +codes["faint"] = esc_seq + "02m" +codes["standout"] = esc_seq + "03m" +codes["underline"] = esc_seq + "04m" +codes["blink"] = esc_seq + "05m" +codes["overline"] = esc_seq + "06m" # Who made this up? Seriously. 
+ +ansi_color_codes = [] +for x in xrange(30, 38): + ansi_color_codes.append("%im" % x) + ansi_color_codes.append("%i;01m" % x) + +rgb_ansi_colors = [ + '0x000000', '0x555555', '0xAA0000', '0xFF5555', + '0x00AA00', '0x55FF55', '0xAA5500', '0xFFFF55', + '0x0000AA', '0x5555FF', '0xAA00AA', '0xFF55FF', + '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF' +] + +for x in xrange(len(rgb_ansi_colors)): + codes[rgb_ansi_colors[x]] = esc_seq + ansi_color_codes[x] + +del x + +codes["black"] = codes["0x000000"] +codes["darkgray"] = codes["0x555555"] + +codes["red"] = codes["0xFF5555"] +codes["darkred"] = codes["0xAA0000"] + +codes["green"] = codes["0x55FF55"] +codes["darkgreen"] = codes["0x00AA00"] + +codes["yellow"] = codes["0xFFFF55"] +codes["brown"] = codes["0xAA5500"] + +codes["blue"] = codes["0x5555FF"] +codes["darkblue"] = codes["0x0000AA"] + +codes["fuchsia"] = codes["0xFF55FF"] +codes["purple"] = codes["0xAA00AA"] + +codes["teal"] = codes["0x00AAAA"] +codes["turquoise"] = codes["0x55FFFF"] + +codes["white"] = codes["0xFFFFFF"] +codes["lightgray"] = codes["0xAAAAAA"] + +codes["darkteal"] = codes["turquoise"] +codes["darkyellow"] = codes["brown"] +codes["fuscia"] = codes["fuchsia"] +codes["white"] = codes["bold"] + +def nocolor(): + "turn off colorization" + for code in codes: + codes[code] = "" + +def reset_color(): + return codes["reset"] + +def colorize(color_key, text): + return codes[color_key] + text + codes["reset"] + +functions_colors = [ + "bold", "white", "teal", "turquoise", "darkteal", + "fuscia", "fuchsia", "purple", "blue", "darkblue", + "green", "darkgreen", "yellow", "brown", + "darkyellow", "red", "darkred" +] + +def create_color_func(color_key): + """ + Return a function that formats its argument in the given color. 
+ """ + def derived_func(text): + return colorize(color_key, text) + return derived_func + +ns = locals() +for c in functions_colors: + ns[c] = create_color_func(c) + +del c, ns diff --git a/converter/docnodes.py b/converter/docnodes.py new file mode 100644 index 000000000..336499e9d --- /dev/null +++ b/converter/docnodes.py @@ -0,0 +1,297 @@ +# -*- coding: utf-8 -*- +""" + Python documentation LaTeX parser - document nodes + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + + +class DocNode(object): + """ A node in the document tree. """ + def __repr__(self): + return '%s()' % self.__class__.__name__ + + def __str__(self): + raise RuntimeError('cannot stringify docnodes') + + def walk(self): + return [] + + +class CommentNode(DocNode): + """ A comment. """ + def __init__(self, comment): + assert isinstance(comment, basestring) + self.comment = comment + + def __repr__(self): + return 'CommentNode(%r)' % self.comment + + +class RootNode(DocNode): + """ A whole document. """ + def __init__(self, filename, children): + self.filename = filename + self.children = children + self.params = {} + self.labels = {} + + def __repr__(self): + return 'RootNode(%r, %r)' % (self.filename, self.children) + + def walk(self): + return self.children + + def transform(self): + """ Do restructurings not possible during parsing. """ + def do_descenvs(node): + r""" Make \xxxlines an attribute of the parent xxxdesc node. """ + for subnode in node.walk(): + do_descenvs(subnode) + if isinstance(node, DescEnvironmentNode): + for subnode in node.content.walk(): + if isinstance(subnode, DescLineCommandNode): + node.additional.append((subnode.cmdname, subnode.args)) + + do_descenvs(self) + + +class NodeList(DocNode, list): + """ A list of subnodes. 
""" + def __init__(self, children=None): + list.__init__(self, children or []) + + def __repr__(self): + return 'NL%s' % list.__repr__(self) + + def walk(self): + return self + + def append(self, node): + assert isinstance(node, DocNode) + if type(node) is EmptyNode: + return + elif self and isinstance(node, TextNode) and \ + type(self[-1]) is TextNode: + self[-1].text += node.text + elif type(node) is NodeList: + list.extend(self, node) + elif type(node) is VerbatimNode and self and \ + isinstance(self[-1], ParaSepNode): + # don't allow a ParaSepNode before VerbatimNode + # because this breaks ReST's '::' + self[-1] = node + else: + list.append(self, node) + + def flatten(self): + if len(self) > 1: + return self + elif len(self) == 1: + return self[0] + else: + return EmptyNode() + + +class ParaSepNode(DocNode): + """ A node for paragraph separator. """ + def __repr__(self): + return 'Para' + + +class TextNode(DocNode): + """ A node containing text. """ + def __init__(self, text): + assert isinstance(text, basestring) + self.text = text + + def __repr__(self): + if type(self) is TextNode: + return 'T%r' % self.text + else: + return '%s(%r)' % (self.__class__.__name__, self.text) + + +class EmptyNode(TextNode): + """ An empty node. """ + def __init__(self, *args): + self.text = '' + + +class NbspNode(TextNode): + """ A non-breaking space. """ + def __init__(self, *args): + # this breaks ReST markup (!) 
+ #self.text = u'\N{NO-BREAK SPACE}' + self.text = ' ' + + def __repr__(self): + return 'NBSP' + + +simplecmd_mapping = { + 'ldots': u'...', + 'moreargs': '...', + 'unspecified': '...', + 'ASCII': 'ASCII', + 'UNIX': 'Unix', + 'Unix': 'Unix', + 'POSIX': 'POSIX', + 'LaTeX': 'LaTeX', + 'EOF': 'EOF', + 'Cpp': 'C++', + 'C': 'C', + 'sub': u'--> ', + 'textbackslash': '\\\\', + 'textunderscore': '_', + 'texteuro': u'\N{EURO SIGN}', + 'textasciicircum': u'^', + 'textasciitilde': u'~', + 'textgreater': '>', + 'textless': '<', + 'textbar': '|', + 'backslash': '\\\\', + 'tilde': '~', + 'copyright': u'\N{COPYRIGHT SIGN}', + # \e is mostly inside \code and therefore not escaped. + 'e': '\\', + 'infinity': u'\N{INFINITY}', + 'plusminus': u'\N{PLUS-MINUS SIGN}', + 'leq': u'\N{LESS-THAN OR EQUAL TO}', + 'geq': u'\N{GREATER-THAN OR EQUAL TO}', + 'pi': u'\N{GREEK SMALL LETTER PI}', + 'AA': u'\N{LATIN CAPITAL LETTER A WITH RING ABOVE}', +} + +class SimpleCmdNode(TextNode): + """ A command resulting in simple text. """ + def __init__(self, cmdname, args): + self.text = simplecmd_mapping[cmdname] + + +class BreakNode(DocNode): + """ A line break. """ + def __repr__(self): + return 'BR' + + +class CommandNode(DocNode): + """ A general command. """ + def __init__(self, cmdname, args): + self.cmdname = cmdname + self.args = args + + def __repr__(self): + return '%s(%r, %r)' % (self.__class__.__name__, self.cmdname, self.args) + + def walk(self): + return self.args + + +class DescLineCommandNode(CommandNode): + """ A \\xxxline command. """ + + +class InlineNode(CommandNode): + """ A node with inline markup. """ + def walk(self): + return [] + + +class IndexNode(InlineNode): + """ An index-generating command. """ + def __init__(self, cmdname, args): + self.cmdname = cmdname + # tricky -- this is to make this silent in paragraphs + # while still generating index entries for textonly() + self.args = [] + self.indexargs = args + + +class SectioningNode(CommandNode): + """ A heading node. 
""" + + +class EnvironmentNode(DocNode): + """ An environment. """ + def __init__(self, envname, args, content): + self.envname = envname + self.args = args + self.content = content + + def __repr__(self): + return 'EnvironmentNode(%r, %r, %r)' % (self.envname, + self.args, self.content) + + def walk(self): + return [self.content] + + +class DescEnvironmentNode(EnvironmentNode): + """ An xxxdesc environment. """ + def __init__(self, envname, args, content): + self.envname = envname + self.args = args + self.additional = [] + self.content = content + + def __repr__(self): + return 'DescEnvironmentNode(%r, %r, %r)' % (self.envname, + self.args, self.content) + + +class TableNode(EnvironmentNode): + def __init__(self, numcols, headings, lines): + self.numcols = numcols + self.headings = headings + self.lines = lines + + def __repr__(self): + return 'TableNode(%r, %r, %r)' % (self.numcols, + self.headings, self.lines) + + def walk(self): + return [] + + +class VerbatimNode(DocNode): + """ A verbatim code block. """ + def __init__(self, content): + self.content = content + + def __repr__(self): + return 'VerbatimNode(%r)' % self.content + + +class ListNode(DocNode): + """ A list. """ + def __init__(self, items): + self.items = items + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, self.items) + + def walk(self): + return [item[1] for item in self.items] + + +class ItemizeNode(ListNode): + """ An enumeration with bullets. """ + + +class EnumerateNode(ListNode): + """ An enumeration with numbers. """ + + +class DescriptionNode(ListNode): + """ A description list. """ + + +class DefinitionsNode(ListNode): + """ A definition list. """ + + +class ProductionListNode(ListNode): + """ A grammar production list. 
""" diff --git a/converter/filenamemap.py b/converter/filenamemap.py new file mode 100644 index 000000000..6f556d83a --- /dev/null +++ b/converter/filenamemap.py @@ -0,0 +1,632 @@ +# -*- coding: utf-8 -*- +""" + Map LaTeX filenames to ReST filenames + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +# '' means: use same name, strip prefix if applicable. +# None means: don't translate at all. + +_mapping = { + 'lib': { + '__newname__' : 'modules', + + 'asttable': '', + 'compiler': '', + 'distutils': '', + 'email': '', + 'emailcharsets': 'email.charset', + 'emailencoders': 'email.encoders', + 'emailexc': 'email.errors', + 'emailgenerator': 'email.generator', + 'emailheaders': 'email.header', + 'emailiter': 'email.iterators', + 'emailmessage': 'email.message', + 'emailmimebase': 'email.mime', + 'emailparser': 'email.parser', + 'emailutil': 'email.util', + 'libaifc': '', + 'libanydbm': '', + 'libarray': '', + 'libascii': 'curses.ascii', + 'libast': '', + 'libasynchat': '', + 'libasyncore': '', + 'libatexit': '', + 'libaudioop': '', + 'libbase64': '', + 'libbasehttp': 'basehttpserver', + 'libbastion': '', + 'libbinascii': '', + 'libbinhex': '', + 'libbisect': '', + 'libbltin': '__builtin__', + 'libbsddb': '', + 'libbz2': '', + 'libcalendar': '', + 'libcfgparser': 'configparser', + 'libcgihttp': 'cgihttpserver', + 'libcgi': '', + 'libcgitb': '', + 'libchunk': '', + 'libcmath': '', + 'libcmd': '', + 'libcodecs': '', + 'libcodeop': '', + 'libcode': '', + 'libcollections': '', + 'libcolorsys': '', + 'libcommands': '', + 'libcompileall': '', + 'libcontextlib': '', + 'libcookielib': '', + 'libcookie': '', + 'libcopyreg': 'copy_reg', + 'libcopy': '', + 'libcrypt': '', + 'libcsv': '', + 'libctypes': '', + 'libcursespanel': 'curses.panel', + 'libcurses': '', + 'libdatetime': '', + 'libdbhash': '', + 'libdbm': '', + 'libdecimal': '', + 'libdifflib': '', + 'libdircache': '', + 'libdis': '', + 'libdl': '', + 
'libdoctest': '', + 'libdocxmlrpc': 'docxmlrpcserver', + 'libdumbdbm': '', + 'libdummythreading': 'dummy_threading', + 'libdummythread': 'dummy_thread', + 'liberrno': '', + 'libetree': 'xml.etree.elementtree', + 'libfcntl': '', + 'libfilecmp': '', + 'libfileinput': '', + 'libfnmatch': '', + 'libformatter': '', + 'libfpectl': '', + 'libfpformat': '', + 'libftplib': '', + 'libfunctools': '', + 'libfuture': '__future__', + 'libgc': '', + 'libgdbm': '', + 'libgetopt': '', + 'libgetpass': '', + 'libgettext': '', + 'libglob': '', + 'libgrp': '', + 'libgzip': '', + 'libhashlib': '', + 'libheapq': '', + 'libhmac': '', + 'libhotshot': '', + 'libhtmllib': '', + 'libhtmlparser': '', + 'libhttplib': '', + 'libimageop': '', + 'libimaplib': '', + 'libimgfile': '', + 'libimghdr': '', + 'libimp': '', + 'libinspect': '', + 'libitertools': '', + 'libjpeg': '', + 'libkeyword': '', + 'liblinecache': '', + 'liblocale': '', + 'liblogging': '', + 'libmailbox': '', + 'libmailcap': '', + 'libmain': '__main__', + 'libmarshal': '', + 'libmath': '', + 'libmd5': '', + 'libmhlib': '', + 'libmimetools': '', + 'libmimetypes': '', + 'libmimewriter': '', + 'libmimify': '', + 'libmmap': '', + 'libmodulefinder': '', + 'libmsilib': '', + 'libmsvcrt': '', + 'libmultifile': '', + 'libmutex': '', + 'libnetrc': '', + 'libnew': '', + 'libnis': '', + 'libnntplib': '', + 'liboperator': '', + 'liboptparse': '', + 'libos': '', + 'libossaudiodev': '', + 'libparser': '', + 'libpdb': '', + 'libpickle': '', + 'libpickletools': '', + 'libpipes': '', + 'libpkgutil': '', + 'libplatform': '', + 'libpopen2': '', + 'libpoplib': '', + 'libposixpath': 'os.path', + 'libposix': '', + 'libpprint': '', + 'libprofile': '', + 'libpty': '', + 'libpwd': '', + 'libpyclbr': '', + 'libpycompile': 'py_compile', + 'libpydoc': '', + 'libpyexpat': '', + 'libqueue': '', + 'libquopri': '', + 'librandom': '', + 'libreadline': '', + 'librepr': '', + 'libre': '', + 'libresource': '', + 'librexec': '', + 'librfc822': '', + 'librlcompleter': 
'', + 'librobotparser': '', + 'librunpy': '', + 'libsched': '', + 'libselect': '', + 'libsets': '', + 'libsgmllib': '', + 'libsha': '', + 'libshelve': '', + 'libshlex': '', + 'libshutil': '', + 'libsignal': '', + 'libsimplehttp': 'simplehttpserver', + 'libsimplexmlrpc': 'simplexmlrpcserver', + 'libsite': '', + 'libsmtpd': '', + 'libsmtplib': '', + 'libsndhdr': '', + 'libsocket': '', + 'libsocksvr': 'socketserver', + 'libspwd': '', + 'libsqlite3': '', + 'libstat': '', + 'libstatvfs': '', + 'libstringio': '', + 'libstringprep': '', + 'libstring': '', + 'libstruct': '', + 'libsunaudio': '', + 'libsunau': '', + 'libsubprocess': '', + 'libsymbol': '', + 'libsyslog': '', + 'libsys': '', + 'libtabnanny': '', + 'libtarfile': '', + 'libtelnetlib': '', + 'libtempfile': '', + 'libtermios': '', + 'libtest': '', + 'libtextwrap': '', + 'libthreading': '', + 'libthread': '', + 'libtimeit': '', + 'libtime': '', + 'libtokenize': '', + 'libtoken': '', + 'libtraceback': '', + 'libtrace': '', + 'libtty': '', + 'libturtle': '', + 'libtypes': '', + 'libunicodedata': '', + 'libunittest': '', + 'liburllib2': '', + 'liburllib': '', + 'liburlparse': '', + 'libuserdict': '', + 'libuser': '', + 'libuuid': '', + 'libuu': '', + 'libwarnings': '', + 'libwave': '', + 'libweakref': '', + 'libwebbrowser': '', + 'libwhichdb': '', + 'libwinreg': '_winreg', + 'libwinsound': '', + 'libwsgiref': '', + 'libxdrlib': '', + 'libxmllib': '', + 'libxmlrpclib': '', + 'libzipfile': '', + 'libzipimport': '', + 'libzlib': '', + 'tkinter': '', + 'xmldomminidom': 'xml.dom.minidom', + 'xmldompulldom': 'xml.dom.pulldom', + 'xmldom': 'xml.dom', + 'xmletree': 'xml.etree', + 'xmlsaxhandler': 'xml.sax.handler', + 'xmlsaxreader': 'xml.sax.reader', + 'xmlsax': 'xml.sax', + 'xmlsaxutils': 'xml.sax.utils', + 'libal': '', + 'libcd': '', + 'libfl': '', + 'libfm': '', + 'libgl': '', + 'libposixfile': '', + + # specials + 'libundoc': '', + 'libintro': '', + + # -> ref + 'libconsts': 'reference/consts', + 'libexcs': 
'reference/exceptions', + 'libfuncs': 'reference/functions', + 'libobjs': 'reference/objects', + 'libstdtypes': 'reference/stdtypes', + + # mainfiles + 'lib': None, + 'mimelib': None, + + # obsolete + 'libni': None, + 'libcmpcache': None, + 'libcmp': None, + + # chapter overviews + 'fileformats': '', + 'filesys': '', + 'frameworks': '', + 'i18n': '', + 'internet': '', + 'ipc': '', + 'language': '', + 'archiving': '', + 'custominterp': '', + 'datatypes': '', + 'development': '', + 'markup': '', + 'modules': '', + 'netdata': '', + 'numeric': '', + 'persistence': '', + 'windows': '', + 'libsun': '', + 'libmm': '', + 'liballos': '', + 'libcrypto': '', + 'libsomeos': '', + 'libsgi': '', + 'libmisc': '', + 'libpython': '', + 'librestricted': '', + 'libstrings': '', + 'libunix': '', + }, + + 'ref': { + '__newname__': 'reference', + 'ref': None, + 'ref1': 'introduction', + 'ref2': 'lexical_analysis', + 'ref3': 'datamodel', + 'ref4': 'executionmodel', + 'ref5': 'expressions', + 'ref6': 'simple_stmts', + 'ref7': 'compound_stmts', + 'ref8': 'toplevel_components', + }, + + 'tut': { + '__newname__': 'tutorial', + '__labelprefix__': 'tut-', + 'tut': 'tutorial:split', + 'glossary': 'glossary', + }, + + 'api': { + '__newname__': 'c-api', + '__defaulthighlightlang__': 'c', + 'api': None, + + 'abstract': '', + 'concrete': '', + 'exceptions': '', + 'init': '', + 'intro': '', + 'memory': '', + 'newtypes': '', + 'refcounting': '', + 'utilities': '', + 'veryhigh': '', + }, + + 'ext': { + '__newname__': 'extending', + '__defaulthighlightlang__': 'c', + 'ext': None, + + 'building': '', + 'embedding': '', + 'extending': 'extending', + 'newtypes': '', + 'windows': '', + }, + + 'dist': { + '__newname__': 'distutils', + 'dist': 'distutils:split', + 'sysconfig': '', + }, + + 'mac': { + '__newname__': 'macmodules', + 'mac': None, + + 'libaepack': 'aepack', + 'libaetools': 'aetools', + 'libaetypes': 'aetypes', + 'libautogil': 'autogil', + 'libcolorpicker': 'colorpicker', + 'libframework': 
'framework', + 'libgensuitemodule': 'gensuitemodule', + 'libmacic': 'macic', + 'libmacos': 'macos', + 'libmacostools': 'macostools', + 'libmac': 'mac', + 'libmacui': 'macui', + 'libminiae': 'miniae', + 'libscrap': 'scrap', + 'scripting': '', + 'toolbox': '', + 'undoc': '', + 'using': '', + + }, + + 'inst': { + '__newname__': 'install', + '__defaulthighlightlang__': 'none', + 'inst': 'index', + }, + + 'whatsnew': { + '__newname__': 'whatsnew', + 'whatsnew20': '2.0', + 'whatsnew21': '2.1', + 'whatsnew22': '2.2', + 'whatsnew23': '2.3', + 'whatsnew24': '2.4', + 'whatsnew25': '2.5', + 'whatsnew26': '2.6', + }, + + 'commontex': { + '__newname__': '', + 'boilerplate': None, + 'patchlevel': None, + 'copyright': '', + 'license': '', + 'reportingbugs': 'bugs', + }, +} + +fn_mapping = {} + +for dir, files in _mapping.iteritems(): + newmap = fn_mapping[dir] = {} + for fn in files: + if not fn.startswith('_') and files[fn] == '': + if fn.startswith(dir): + newmap[fn] = fn[len(dir):] + else: + newmap[fn] = fn + else: + newmap[fn] = files[fn] + + +# new directories to create +dirs_to_make = [ + 'c-api', + 'data', + 'distutils', + 'documenting', + 'extending', + 'includes', + 'includes/sqlite3', + 'install', + 'macmodules', + 'modules', + 'reference', + 'tutorial', + 'whatsnew', +] + +# includefiles for \verbatiminput and \input +includes_mapping = { + '../../Parser/Python.asdl': None, # XXX + '../../Lib/test/exception_hierarchy.txt': None, + 'emailmessage': 'email.message.rst', + 'emailparser': 'email.parser.rst', + 'emailgenerator': 'email.generator.rst', + 'emailmimebase': 'email.mime.rst', + 'emailheaders': 'email.header.rst', + 'emailcharsets': 'email.charset.rst', + 'emailencoders': 'email.encoders.rst', + 'emailexc': 'email.errors.rst', + 'emailutil': 'email.util.rst', + 'emailiter': 'email.iterators.rst', +} + +# new files to copy from converter/newfiles +newfiles_mapping = { + 'conf.py': 'conf.py', + 'TODO': 'TODO', + + 'ref_index.rst': 'reference/index.rst', + 
'tutorial_index.rst': 'tutorial/index.rst', + 'modules_index.rst': 'modules/index.rst', + 'mac_index.rst': 'macmodules/index.rst', + 'ext_index.rst': 'extending/index.rst', + 'api_index.rst': 'c-api/index.rst', + 'dist_index.rst': 'distutils/index.rst', + 'contents.rst': 'contents.rst', + 'about.rst': 'about.rst', + + 'doc.rst': 'documenting/index.rst', + 'doc_intro.rst': 'documenting/intro.rst', + 'doc_style.rst': 'documenting/style.rst', + 'doc_sphinx.rst': 'documenting/sphinx.rst', + 'doc_rest.rst': 'documenting/rest.rst', + 'doc_markup.rst': 'documenting/markup.rst', +} + +# copy files from the old doc tree +copyfiles_mapping = { + 'api/refcounts.dat': 'data', + 'lib/email-*.py': 'includes', + 'lib/minidom-example.py': 'includes', + 'lib/tzinfo-examples.py': 'includes', + 'lib/sqlite3/*.py': 'includes/sqlite3', + 'ext/*.c': 'includes', + 'ext/*.py': 'includes', + 'commontex/typestruct.h': 'includes', +} + +# files to rename +rename_mapping = { + 'tutorial/1_tutorial.rst': None, # delete + 'tutorial/2_tutorial.rst': 'tutorial/appetite.rst', + 'tutorial/3_tutorial.rst': 'tutorial/interpreter.rst', + 'tutorial/4_tutorial.rst': 'tutorial/introduction.rst', + 'tutorial/5_tutorial.rst': 'tutorial/controlflow.rst', + 'tutorial/6_tutorial.rst': 'tutorial/datastructures.rst', + 'tutorial/7_tutorial.rst': 'tutorial/modules.rst', + 'tutorial/8_tutorial.rst': 'tutorial/inputoutput.rst', + 'tutorial/9_tutorial.rst': 'tutorial/errors.rst', + 'tutorial/10_tutorial.rst': 'tutorial/classes.rst', + 'tutorial/11_tutorial.rst': 'tutorial/stdlib.rst', + 'tutorial/12_tutorial.rst': 'tutorial/stdlib2.rst', + 'tutorial/13_tutorial.rst': 'tutorial/whatnow.rst', + 'tutorial/14_tutorial.rst': 'tutorial/interactive.rst', + 'tutorial/15_tutorial.rst': 'tutorial/floatingpoint.rst', + 'tutorial/16_tutorial.rst': None, # delete + + 'distutils/1_distutils.rst': 'distutils/introduction.rst', + 'distutils/2_distutils.rst': 'distutils/setupscript.rst', + 'distutils/3_distutils.rst': 
'distutils/configfile.rst', + 'distutils/4_distutils.rst': 'distutils/sourcedist.rst', + 'distutils/5_distutils.rst': 'distutils/builtdist.rst', + 'distutils/6_distutils.rst': 'distutils/packageindex.rst', + 'distutils/7_distutils.rst': 'distutils/uploading.rst', + 'distutils/8_distutils.rst': 'distutils/examples.rst', + 'distutils/9_distutils.rst': 'distutils/extending.rst', + 'distutils/10_distutils.rst': 'distutils/commandref.rst', + 'distutils/11_distutils.rst': 'distutils/apiref.rst', +} + +# toctree entries +toctree_mapping = { + 'mac/scripting': ['gensuitemodule', 'aetools', 'aepack', 'aetypes', 'miniae'], + 'mac/toolbox': ['colorpicker'], + 'lib/libstrings': ['string', 're', 'struct', 'difflib', 'stringio', 'textwrap', + 'codecs', 'unicodedata', 'stringprep', 'fpformat'], + 'lib/datatypes': ['datetime', 'calendar', 'collections', 'heapq', 'bisect', + 'array', 'sets', 'sched', 'mutex', 'queue', 'weakref', + 'userdict', 'types', 'new', 'copy', 'pprint', 'repr'], + 'lib/numeric': ['math', 'cmath', 'decimal', 'random', 'itertools', 'functools', + 'operator'], + 'lib/netdata': ['email', 'mailcap', 'mailbox', 'mhlib', 'mimetools', 'mimetypes', + 'mimewriter', 'mimify', 'multifile', 'rfc822', + 'base64', 'binhex', 'binascii', 'quopri', 'uu'], + 'lib/markup': ['htmlparser', 'sgmllib', 'htmllib', 'pyexpat', 'xml.dom', + 'xml.dom.minidom', 'xml.dom.pulldom', 'xml.sax', 'xml.sax.handler', + 'xml.sax.utils', 'xml.sax.reader', 'xml.etree.elementtree'], + 'lib/fileformats': ['csv', 'configparser', 'robotparser', 'netrc', 'xdrlib'], + 'lib/libcrypto': ['hashlib', 'hmac', 'md5', 'sha'], + 'lib/filesys': ['os.path', 'fileinput', 'stat', 'statvfs', 'filecmp', + 'tempfile', 'glob', 'fnmatch', 'linecache', 'shutil', 'dircache'], + 'lib/archiving': ['zlib', 'gzip', 'bz2', 'zipfile', 'tarfile'], + 'lib/persistence': ['pickle', 'copy_reg', 'shelve', 'marshal', 'anydbm', + 'whichdb', 'dbm', 'gdbm', 'dbhash', 'bsddb', 'dumbdbm', + 'sqlite3'], + 'lib/liballos': ['os', 'time', 
'optparse', 'getopt', 'logging', 'getpass', + 'curses', 'curses.ascii', 'curses.panel', 'platform', + 'errno', 'ctypes'], + 'lib/libsomeos': ['select', 'thread', 'threading', 'dummy_thread', 'dummy_threading', + 'mmap', 'readline', 'rlcompleter'], + 'lib/libunix': ['posix', 'pwd', 'spwd', 'grp', 'crypt', 'dl', 'termios', 'tty', + 'pty', 'fcntl', 'pipes', 'posixfile', 'resource', 'nis', + 'syslog', 'commands'], + 'lib/ipc': ['subprocess', 'socket', 'signal', 'popen2', 'asyncore', 'asynchat'], + 'lib/internet': ['webbrowser', 'cgi', 'cgitb', 'wsgiref', 'urllib', 'urllib2', + 'httplib', 'ftplib', 'poplib', 'imaplib', + 'nntplib', 'smtplib', 'smtpd', 'telnetlib', 'uuid', 'urlparse', + 'socketserver', 'basehttpserver', 'simplehttpserver', + 'cgihttpserver', 'cookielib', 'cookie', 'xmlrpclib', + 'simplexmlrpcserver', 'docxmlrpcserver'], + 'lib/libmm': ['audioop', 'imageop', 'aifc', 'sunau', 'wave', 'chunk', + 'colorsys', 'imghdr', 'sndhdr', 'ossaudiodev'], + 'lib/i18n': ['gettext', 'locale'], + 'lib/frameworks': ['cmd', 'shlex'], + 'lib/development': ['pydoc', 'doctest', 'unittest', 'test'], + 'lib/libpython': ['sys', '__builtin__', '__main__', 'warnings', 'contextlib', + 'atexit', 'traceback', '__future__', 'gc', 'inspect', + 'site', 'user', 'fpectl'], + 'lib/custominterp': ['code', 'codeop'], + 'lib/librestricted': ['rexec', 'bastion'], + 'lib/modules': ['imp', 'zipimport', 'pkgutil', 'modulefinder', 'runpy'], + 'lib/language': ['parser', 'symbol', 'token', 'keyword', 'tokenize', + 'tabnanny', 'pyclbr', 'py_compile', 'compileall', 'dis', + 'pickletools', 'distutils'], + 'lib/compiler': ['ast'], + 'lib/libmisc': ['formatter'], + 'lib/libsgi': ['al', 'cd', 'fl', 'fm', 'gl', 'imgfile', 'jpeg'], + 'lib/libsun': ['sunaudio'], + 'lib/windows': ['msilib', 'msvcrt', '_winreg', 'winsound'], +} + +# map sourcefilename to [pre, post] +amendments_mapping = { + 'license.rst': ['''\ +.. 
highlightlang:: none + +******************* +History and License +******************* + +''', ''], + + 'bugs.rst': ['''\ +************** +Reporting Bugs +************** + +''', ''], + + 'copyright.rst': ['''\ +********* +Copyright +********* + +''', ''], + + 'install/index.rst': ['''\ +.. _install-index: + +''', ''], +} diff --git a/converter/latexparser.py b/converter/latexparser.py new file mode 100644 index 000000000..2ca126d76 --- /dev/null +++ b/converter/latexparser.py @@ -0,0 +1,697 @@ +# -*- coding: utf-8 -*- +""" + Python documentation LaTeX file parser + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + For more documentation, look into the ``restwriter.py`` file. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +from .docnodes import CommentNode, RootNode, NodeList, ParaSepNode, \ + TextNode, EmptyNode, NbspNode, SimpleCmdNode, BreakNode, CommandNode, \ + DescLineCommandNode, InlineNode, IndexNode, SectioningNode, \ + EnvironmentNode, DescEnvironmentNode, TableNode, VerbatimNode, \ + ListNode, ItemizeNode, EnumerateNode, DescriptionNode, \ + DefinitionsNode, ProductionListNode + +from .util import umlaut, empty + + +class ParserError(Exception): + def __init__(self, msg, lineno): + Exception.__init__(self, msg, lineno) + + def __str__(self): + return '%s, line %s' % self.args + + +def generic_command(name, argspec, nodetype=CommandNode): + def handle(self): + args = self.parse_args('\\'+name, argspec) + return nodetype(name, args) + return handle + +def sectioning_command(name): + """ Special handling for sectioning commands: move labels directly following + a sectioning command before it, as required by reST. 
""" + def handle(self): + args = self.parse_args('\\'+name, 'M') + snode = SectioningNode(name, args) + for l, t, v, r in self.tokens: + if t == 'command' and v == 'label': + largs = self.parse_args('\\label', 'T') + snode.args[0] = NodeList([snode.args[0], CommandNode('label', largs)]) + break + if t == 'text': + if not v.strip(): + # discard whitespace; after a section that's no problem + continue + self.tokens.push((l, t, v, r)) + break + # no label followed + return snode + return handle + +def generic_environment(name, argspec, nodetype=EnvironmentNode): + def handle(self): + args = self.parse_args(name, argspec) + return nodetype(name, args, self.parse_until(self.environment_end)) + return handle + + +class DocParserMeta(type): + def __init__(cls, name, bases, dict): + for nodetype, commands in cls.generic_commands.iteritems(): + for cmdname, argspec in commands.iteritems(): + setattr(cls, 'handle_' + cmdname, + generic_command(cmdname, argspec, nodetype)) + + for cmdname in cls.sectioning_commands: + setattr(cls, 'handle_' + cmdname, sectioning_command(cmdname)) + + for nodetype, envs in cls.generic_envs.iteritems(): + for envname, argspec in envs.iteritems(): + setattr(cls, 'handle_%s_env' % envname, + generic_environment(envname, argspec, nodetype)) + + +class DocParser(object): + """ Parse a Python documentation LaTeX file. 
""" + __metaclass__ = DocParserMeta + + def __init__(self, tokenstream, filename): + self.tokens = tokenstream + self.filename = filename + + def parse(self): + self.rootnode = RootNode(self.filename, None) + self.rootnode.children = self.parse_until(None) + self.rootnode.transform() + return self.rootnode + + def parse_until(self, condition=None, endatbrace=False): + nodelist = NodeList() + bracelevel = 0 + for l, t, v, r in self.tokens: + if condition and condition(t, v, bracelevel): + return nodelist.flatten() + if t == 'command': + if len(v) == 1 and not v.isalpha(): + nodelist.append(self.handle_special_command(v)) + continue + handler = getattr(self, 'handle_' + v, None) + if not handler: + raise ParserError('no handler for \\%s command' % v, l) + nodelist.append(handler()) + elif t == 'bgroup': + bracelevel += 1 + elif t == 'egroup': + if bracelevel == 0 and endatbrace: + return nodelist.flatten() + bracelevel -= 1 + elif t == 'comment': + nodelist.append(CommentNode(v)) + elif t == 'tilde': + nodelist.append(NbspNode()) + elif t == 'mathmode': + pass # ignore math mode + elif t == 'parasep': + nodelist.append(ParaSepNode()) + else: + # includes 'boptional' and 'eoptional' which don't have a + # special meaning in text + nodelist.append(TextNode(v)) + return nodelist.flatten() + + def parse_args(self, cmdname, argspec): + """ Helper to parse arguments of a command. 
""" + # argspec: M = mandatory, T = mandatory, check text-only, + # O = optional, Q = optional, check text-only + args = [] + def optional_end(type, value, bracelevel): + return type == 'eoptional' and bracelevel == 0 + + for i, c in enumerate(argspec): + assert c in 'OMTQ' + nextl, nextt, nextv, nextr = self.tokens.pop() + while nextt == 'comment' or (nextt == 'text' and nextv.isspace()): + nextl, nextt, nextv, nextr = self.tokens.pop() + + if c in 'OQ': + if nextt == 'boptional': + arg = self.parse_until(optional_end) + if c == 'Q' and not isinstance(arg, TextNode): + raise ParserError('%s: argument %d must be text only' % + (cmdname, i), nextl) + args.append(arg) + else: + # not given + args.append(EmptyNode()) + self.tokens.push((nextl, nextt, nextv, nextr)) + continue + + if nextt == 'bgroup': + arg = self.parse_until(None, endatbrace=True) + if c == 'T' and not isinstance(arg, TextNode): + raise ParserError('%s: argument %d must be text only' % + (cmdname, i), nextl) + args.append(arg) + else: + if nextt != 'text': + raise ParserError('%s: non-grouped non-text arguments not ' + 'supported' % cmdname, nextl) + args.append(TextNode(nextv[0])) + self.tokens.push((nextl, nextt, nextv[1:], nextr[1:])) + return args + + sectioning_commands = [ + 'chapter', + 'chapter*', + 'section', + 'subsection', + 'subsubsection', + 'paragraph', + ] + + generic_commands = { + CommandNode: { + 'label': 'T', + + 'localmoduletable': '', + 'verbatiminput': 'T', + 'input': 'T', + 'centerline': 'M', + + # Pydoc specific commands + 'versionadded': 'OT', + 'versionchanged': 'OT', + 'deprecated': 'TM', + 'XX' 'X': 'M', # used in dist.tex ;) + + # module-specific + 'declaremodule': 'QTT', + 'platform': 'T', + 'modulesynopsis': 'M', + 'moduleauthor': 'TT', + 'sectionauthor': 'TT', + + # reference lists + 'seelink': 'TMM', + 'seemodule': 'QTM', + 'seepep': 'TMM', + 'seerfc': 'TTM', + 'seetext': 'M', + 'seetitle': 'OMM', + 'seeurl': 'MM', + }, + + DescLineCommandNode: { + # additional items 
for ...desc + 'funcline': 'TM', + 'funclineni': 'TM', + 'methodline': 'QTM', + 'methodlineni': 'QTM', + 'memberline': 'QT', + 'memberlineni': 'QT', + 'dataline': 'T', + 'datalineni': 'T', + 'cfuncline': 'MTM', + 'cmemberline': 'TTT', + 'csimplemacroline': 'T', + 'ctypeline': 'QT', + 'cvarline': 'TT', + }, + + InlineNode: { + # specials + 'footnote': 'M', + 'frac': 'TT', + 'refmodule': 'QT', + 'citetitle': 'QT', + 'ulink': 'MT', + 'url': 'M', + + # mapped to normal + 'textrm': 'M', + 'b': 'M', + 'email': 'M', # email addresses are recognized by ReST + + # mapped to **strong** + 'textbf': 'M', + 'strong': 'M', + + # mapped to *emphasized* + 'textit': 'M', + 'emph': 'M', + + # mapped to ``code`` + 'bfcode': 'M', + 'code': 'M', + 'samp': 'M', + 'character': 'M', + 'texttt': 'M', + + # mapped to `default role` + 'var': 'M', + + # mapped to [brackets] + 'optional': 'M', + + # mapped to :role:`text` + 'cdata': 'M', + 'cfunction': 'M', # -> :cfunc: + 'class': 'M', + 'command': 'M', + 'constant': 'M', # -> :const: + 'csimplemacro': 'M', # -> :cmacro: + 'ctype': 'M', + 'data': 'M', # NEW + 'dfn': 'M', + 'envvar': 'M', + 'exception': 'M', # -> :exc: + 'file': 'M', + 'filenq': 'M', + 'filevar': 'M', + 'function': 'M', # -> :func: + 'grammartoken': 'M', # -> :token: + 'guilabel': 'M', + 'kbd': 'M', + 'keyword': 'M', + 'mailheader': 'M', + 'makevar': 'M', + 'manpage': 'MM', + 'member': 'M', + 'menuselection': 'M', + 'method': 'M', # -> :meth: + 'mimetype': 'M', + 'module': 'M', # -> :mod: + 'newsgroup': 'M', + 'option': 'M', + 'pep': 'M', + 'program': 'M', + 'programopt': 'M', # -> :option: + 'longprogramopt': 'M', # -> :option: + 'ref': 'T', + 'regexp': 'M', + 'rfc': 'M', + 'token': 'M', + + 'NULL': '', + # these are defined via substitutions + 'shortversion': '', + 'version': '', + 'today': '', + }, + + SimpleCmdNode: { + # these are directly mapped to text + 'AA': '', # A as in Angstrom + 'ASCII': '', + 'C': '', + 'Cpp': '', + 'EOF': '', + 'LaTeX': '', + 'POSIX': '', + 
'UNIX': '', + 'Unix': '', + 'backslash': '', + 'copyright': '', + 'e': '', # backslash + 'geq': '', + 'infinity': '', + 'ldots': '', + 'leq': '', + 'moreargs': '', + 'pi': '', + 'plusminus': '', + 'sub': '', # menu separator + 'textbackslash': '', + 'textunderscore': '', + 'texteuro': '', + 'textasciicircum': '', + 'textasciitilde': '', + 'textgreater': '', + 'textless': '', + 'textbar': '', + 'tilde': '', + 'unspecified': '', + }, + + IndexNode: { + 'bifuncindex': 'T', + 'exindex': 'T', + 'kwindex': 'T', + 'obindex': 'T', + 'opindex': 'T', + 'refmodindex': 'T', + 'refexmodindex': 'T', + 'refbimodindex': 'T', + 'refstmodindex': 'T', + 'stindex': 'T', + 'index': 'M', + 'indexii': 'TT', + 'indexiii': 'TTT', + 'indexiv': 'TTTT', + 'ttindex': 'T', + 'withsubitem': 'TM', + }, + + # These can be safely ignored + EmptyNode: { + 'setindexsubitem': 'T', + 'tableofcontents': '', + 'makeindex': '', + 'makemodindex': '', + 'maketitle': '', + 'appendix': '', + 'documentclass': 'OM', + 'usepackage': 'OM', + 'noindent': '', + 'protect': '', + 'ifhtml': '', + 'fi': '', + }, + } + + generic_envs = { + EnvironmentNode: { + # generic LaTeX environments + 'abstract': '', + 'quote': '', + 'quotation': '', + + 'notice': 'Q', + 'seealso': '', + 'seealso*': '', + }, + + DescEnvironmentNode: { + # information units + 'datadesc': 'T', + 'datadescni': 'T', + 'excclassdesc': 'TM', + 'excdesc': 'T', + 'funcdesc': 'TM', + 'funcdescni': 'TM', + 'classdesc': 'TM', + 'classdesc*': 'T', + 'memberdesc': 'QT', + 'memberdescni': 'QT', + 'methoddesc': 'QMM', + 'methoddescni': 'QMM', + 'opcodedesc': 'TT', + + 'cfuncdesc': 'MTM', + 'cmemberdesc': 'TTT', + 'csimplemacrodesc': 'T', + 'ctypedesc': 'QT', + 'cvardesc': 'TT', + }, + } + + # ------------------------- special handlers ----------------------------- + + def handle_special_command(self, cmdname): + if cmdname in '{}%$^#&_ ': + # these are just escapes for special LaTeX commands + return TextNode(cmdname) + elif cmdname in '\'`~"c': + # accents and 
umlauts + nextl, nextt, nextv, nextr = self.tokens.next() + if nextt == 'bgroup': + _, nextt, _, _ = self.tokens.next() + if nextt != 'egroup': + raise ParserError('wrong argtype for \\%s' % cmdname, nextl) + return TextNode(cmdname) + if nextt != 'text': + # not nice, but {\~} = ~ + self.tokens.push((nextl, nextt, nextv, nextr)) + return TextNode(cmdname) + c = umlaut(cmdname, nextv[0]) + self.tokens.push((nextl, nextt, nextv[1:], nextr[1:])) + return TextNode(c) + elif cmdname == '\\': + return BreakNode() + raise ParserError('no handler for \\%s command' % cmdname, + self.tokens.peek()[0]) + + def handle_begin(self): + envname, = self.parse_args('begin', 'T') + handler = getattr(self, 'handle_%s_env' % envname.text, None) + if not handler: + raise ParserError('no handler for %s environment' % envname.text, + self.tokens.peek()[0]) + return handler() + + # ------------------------- command handlers ----------------------------- + + def mk_metadata_handler(self, name, mdname=None): + if mdname is None: + mdname = name + def handler(self): + data, = self.parse_args('\\'+name, 'M') + self.rootnode.params[mdname] = data + return EmptyNode() + return handler + + handle_title = mk_metadata_handler(None, 'title') + handle_author = mk_metadata_handler(None, 'author') + handle_authoraddress = mk_metadata_handler(None, 'authoraddress') + handle_date = mk_metadata_handler(None, 'date') + handle_release = mk_metadata_handler(None, 'release') + handle_setshortversion = mk_metadata_handler(None, 'setshortversion', + 'shortversion') + handle_setreleaseinfo = mk_metadata_handler(None, 'setreleaseinfo', + 'releaseinfo') + + def handle_note(self): + note = self.parse_args('\\note', 'M')[0] + return EnvironmentNode('notice', [TextNode('note')], note) + + def handle_warning(self): + warning = self.parse_args('\\warning', 'M')[0] + return EnvironmentNode('notice', [TextNode('warning')], warning) + + def handle_ifx(self): + for l, t, v, r in self.tokens: + if t == 'command' and v == 
'fi': + break + return EmptyNode() + + def handle_c(self): + return self.handle_special_command('c') + + def handle_mbox(self): + return self.parse_args('\\mbox', 'M')[0] + + def handle_leftline(self): + return self.parse_args('\\leftline', 'M')[0] + + def handle_Large(self): + return self.parse_args('\\Large', 'M')[0] + + def handle_pytype(self): + # \pytype{x} is synonymous to \class{x} now + return self.handle_class() + + def handle_nodename(self): + return self.handle_label() + + def handle_verb(self): + # skip delimiter + l, t, v, r = self.tokens.next() + l, t, v, r = self.tokens.next() + assert t == 'text' + node = InlineNode('code', [TextNode(r)]) + # skip delimiter + l, t, v, r = self.tokens.next() + return node + + def handle_locallinewidth(self): + return EmptyNode() + + def handle_linewidth(self): + return EmptyNode() + + def handle_setlength(self): + self.parse_args('\\setlength', 'MM') + return EmptyNode() + + def handle_stmodindex(self): + arg, = self.parse_args('\\stmodindex', 'T') + return CommandNode('declaremodule', [EmptyNode(), + TextNode(u'standard'), + arg]) + + def handle_indexname(self): + return EmptyNode() + + def handle_renewcommand(self): + self.parse_args('\\renewcommand', 'MM') + return EmptyNode() + + # ------------------------- environment handlers ------------------------- + + def handle_document_env(self): + return self.parse_until(self.environment_end) + + handle_sloppypar_env = handle_document_env + handle_flushleft_env = handle_document_env + handle_math_env = handle_document_env + + def handle_verbatim_env(self): + text = [] + for l, t, v, r in self.tokens: + if t == 'command' and v == 'end' : + tok = self.tokens.peekmany(3) + if tok[0][1] == 'bgroup' and \ + tok[1][1] == 'text' and \ + tok[1][2] == 'verbatim' and \ + tok[2][1] == 'egroup': + self.tokens.popmany(3) + break + text.append(r) + return VerbatimNode(TextNode(''.join(text))) + + # involved math markup must be corrected manually + def handle_displaymath_env(self): + 
text = ['XXX: translate this math'] + for l, t, v, r in self.tokens: + if t == 'command' and v == 'end' : + tok = self.tokens.peekmany(3) + if tok[0][1] == 'bgroup' and \ + tok[1][1] == 'text' and \ + tok[1][2] == 'displaymath' and \ + tok[2][1] == 'egroup': + self.tokens.popmany(3) + break + text.append(r) + return VerbatimNode(TextNode(''.join(text))) + + # alltt is different from verbatim because it allows markup + def handle_alltt_env(self): + nodelist = NodeList() + for l, t, v, r in self.tokens: + if self.environment_end(t, v): + break + if t == 'command': + if len(v) == 1 and not v.isalpha(): + nodelist.append(self.handle_special_command(v)) + continue + handler = getattr(self, 'handle_' + v, None) + if not handler: + raise ParserError('no handler for \\%s command' % v, l) + nodelist.append(handler()) + elif t == 'comment': + nodelist.append(CommentNode(v)) + else: + # all else is appended raw + nodelist.append(TextNode(r)) + return VerbatimNode(nodelist.flatten()) + + def handle_itemize_env(self, nodetype=ItemizeNode): + items = [] + # a usecase for nonlocal :) + running = [False] + + def item_condition(t, v, bracelevel): + if self.environment_end(t, v): + del running[:] + return True + if t == 'command' and v == 'item': + return True + return False + + # the text until the first \item is discarded + self.parse_until(item_condition) + while running: + itemname, = self.parse_args('\\item', 'O') + itemcontent = self.parse_until(item_condition) + items.append([itemname, itemcontent]) + return nodetype(items) + + def handle_enumerate_env(self): + return self.handle_itemize_env(EnumerateNode) + + def handle_description_env(self): + return self.handle_itemize_env(DescriptionNode) + + def handle_definitions_env(self): + items = [] + running = [False] + + def item_condition(t, v, bracelevel): + if self.environment_end(t, v): + del running[:] + return True + if t == 'command' and v == 'term': + return True + return False + + # the text until the first \item is 
discarded + self.parse_until(item_condition) + while running: + itemname, = self.parse_args('\\term', 'M') + itemcontent = self.parse_until(item_condition) + items.append([itemname, itemcontent]) + return DefinitionsNode(items) + + def mk_table_handler(self, envname, numcols): + def handle_table(self): + args = self.parse_args('table'+envname, 'TT' + 'M'*numcols) + firstcolformat = args[1].text + headings = args[2:] + lines = [] + for l, t, v, r in self.tokens: + # XXX: everything outside of \linexxx is lost here + if t == 'command': + if v == 'line'+envname: + lines.append(self.parse_args('\\line'+envname, + 'M'*numcols)) + elif v == 'end': + arg = self.parse_args('\\end', 'T') + assert arg[0].text.endswith('table'+envname), arg[0].text + break + for line in lines: + if not empty(line[0]): + line[0] = InlineNode(firstcolformat, [line[0]]) + return TableNode(numcols, headings, lines) + return handle_table + + handle_tableii_env = mk_table_handler(None, 'ii', 2) + handle_longtableii_env = handle_tableii_env + handle_tableiii_env = mk_table_handler(None, 'iii', 3) + handle_longtableiii_env = handle_tableiii_env + handle_tableiv_env = mk_table_handler(None, 'iv', 4) + handle_longtableiv_env = handle_tableiv_env + handle_tablev_env = mk_table_handler(None, 'v', 5) + handle_longtablev_env = handle_tablev_env + + def handle_productionlist_env(self): + env_args = self.parse_args('productionlist', 'Q') + items = [] + for l, t, v, r in self.tokens: + # XXX: everything outside of \production is lost here + if t == 'command': + if v == 'production': + items.append(self.parse_args('\\production', 'TM')) + elif v == 'productioncont': + args = self.parse_args('\\productioncont', 'M') + args.insert(0, EmptyNode()) + items.append(args) + elif v == 'end': + arg = self.parse_args('\\end', 'T') + assert arg[0].text == 'productionlist' + break + node = ProductionListNode(items) + # the argument specifies a production group + node.arg = env_args[0] + return node + + def 
environment_end(self, t, v, bracelevel=0): + if t == 'command' and v == 'end': + self.parse_args('\\end', 'T') + return True + return False diff --git a/converter/newfiles/TODO b/converter/newfiles/TODO new file mode 100644 index 000000000..b5a7ca908 --- /dev/null +++ b/converter/newfiles/TODO @@ -0,0 +1,18 @@ +To do after conversion +====================== + +* fix all references and links marked with `XXX` +* adjust all literal include paths +* remove all non-literal includes +* fix all duplicate labels and undefined label references +* fix the email package docs: add a toctree +* split very large files and add toctrees +* integrate standalone HOWTOs +* find out which files get "comments disabled" metadata +* double backslashes in production lists +* add synopses for each module +* write "About these documents" +* finish "Documenting Python" +* extend copyright.rst +* merge ACKS into about.rst +* fix the "quadruple" index term diff --git a/converter/newfiles/about.rst b/converter/newfiles/about.rst new file mode 100644 index 000000000..043d5214c --- /dev/null +++ b/converter/newfiles/about.rst @@ -0,0 +1,16 @@ +===================== +About these documents +===================== + +These documents are generated from `reStructuredText +`_ sources by *Sphinx*, a document processor +specifically written for the Python documentation. + +In the online version of these documents, you can submit comments and suggest +changes directly on the documentation pages. + +Development of the documentation and its toolchain takes place on the +docs@python.org mailing list. We're always looking for volunteers wanting +to help with the docs, so feel free to send a mail there! + +See :ref:`reporting-bugs` for information how to report bugs in Python itself. \ No newline at end of file diff --git a/converter/newfiles/api_index.rst b/converter/newfiles/api_index.rst new file mode 100644 index 000000000..c64331253 --- /dev/null +++ b/converter/newfiles/api_index.rst @@ -0,0 +1,33 @@ +.. 
_c-api-index: + +################################## + Python/C API Reference Manual +################################## + +:Release: |version| +:Date: |today| + +This manual documents the API used by C and C++ programmers who want to write +extension modules or embed Python. It is a companion to :ref:`extending-index`, +which describes the general principles of extension writing but does not +document the API functions in detail. + +.. warning:: + + The current version of this document is somewhat incomplete. However, most of + the important functions, types and structures are described. + + +.. toctree:: + :maxdepth: 2 + + intro.rst + veryhigh.rst + refcounting.rst + exceptions.rst + utilities.rst + abstract.rst + concrete.rst + init.rst + memory.rst + newtypes.rst diff --git a/converter/newfiles/conf.py b/converter/newfiles/conf.py new file mode 100644 index 000000000..f6cfd0ff4 --- /dev/null +++ b/converter/newfiles/conf.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# +# Python documentation build configuration file +# +# The contents of this file are pickled, so don't put values in the namespace +# that aren't pickleable (module imports are okay, they're removed automatically). +# + +# The default replacements for |version| and |release|: +# The short X.Y version. +version = '2.6' +# The full version, including alpha/beta/rc tags. +release = '2.6a0' +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +today = '' +# Else, today_fmt is used as the format for a strftime call. +today_fmt = '%B %d, %Y' + +# List of files that shouldn't be included in the build. +unused_files = [ + 'whatsnew/2.0.rst', + 'whatsnew/2.1.rst', + 'whatsnew/2.2.rst', + 'whatsnew/2.3.rst', + 'whatsnew/2.4.rst', + 'whatsnew/2.5.rst', + 'macmodules/scrap.rst', + 'modules/xmllib.rst', +] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. 
+last_updated_format = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +use_smartypants = True + +# If true, trailing '()' will be stripped from :func: etc. cross-references. +strip_trailing_parentheses = False diff --git a/converter/newfiles/contents.rst b/converter/newfiles/contents.rst new file mode 100644 index 000000000..bafda1cec --- /dev/null +++ b/converter/newfiles/contents.rst @@ -0,0 +1,21 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + Python Documentation contents +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +.. toctree:: + + whatsnew/2.6.rst + tutorial/index.rst + reference/index.rst + modules/index.rst + macmodules/index.rst + extending/index.rst + c-api/index.rst + distutils/index.rst + install/index.rst + documenting/index.rst + + bugs.rst + about.rst + license.rst + copyright.rst diff --git a/converter/newfiles/dist_index.rst b/converter/newfiles/dist_index.rst new file mode 100644 index 000000000..34eef86db --- /dev/null +++ b/converter/newfiles/dist_index.rst @@ -0,0 +1,28 @@ +.. _distutils-index: + +############################### + Distributing Python Modules +############################### + +:Release: |version| +:Date: |today| + +This document describes the Python Distribution Utilities ("Distutils") from +the module developer's point of view, describing how to use the Distutils to +make Python modules and extensions easily available to a wider audience with +very little overhead for build/release/install mechanics. + +.. toctree:: + :maxdepth: 2 + + introduction.rst + setupscript.rst + configfile.rst + sourcedist.rst + builtdist.rst + packageindex.rst + uploading.rst + examples.rst + extending.rst + commandref.rst + apiref.rst diff --git a/converter/newfiles/doc.rst b/converter/newfiles/doc.rst new file mode 100644 index 000000000..1a3778b09 --- /dev/null +++ b/converter/newfiles/doc.rst @@ -0,0 +1,33 @@ +.. 
_documenting-index: + +###################### + Documenting Python +###################### + + +The Python language has a substantial body of documentation, much of it +contributed by various authors. The markup used for the Python documentation is +`reStructuredText`_, developed by the `docutils`_ project, amended by custom +directives and using a toolset named *Sphinx* to postprocess the HTML output. + +This document describes the style guide for our documentation, the custom +reStructuredText markup introduced to support Python documentation and how it +should be used, as well as the Sphinx build system. + +.. _reStructuredText: http://docutils.sf.net/rst.html +.. _docutils: http://docutils.sf.net/ + +If you're interested in contributing to Python's documentation, there's no need +to write reStructuredText if you're not so inclined; plain text contributions +are more than welcome as well. + +.. toctree:: + + intro.rst + style.rst + rest.rst + markup.rst + sphinx.rst + +.. XXX add credits, thanks etc. + diff --git a/converter/newfiles/doc_intro.rst b/converter/newfiles/doc_intro.rst new file mode 100644 index 000000000..e02ad7de2 --- /dev/null +++ b/converter/newfiles/doc_intro.rst @@ -0,0 +1,29 @@ +Introduction +============ + +Python's documentation has long been considered to be good for a free +programming language. There are a number of reasons for this, the most +important being the early commitment of Python's creator, Guido van Rossum, to +providing documentation on the language and its libraries, and the continuing +involvement of the user community in providing assistance for creating and +maintaining documentation. + +The involvement of the community takes many forms, from authoring to bug reports +to just plain complaining when the documentation could be more complete or +easier to use. + +This document is aimed at authors and potential authors of documentation for +Python. 
More specifically, it is for people contributing to the standard +documentation and developing additional documents using the same tools as the +standard documents. This guide will be less useful for authors using the Python +documentation tools for topics other than Python, and less useful still for +authors not using the tools at all. + +If your interest is in contributing to the Python documentation, but you don't +have the time or inclination to learn reStructuredText and the markup structures +documented here, there's a welcoming place for you among the Python contributors +as well. Any time you feel that you can clarify existing documentation or +provide documentation that's missing, the existing documentation team will +gladly work with you to integrate your text, dealing with the markup for you. +Please don't let the material in this document stand between the documentation +and your desire to help out! \ No newline at end of file diff --git a/converter/newfiles/doc_markup.rst b/converter/newfiles/doc_markup.rst new file mode 100644 index 000000000..59fed0ed1 --- /dev/null +++ b/converter/newfiles/doc_markup.rst @@ -0,0 +1,738 @@ +.. highlightlang:: rest + +Additional Markup Constructs +============================ + +Sphinx adds a lot of new directives and interpreted text roles to standard reST +markup. This section contains the reference material for these facilities. +Documentation for "standard" reST constructs is not included here, though +they are used in the Python documentation. + +XXX: file-wide metadata + +Meta-information markup +----------------------- + +.. describe:: sectionauthor + + Identifies the author of the current section. The argument should include + the author's name such that it can be used for presentation (though it isn't) + and email address. The domain name portion of the address should be lower + case. Example:: + + .. 
sectionauthor:: Guido van Rossum + + Currently, this markup isn't reflected in the output in any way, but it helps + keep track of contributions. + + +Module-specific markup +---------------------- + +The markup described in this section is used to provide information about a +module being documented. Each module should be documented in its own file. +Normally this markup appears after the title heading of that file; a typical +file might start like this:: + + :mod:`parrot` -- Dead parrot access + =================================== + + .. module:: parrot + :platform: Unix, Windows + :synopsis: Analyze and reanimate dead parrots. + .. moduleauthor:: Eric Cleese + .. moduleauthor:: John Idle + +As you can see, the module-specific markup consists of two directives, the +``module`` directive and the ``moduleauthor`` directive. + +.. describe:: module + + This directive marks the beginning of the description of a module (or package + submodule, in which case the name should be fully qualified, including the + package name). + + The ``platform`` option, if present, is a comma-separated list of the + platforms on which the module is available (if it is available on all + platforms, the option should be omitted). The keys are short identifiers; + examples that are in use include "IRIX", "Mac", "Windows", and "Unix". It is + important to use a key which has already been used when applicable. + + The ``synopsis`` option should consist of one sentence describing the + module's purpose -- it is currently only used in the Global Module Index. + +.. describe:: moduleauthor + + The ``moduleauthor`` directive, which can appear multiple times, names the + authors of the module code, just like ``sectionauthor`` names the author(s) + of a piece of documentation. It too does not result in any output currently. + + +.. 
note:: + + It is important to make the section title of a module-describing file + meaningful since that value will be inserted in the table-of-contents trees + in overview files. + + +Information units +----------------- + +There are a number of directives used to describe specific features provided by +modules. Each directive requires one or more signatures to provide basic +information about what is being described, and the content should be the +description. The basic version makes entries in the general index; if no index +entry is desired, you can give the directive option flag ``:noindex:``. The +following example shows all of the features of this directive type:: + + .. function:: spam(eggs) + ham(eggs) + :noindex: + + Spam or ham the foo. + +The signatures of object methods or data attributes should always include the +type name (``.. method:: FileInput.input(...)``), even if it is obvious from the +context which type they belong to; this is to enable consistent +cross-references. If you describe methods belonging to an abstract protocol, +such as "context managers", include a (pseudo-)type name too to make the +index entries more informative. + +The directives are: + +.. describe:: cfunction + + Describes a C function. The signature should be given as in C, e.g.:: + + .. cfunction:: PyObject* PyType_GenericAlloc(PyTypeObject *type, Py_ssize_t nitems) + + This is also used to describe function-like preprocessor macros. The names + of the arguments should be given so they may be used in the description. + + Note that you don't have to backslash-escape asterisks in the signature, + as it is not parsed by the reST inliner. + +.. describe:: cmember + + Describes a C struct member. Example signature:: + + .. cmember:: PyObject* PyTypeObject.tp_bases + + The text of the description should include the range of values allowed, how + the value should be interpreted, and whether the value can be changed. 
+ References to structure members in text should use the ``member`` role. + +.. describe:: cmacro + + Describes a "simple" C macro. Simple macros are macros which are used + for code expansion, but which do not take arguments so cannot be described as + functions. This is not to be used for simple constant definitions. Examples + of its use in the Python documentation include :cmacro:`PyObject_HEAD` and + :cmacro:`Py_BEGIN_ALLOW_THREADS`. + +.. describe:: ctype + + Describes a C type. The signature should just be the type name. + +.. describe:: cvar + + Describes a global C variable. The signature should include the type, such + as:: + + .. cvar:: PyObject* PyClass_Type + +.. describe:: data + + Describes global data in a module, including both variables and values used + as "defined constants." Class and object attributes are not documented + using this environment. + +.. describe:: exception + + Describes an exception class. The signature can, but need not include + parentheses with constructor arguments. + +.. describe:: function + + Describes a module-level function. The signature should include the + parameters, enclosing optional parameters in brackets. Default values can be + given if it enhances clarity. For example:: + + .. function:: Timer.repeat([repeat=3[, number=1000000]]) + + Object methods are not documented using this directive. Bound object methods + placed in the module namespace as part of the public interface of the module + are documented using this, as they are equivalent to normal functions for + most purposes. + + The description should include information about the parameters required and + how they are used (especially whether mutable objects passed as parameters + are modified), side effects, and possible exceptions. A small example may be + provided. + +.. describe:: class + + Describes a class. The signature can include parentheses with parameters + which will be shown as the constructor arguments. + +.. 
describe:: attribute + + Describes an object data attribute. The description should include + information about the type of the data to be expected and whether it may be + changed directly. + +.. describe:: method + + Describes an object method. The parameters should not include the ``self`` + parameter. The description should include similar information to that + described for ``function``. + +.. describe:: opcode + + Describes a Python bytecode instruction. + + +There is also a generic version of these directives: + +.. describe:: describe + + This directive produces the same formatting as the specific ones explained + above but does not create index entries or cross-referencing targets. It is + used, for example, to describe the directives in this document. Example:: + + .. describe:: opcode + + Describes a Python bytecode instruction. + + +Showing code examples +--------------------- + +Examples of Python source code or interactive sessions are represented using +standard reST literal blocks. They are started by a ``::`` at the end of the +preceding paragraph and delimited by indentation. + +Representing an interactive session requires including the prompts and output +along with the Python code. No special markup is required for interactive +sessions. After the last line of input or output presented, there should not be +an "unused" primary prompt; this is an example of what *not* to do:: + + >>> 1 + 1 + 2 + >>> + +Syntax highlighting is handled in a smart way: + +* There is a "highlighting language" for each source file. Per default, + this is ``'python'`` as the majority of files will have to highlight Python + snippets. + +* Within Python highlighting mode, interactive sessions are recognized + automatically and highlighted appropriately. + +* The highlighting language can be changed using the ``highlightlang`` + directive, used as follows:: + + .. highlightlang:: c + + This language is used until the next ``highlightlang`` directive is + encountered. 
+ +* The valid values for the highlighting language are: + + * ``python`` (the default) + * ``c`` + * ``rest`` + * ``none`` (no highlighting) + +* If highlighting with the current language fails, the block is not highlighted + in any way. + +Longer displays of verbatim text may be included by storing the example text in +an external file containing only plain text. The file may be included using the +standard ``include`` directive with the ``literal`` option flag. For example, +to include the Python source file :file:`example.py`, use:: + + .. include:: example.py + :literal: + + +Inline markup +------------- + +As said before, Sphinx uses interpreted text roles to insert semantic markup in +documents. + +The default role is ``var``, as that was one of the most common macros used in +the old LaTeX docs. That means that you can use ```var``` to refer to a +variable named "var". + +For all other roles, you have to write ``:rolename:`content```. + +The following roles refer to objects in modules and are possibly hyperlinked if +a matching identifier is found: + +.. describe:: mod + + The name of a module; a dotted name may be used. This should also be used for + package names. + +.. describe:: func + + The name of a Python function; dotted names may be used. The role text + should include trailing parentheses to enhance readability. The parentheses + are stripped when searching for identifiers. + +.. describe:: data + + The name of a module-level variable. + +.. describe:: const + + The name of a "defined" constant. This may be a C-language ``#define`` + or a Python variable that is not intended to be changed. + +.. describe:: class + + A class name; a dotted name may be used. + +.. describe:: meth + + The name of a method of an object. The role text should include the type + name, method name and the trailing parentheses. A dotted name may be used. + +.. describe:: attr + + The name of a data attribute of an object. + +.. describe:: exc + + The name of an exception. 
A dotted name may be used. + +The name enclosed in this markup can include a module name and/or a class name. +For example, ``:func:`filter``` could refer to a function named ``filter`` in +the current module, or the built-in function of that name. In contrast, +``:func:`foo.filter``` clearly refers to the ``filter`` function in the ``foo`` +module. + +A similar heuristic is used to determine whether the name is an attribute of +the currently documented class. + +The following roles create cross-references to C-language constructs if they +are defined in the API documentation: + +.. describe:: cdata + + The name of a C-language variable. + +.. describe:: cfunc + + The name of a C-language function. Should include trailing parentheses. + +.. describe:: cmacro + + The name of a "simple" C macro, as defined above. + +.. describe:: ctype + + The name of a C-language type. + + +The following role does possibly create a cross-reference, but does not refer +to objects: + +.. describe:: token + + The name of a grammar token (used in the reference manual to create links + between production displays). + +--------- + +The following roles don't do anything special except formatting the text +in a different style: + +.. describe:: command + + The name of an OS-level command, such as ``rm``. + +.. describe:: dfn + + Mark the defining instance of a term in the text. (No index entries are + generated.) + +.. describe:: envvar + + An environment variable. Index entries are generated. + +.. describe:: file + + The name of a file or directory. + +.. XXX: filenq, filevar + +.. describe:: guilabel + + Labels presented as part of an interactive user interface should be marked + using ``guilabel``. This includes labels from text-based interfaces such as + those created using :mod:`curses` or other text-based libraries. 
Any label + used in the interface should be marked with this role, including button + labels, window titles, field names, menu and menu selection names, and even + values in selection lists. + +.. describe:: kbd + + Mark a sequence of keystrokes. What form the key sequence takes may depend + on platform- or application-specific conventions. When there are no relevant + conventions, the names of modifier keys should be spelled out, to improve + accessibility for new users and non-native speakers. For example, an + *xemacs* key sequence may be marked like ``:kbd:`C-x C-f```, but without + reference to a specific application or platform, the same sequence should be + marked as ``:kbd:`Control-x Control-f```. + +.. describe:: keyword + + The name of a keyword in a programming language. + +.. describe:: mailheader + + The name of an RFC 822-style mail header. This markup does not imply that + the header is being used in an email message, but can be used to refer to any + header of the same "style." This is also used for headers defined by the + various MIME specifications. The header name should be entered in the same + way it would normally be found in practice, with the camel-casing conventions + being preferred where there is more than one common usage. For example: + ``:mailheader:`Content-Type```. + +.. describe:: makevar + + The name of a :command:`make` variable. + +.. describe:: manpage + + A reference to a Unix manual page including the section, + e.g. ``:manpage:`ls(1)```. + +.. describe:: menuselection + + Menu selections should be marked using the ``menuselection`` role. This is + used to mark a complete sequence of menu selections, including selecting + submenus and choosing a specific operation, or any subsequence of such a + sequence. The names of individual selections should be separated by + ``-->``. 
+ + For example, to mark the selection "Start > Programs", use this markup:: + + :menuselection:`Start --> Programs` + + When including a selection that includes some trailing indicator, such as the + ellipsis some operating systems use to indicate that the command opens a + dialog, the indicator should be omitted from the selection name. + +.. describe:: mimetype + + The name of a MIME type, or a component of a MIME type (the major or minor + portion, taken alone). + +.. describe:: newsgroup + + The name of a Usenet newsgroup. + +.. describe:: option + + A command-line option to an executable program. The leading hyphen(s) must + be included. + +.. describe:: program + + The name of an executable program. This may differ from the file name for + the executable for some platforms. In particular, the ``.exe`` (or other) + extension should be omitted for Windows programs. + +.. describe:: regexp + + A regular expression. Quotes should not be included. + +.. describe:: var + + A Python or C variable or parameter name. + + +The following roles generate external links: + +.. describe:: pep + + A reference to a Python Enhancement Proposal. This generates appropriate + index entries. The text "PEP *number*\ " is generated; in the HTML output, + this text is a hyperlink to an online copy of the specified PEP. + +.. describe:: rfc + + A reference to an Internet Request for Comments. This generates appropriate + index entries. The text "RFC *number*\ " is generated; in the HTML output, + this text is a hyperlink to an online copy of the specified RFC. + + +Note that there are no special roles for including hyperlinks as you can use +the standard reST markup for that purpose. + + +.. 
_doc-ref-role: + +Cross-linking markup +-------------------- + +To support cross-referencing to arbitrary sections in the documentation, the +standard reST labels are "abused" a bit: Every label must precede a section +title; and every label name must be unique throughout the entire documentation +source. + +You can then reference to these sections using the ``:ref:`label-name``` role. + +Example:: + + .. _my-reference-label: + + Section to cross-reference + -------------------------- + + This is the text of the section. + + It refers to the section itself, see :ref:`my-reference-label`. + +The ``:ref:`` invocation is replaced with the section title. + + +Paragraph-level markup +---------------------- + +These directives create short paragraphs and can be used inside information +units as well as normal text: + +.. describe:: note + + An especially important bit of information about an API that a user should be + aware of when using whatever bit of API the note pertains to. The content of + the directive should be written in complete sentences and include all + appropriate punctuation. + + Example:: + + .. note:: + + This function is not suitable for sending spam e-mails. + +.. describe:: warning + + An important bit of information about an API that a user should be very aware + of when using whatever bit of API the warning pertains to. The content of + the directive should be written in complete sentences and include all + appropriate punctuation. This differs from ``note`` in that it is recommended + over ``note`` for information regarding security. + +.. describe:: versionadded + + This directive documents the version of Python which added the described + feature to the library or C API. When this applies to an entire module, it + should be placed at the top of the module section before any prose. + + The first argument must be given and is the version in question; you can add + a second argument consisting of a *brief* explanation of the change. 
+ + Example:: + + .. versionadded:: 2.5 + The `spam` parameter. + + Note that there must be no blank line between the directive head and the + explanation; this is to make these blocks visually continuous in the markup. + +.. describe:: versionchanged + + Similar to ``versionadded``, but describes when and what changed in the named + feature in some way (new parameters, changed side effects, etc.). + +-------------- + +.. describe:: seealso + + Many sections include a list of references to module documentation or + external documents. These lists are created using the ``seealso`` directive. + + The ``seealso`` directive is typically placed in a section just before any + sub-sections. For the HTML output, it is shown boxed off from the main flow + of the text. + + The content of the ``seealso`` directive should be a reST definition list. + Example:: + + .. seealso:: + + Module :mod:`zipfile` + Documentation of the :mod:`zipfile` standard module. + + `GNU tar manual, Basic Tar Format `_ + Documentation for tar archive files, including GNU tar extensions. + +.. describe:: rubric + + This directive creates a paragraph heading that is not used to create a + table of contents node. It is currently used for the "Footnotes" caption. + +.. describe:: centered + + This directive creates a centered boldfaced paragraph. Use it as follows:: + + .. centered:: + + Paragraph contents. + + +Table-of-contents markup +------------------------ + +Since reST does not have facilities to interconnect several documents, or split +documents into multiple output files, Sphinx uses a custom directive to add +relations between the single files the documentation is made of, as well as +tables of contents. The ``toctree`` directive is the central element. + +.. describe:: toctree + + This directive inserts a "TOC tree" at the current location, using the + individual TOCs (including "sub-TOC trees") of the files given in the + directive body. 
A numeric ``maxdepth`` option may be given to indicate the + depth of the tree; by default, all levels are included. + + Consider this example (taken from the library reference index):: + + .. toctree:: + :maxdepth: 2 + + intro.rst + strings.rst + datatypes.rst + numeric.rst + (many more files listed here) + + This accomplishes two things: + + * Tables of contents from all those files are inserted, with a maximum depth + of two, that means one nested heading. ``toctree`` directives in those + files are also taken into account. + * Sphinx knows that the relative order of the files ``intro.rst``, + ``strings.rst`` and so forth, and it knows that they are children of the + shown file, the library index. From this information it generates "next + chapter", "previous chapter" and "parent chapter" links. + + In the end, all files included in the build process must occur in one + ``toctree`` directive; Sphinx will emit a warning if it finds a file that is + not included, because that means that this file will not be reachable through + standard navigation. + + The special file ``contents.rst`` at the root of the source directory is the + "root" of the TOC tree hierarchy; from it the "Contents" page is generated. + + +Index-generating markup +----------------------- + +Sphinx automatically creates index entries from all information units (like +functions, classes or attributes) like discussed before. + +However, there is also an explicit directive available, to make the index more +comprehensive and enable index entries in documents where information is not +mainly contained in information units, such as the language reference. + +The directive is ``index`` and contains one or more index entries. Each entry +consists of a type and a value, separated by a colon. + +For example:: + + .. 
index:: + single: execution!context + module: __main__ + module: sys + triple: module; search; path + +This directive contains five entries, which will be converted to entries in the +generated index which link to the exact location of the index statement (or, in +case of offline media, the corresponding page number). + +The possible entry types are: + +single + Creates a single index entry. Can be made a subentry by separating the + subentry text with a semicolon (this is also used below to describe what + entries are created). +pair + ``pair: loop; statement`` is a shortcut that creates two index entries, + namely ``loop; statement`` and ``statement; loop``. +triple + Likewise, ``triple: module; search; path`` is a shortcut that creates three + index entries, which are ``module; search path``, ``search; path, module`` and + ``path; module search``. +module, keyword, operator, object, exception, statement, builtin + These all create two index entries. For example, ``module: hashlib`` creates + the entries ``module; hashlib`` and ``hashlib; module``. + + +Grammar production displays +--------------------------- + +Special markup is available for displaying the productions of a formal grammar. +The markup is simple and does not attempt to model all aspects of BNF (or any +derived forms), but provides enough to allow context-free grammars to be +displayed in a way that causes uses of a symbol to be rendered as hyperlinks to +the definition of the symbol. There is this directive: + +.. describe:: productionlist + + This directive is used to enclose a group of productions. Each production is + given on a single line and consists of a name, separated by a colon from the + following definition. If the definition spans multiple lines, each + continuation line must begin with a colon placed at the same column as in the + first line. + + Blank lines are not allowed within ``productionlist`` directive arguments. 
+ + The definition can contain token names which are marked as interpreted text + (e.g. ``sum ::= `integer` "+" `integer```) -- this generates cross-references + to the productions of these tokens. Note that vertical bars used to indicate + alternatives must be escaped with backslashes because otherwise they would + indicate a substitution reference to the reST parser. + + +.. XXX describe optional first parameter + +The following is an example taken from the Python Reference Manual:: + + .. productionlist:: + try_stmt: try1_stmt \| try2_stmt + try1_stmt: "try" ":" :token:`suite` + : ("except" [:token:`expression` ["," :token:`target`]] ":" :token:`suite`)+ + : ["else" ":" :token:`suite`] + : ["finally" ":" :token:`suite`] + try2_stmt: "try" ":" :token:`suite` + : "finally" ":" :token:`suite` + + +Substitutions +------------- + +The documentation system provides three substitutions that are defined by default. +They are set in the build configuration file, see :ref:`doc-build-config`. + +.. describe:: |release| + + Replaced by the Python release the documentation refers to. This is the full + version string including alpha/beta/release candidate tags, e.g. ``2.5.2b3``. + +.. describe:: |version| + + Replaced by the Python version the documentation refers to. This consists + only of the major and minor version parts, e.g. ``2.5``, even for version + 2.5.1. + +.. describe:: |today| + + Replaced by either today's date, or the date set in the build configuration + file. Normally has the format ``April 14, 2007``. diff --git a/converter/newfiles/doc_rest.rst b/converter/newfiles/doc_rest.rst new file mode 100644 index 000000000..064d576df --- /dev/null +++ b/converter/newfiles/doc_rest.rst @@ -0,0 +1,229 @@ +.. highlightlang:: rest + +reStructuredText Primer +======================= + +This section is a brief introduction to reStructuredText (reST) concepts and +syntax, to provide authors enough information to author documents productively. 
+ +Since reST was designed to be a simple, unobtrusive markup language, this will +not take too long. + +.. seealso:: + + The authoritative `reStructuredText User + Documentation <http://docutils.sourceforge.net/rst.html>`_. + + +Paragraphs +---------- + +The most basic block a reST document is made of. Paragraphs are chunks of text +separated by one or more blank lines. As in Python, indentation is significant +in reST, so all lines of a paragraph must be left-aligned. + + +Inline markup +------------- + +The standard reST inline markup is quite simple: use + +* one asterisk: ``*text*`` for emphasis (italics), +* two asterisks: ``**text**`` for strong emphasis (boldface), and +* backquotes: ````text```` for code samples. + +If asterisks or backquotes appear in running text and could be confused with +inline markup delimiters, they have to be escaped with a backslash. + +Be aware of some restrictions of this markup: + +* it may not be nested, +* content may not start or end with whitespace: ``* text*`` is wrong, +* it must be separated from surrounding text by non-word characters. Use a + backslash escaped space to work around that: ``thisis\ *one*\ word``. + +These restrictions may be lifted in future versions of the docutils. + +reST also allows for custom "interpreted text roles", which signify that the +enclosed text should be interpreted in a specific way. Sphinx uses this to +provide semantic markup and cross-referencing of identifiers, as described in +the appropriate section. The general syntax is ``:rolename:`content```. + + +Lists and Quotes +---------------- + +List markup is natural: just place an asterisk at the start of a paragraph and +indent properly. The same goes for numbered lists; they can also be +autonumbered using a ``#`` sign:: + + * This is a bulleted list. + * It has two items, the second + item uses two lines. + + #. This is a numbered list. + #. It has two items too. 
+ +Nested lists are possible, but be aware that they must be separated from the +parent list items by blank lines:: + + * this is + * a list + + * with a nested list + * and some subitems + + * and here the parent list continues + +Definition lists are created as follows:: + + term (up to a line of text) + Definition of the term, which must be indented + + and can even consist of multiple paragraphs + + next term + Description. + + +Paragraphs are quoted by just indenting them more than the surrounding +paragraphs. + + +Source Code +----------- + +Literal code blocks are introduced by ending a paragraph with the special marker +``::``. The literal block must be indented, to be able to include blank lines:: + + This is a normal text paragraph. The next paragraph is a code sample:: + + It is not processed in any way, except + that the indentation is removed. + + It can span multiple lines. + + This is a normal text paragraph again. + +The handling of the ``::`` marker is smart: + +* If it occurs as a paragraph of its own, that paragraph is completely left + out of the document. +* If it is preceded by whitespace, the marker is removed. +* If it is preceded by non-whitespace, the marker is replaced by a single + colon. + +That way, the second sentence in the above example's first paragraph would be +rendered as "The next paragraph is a code sample:". + + +Hyperlinks +---------- + +External links +^^^^^^^^^^^^^^ + +Use ```Link text <http://example.com/>`_`` for inline web links. If the link text +should be the web address, you don't need special markup at all, the parser +finds links and mail addresses in ordinary text. + +Internal links +^^^^^^^^^^^^^^ + +Internal linking is done via a special reST role, see the section on specific +markup, :ref:`doc-ref-role`. 
+ + +Sections +-------- + +Section headers are created by underlining (and optionally overlining) the +section title with a punctuation character, at least as long as the text:: + + ================= + This is a heading + ================= + +Normally, there are no heading levels assigned to certain characters as the +structure is determined from the succession of headings. However, for the +Python documentation, we use this convention: + +* ``#`` with overline, for parts +* ``*`` with overline, for chapters +* ``=``, for sections +* ``-``, for subsections +* ``^``, for subsubsections +* ``"``, for paragraphs + + +Explicit Markup +--------------- + +"Explicit markup" is used in reST for most constructs that need special +handling, such as footnotes, specially-highlighted paragraphs, comments, and +generic directives. + +An explicit markup block begins with a line starting with ``..`` followed by +whitespace and is terminated by the next paragraph at the same level of +indentation. (There needs to be a blank line between explicit markup and normal +paragraphs. This may all sound a bit complicated, but it is intuitive enough +when you write it.) + + +Directives +---------- + +A directive is a generic block of explicit markup. Besides roles, it is one of +the extension mechanisms of reST, and Sphinx makes heavy use of it. + +Basically, a directive consists of a name, arguments, options and content. (Keep +this terminology in mind, it is used in the next chapter describing custom +directives.) Looking at this example, :: + + .. function:: foo(x) + foo(y, z) + :bar: no + + Return a line of text input from the user. + +``function`` is the directive name. It is given two arguments here, the +remainder of the first line and the second line, as well as one option ``bar`` +(as you can see, options are given in the lines immediately following the +arguments and indicated by the colons). 
+ +The directive content follows after a blank line and is indented relative to the +directive start. + + +Footnotes +--------- + +For footnotes, use ``[#]_`` to mark the footnote location, and add the footnote +body at the bottom of the document after a "Footnotes" rubric heading, like so:: + + Lorem ipsum [#]_ dolor sit amet ... [#]_ + + .. rubric:: Footnotes + + .. [#] Text of the first footnote. + .. [#] Text of the second footnote. + + +Comments +-------- + +Every explicit markup block which isn't a valid markup construct (like the +footnotes above) is regarded as a comment. + + +Source encoding +--------------- + +Since the easiest way to include special characters like em dashes or copyright +signs in reST is to directly write them as Unicode characters, one has to +specify an encoding: + +All Python documentation source files must be in UTF-8 encoding, and the HTML +documents written from them will be in that encoding as well. + + +XXX: Gotchas \ No newline at end of file diff --git a/converter/newfiles/doc_sphinx.rst b/converter/newfiles/doc_sphinx.rst new file mode 100644 index 000000000..e87ef5e86 --- /dev/null +++ b/converter/newfiles/doc_sphinx.rst @@ -0,0 +1,55 @@ +.. highlightlang:: rest + +The Sphinx build system +======================= + +XXX: intro... + +.. _doc-build-config: + +The build configuration file +---------------------------- + +The documentation root, that is the ``Doc`` subdirectory of the source +distribution, contains a file named ``conf.py``. This file is called the "build +configuration file", and it contains several variables that are read and used +during a build run. + +These variables are: + +release : string + A string that is used as a replacement for the ``|release|`` reST + substitution. It should be the full version string including + alpha/beta/release candidate tags, e.g. ``2.5.2b3``. + +version : string + A string that is used as a replacement for the ``|version|`` reST + substitution. 
It should be the Python version the documentation refers to. + This consists only of the major and minor version parts, e.g. ``2.5``, even + for version 2.5.1. + +today_fmt : string + A ``strftime`` format that is used to format a replacement for the + ``|today|`` reST substitution. + +today : string + A string that can contain a date that should be written to the documentation + output literally. If this is nonzero, it is used instead of + ``strftime(today_fmt)``. + +unused_file : list of strings + A list of reST filenames that are to be disregarded during building. This + could be docs for temporarily disabled modules or documentation that's not + yet ready for public consumption. + +last_updated_format : string + If this is not an empty string, it will be given to ``time.strftime()`` and + written to each generated output file after "last updated on:". + +use_smartypants : bool + If true, use SmartyPants to convert quotes and dashes to the typographically + correct entities. + +strip_trailing_parentheses : bool + If true, trailing parentheses will be stripped from ``:func:`` etc. + crossreferences. \ No newline at end of file diff --git a/converter/newfiles/doc_style.rst b/converter/newfiles/doc_style.rst new file mode 100644 index 000000000..64d7c36b1 --- /dev/null +++ b/converter/newfiles/doc_style.rst @@ -0,0 +1,57 @@ +.. highlightlang:: rest + +Style Guide +=========== + +The Python documentation should follow the `Apple Publications Style Guide`_ +wherever possible. This particular style guide was selected mostly because it +seems reasonable and is easy to get online. + +.. _Apple Publications Style Guide: http://developer.apple.com/documentation/UserExperience/Conceptual/APStyleGuide/AppleStyleGuide2003.pdf + +Topics which are not covered in the Apple's style guide will be discussed in +this document if necessary. + +Footnotes are generally discouraged, though they may be used when they are the +best way to present specific information. 
When a footnote reference is added at +the end of the sentence, it should follow the sentence-ending punctuation. The +reST markup should appear something like this:: + + This sentence has a footnote reference. [#]_ This is the next sentence. + +Footnotes should be gathered at the end of a file, or if the file is very long, +at the end of a section. The docutils will automatically create backlinks to the +footnote reference. + +Footnotes may appear in the middle of sentences where appropriate. + +Many special names are used in the Python documentation, including the names of +operating systems, programming languages, standards bodies, and the like. Most +of these entities are not assigned any special markup, but the preferred +spellings are given here to aid authors in maintaining the consistency of +presentation in the Python documentation. + +Other terms and words deserve special mention as well; these conventions should +be used to ensure consistency throughout the documentation: + +CPU + For "central processing unit." Many style guides say this should be spelled + out on the first use (and if you must use it, do so!). For the Python + documentation, this abbreviation should be avoided since there's no + reasonable way to predict which occurrence will be the first seen by the + reader. It is better to use the word "processor" instead. + +POSIX + The name assigned to a particular group of standards. This is always + uppercase. + +Python + The name of our favorite programming language is always capitalized. + +Unicode + The name of a character set and matching encoding. This is always written + capitalized. + +Unix + The name of the operating system developed at AT&T Bell Labs in the early + 1970s. \ No newline at end of file diff --git a/converter/newfiles/ext_index.rst b/converter/newfiles/ext_index.rst new file mode 100644 index 000000000..13332dccb --- /dev/null +++ b/converter/newfiles/ext_index.rst @@ -0,0 +1,34 @@ +.. 
_extending-index: + +################################################## + Extending and Embedding the Python Interpreter +################################################## + +:Release: |version| +:Date: |today| + +This document describes how to write modules in C or C++ to extend the Python +interpreter with new modules. Those modules can define new functions but also +new object types and their methods. The document also describes how to embed +the Python interpreter in another application, for use as an extension language. +Finally, it shows how to compile and link extension modules so that they can be +loaded dynamically (at run time) into the interpreter, if the underlying +operating system supports this feature. + +This document assumes basic knowledge about Python. For an informal +introduction to the language, see :ref:`tutorial-index`. :ref:`reference-index` +gives a more formal definition of the language. :ref:`modules-index` documents +the existing object types, functions and modules (both built-in and written in +Python) that give the language its wide application range. + +For a detailed description of the whole Python/C API, see the separate +:ref:`c-api-index`. + +.. toctree:: + :maxdepth: 2 + + extending.rst + newtypes.rst + building.rst + windows.rst + embedding.rst diff --git a/converter/newfiles/mac_index.rst b/converter/newfiles/mac_index.rst new file mode 100644 index 000000000..cd6d6946e --- /dev/null +++ b/converter/newfiles/mac_index.rst @@ -0,0 +1,34 @@ +.. _macmodules-index: + +############################## + Macintosh Library Modules +############################## + +:Release: |version| +:Date: |today| + +This library reference manual documents Python's extensions for the Macintosh. +It should be used in conjunction with :ref:`modules-index`, which documents the +standard library and built-in types. + +This manual assumes basic knowledge about the Python language. 
For an informal +introduction to Python, see :ref:`tutorial-index`; :ref:`reference-index` +remains the highest authority on syntactic and semantic questions. Finally, the +manual entitled :ref:`extending-index` describes how to add new extensions to +Python and how to embed it in other applications. + +.. toctree:: + :maxdepth: 2 + + using.rst + mac.rst + macic.rst + macos.rst + macostools.rst + macui.rst + framework.rst + autogil.rst + scripting.rst + toolbox.rst + colorpicker.rst + undoc.rst diff --git a/converter/newfiles/modules_index.rst b/converter/newfiles/modules_index.rst new file mode 100644 index 000000000..1baeb3872 --- /dev/null +++ b/converter/newfiles/modules_index.rst @@ -0,0 +1,67 @@ +.. _modules-index: + +############################### + The Python standard library +############################### + +:Release: |version| +:Date: |today| + +While :ref:`reference-index` describes the exact syntax and semantics of the +language, it does not describe the standard library that is distributed with the +language, and which greatly enhances its immediate usability. This library +contains built-in modules (written in C) that provide access to system +functionality such as file I/O that would otherwise be inaccessible to Python +programmers, as well as modules written in Python that provide standardized +solutions for many problems that occur in everyday programming. Some of these +modules are explicitly designed to encourage and enhance the portability of +Python programs. + +This library reference manual documents Python's standard library, as well as +many optional library modules (which may or may not be available, depending on +whether the underlying platform supports them and on the configuration choices +made at compile time). It also documents the standard types of the language and +its built-in functions and exceptions, many of which are not or incompletely +documented in the Reference Manual. + + +.. 
toctree:: + :maxdepth: 2 + + intro.rst + strings.rst + datatypes.rst + numeric.rst + netdata.rst + markup.rst + fileformats.rst + crypto.rst + filesys.rst + archiving.rst + persistence.rst + allos.rst + someos.rst + unix.rst + ipc.rst + internet.rst + mm.rst + tkinter.rst + i18n.rst + frameworks.rst + development.rst + pdb.rst + profile.rst + hotshot.rst + timeit.rst + trace.rst + python.rst + custominterp.rst + restricted.rst + modules.rst + language.rst + compiler.rst + misc.rst + sgi.rst + sun.rst + windows.rst + undoc.rst diff --git a/converter/newfiles/ref_index.rst b/converter/newfiles/ref_index.rst new file mode 100644 index 000000000..4d9ceb552 --- /dev/null +++ b/converter/newfiles/ref_index.rst @@ -0,0 +1,34 @@ +.. _reference-index: + +################################# + The Python language reference +################################# + +:Release: |version| +:Date: |today| + +This reference manual describes the syntax and "core semantics" of the +language. It is terse, but attempts to be exact and complete. The semantics of +non-essential built-in object types and of the built-in functions and modules +are described in :ref:`modules-index`. For an informal introduction to the +language, see :ref:`tutorial-index`. For C or C++ programmers, two additional +manuals exist: :ref:`extending-index` describes the high-level picture of how to +write a Python extension module, and the :ref:`c-api-index` describes the +interfaces available to C/C++ programmers in detail. + +.. toctree:: + :maxdepth: 2 + + introduction.rst + lexical_analysis.rst + datamodel.rst + executionmodel.rst + expressions.rst + simple_stmts.rst + compound_stmts.rst + toplevel_components.rst + functions.rst + consts.rst + objects.rst + stdtypes.rst + exceptions.rst diff --git a/converter/newfiles/tutorial_index.rst b/converter/newfiles/tutorial_index.rst new file mode 100644 index 000000000..7309b7c7a --- /dev/null +++ b/converter/newfiles/tutorial_index.rst @@ -0,0 +1,60 @@ +.. 
_tutorial-index: + +###################### + The Python tutorial +###################### + +:Release: |version| +:Date: |today| + +Python is an easy to learn, powerful programming language. It has efficient +high-level data structures and a simple but effective approach to +object-oriented programming. Python's elegant syntax and dynamic typing, +together with its interpreted nature, make it an ideal language for scripting +and rapid application development in many areas on most platforms. + +The Python interpreter and the extensive standard library are freely available +in source or binary form for all major platforms from the Python Web site, +http://www.python.org/, and may be freely distributed. The same site also +contains distributions of and pointers to many free third party Python modules, +programs and tools, and additional documentation. + +The Python interpreter is easily extended with new functions and data types +implemented in C or C++ (or other languages callable from C). Python is also +suitable as an extension language for customizable applications. + +This tutorial introduces the reader informally to the basic concepts and +features of the Python language and system. It helps to have a Python +interpreter handy for hands-on experience, but all examples are self-contained, +so the tutorial can be read off-line as well. + +For a description of standard objects and modules, see the Python Library +Reference document. The Python Reference Manual gives a more formal definition +of the language. To write extensions in C or C++, read Extending and Embedding +the Python Interpreter and Python/C API Reference. There are also several books +covering Python in depth. + +This tutorial does not attempt to be comprehensive and cover every single +feature, or even every commonly used feature. Instead, it introduces many of +Python's most noteworthy features, and will give you a good idea of the +language's flavor and style. 
After reading it, you will be able to read and +write Python modules and programs, and you will be ready to learn more about the +various Python library modules described in the Python Library Reference. + +.. toctree:: + + appetite.rst + interpreter.rst + introduction.rst + controlflow.rst + datastructures.rst + modules.rst + inputoutput.rst + errors.rst + classes.rst + stdlib.rst + stdlib2.rst + whatnow.rst + interactive.rst + floatingpoint.rst + glossary.rst diff --git a/converter/restwriter.py b/converter/restwriter.py new file mode 100644 index 000000000..62693d34f --- /dev/null +++ b/converter/restwriter.py @@ -0,0 +1,959 @@ +# -*- coding: utf-8 -*- +""" + Python documentation ReST writer + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + How the converter works + ======================= + + A LaTeX document is tokenized by a `Tokenizer`. The tokens are processed by + the `DocParser` class which emits a tree of `DocNode`\s. The `RestWriter` + now walks this node tree and generates ReST from that. + + There are some intricacies while writing ReST: + + - Paragraph text must be rewrapped in order to avoid ragged lines. The + `textwrap` module does that nicely, but it must obviously operate on a + whole paragraph at a time. Therefore the contents of the current paragraph + are cached in `self.curpar`. Every time a block level element is + encountered, its node handler calls `self.flush_par()` which writes out a + paragraph. Because this can be detrimental for the markup at several + stages, the `self.noflush` context manager can be used to forbid paragraph + flushing temporarily, which means that no block level nodes can be + processed. + + - There are no inline comments in ReST. Therefore comments are stored in + `self.comments` and written out every time the paragraph is flushed. + + - A similar thing goes for footnotes: `self.footnotes`. + + - Some inline markup cannot contain nested markup. 
Therefore the function + `textonly()` exists which returns a node similar to its argument, but + stripped of inline markup. + + - Some constructs need to format non-block-level nodes, but without writing + the result to the current paragraph. These use `self.get_node_text()` + which writes to a temporary paragraph and returns the resulting markup. + + - Indentation is important. The `self.indent` context manager helps keeping + track of indentation levels. + + - Some blocks, like lists, need to prevent the first line from being + indented because the indentation space is already filled (e.g. by a + bullet). Therefore the `self.indent` context manager accepts a + `firstline` flag which can be set to ``False``, resulting in the first + line not being indented. + + + There are some restrictions on markup compared to LaTeX: + + - Table cells may not contain blocks. + + - Hard line breaks don't exist. + + - Block level markup inside "alltt" environments doesn't work. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +# yay! +from __future__ import with_statement + +import re +import StringIO +import textwrap + +WIDTH = 80 +INDENT = 3 + +new_wordsep_re = re.compile( + r'(\s+|' # any whitespace + r'(?<=\s)(?::[a-z-]+:)?`\S+|' # interpreted text start + r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words + r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash + +import textwrap +# monkey-patch... +textwrap.TextWrapper.wordsep_re = new_wordsep_re +wrapper = textwrap.TextWrapper(width=WIDTH, break_long_words=False) + +from .docnodes import RootNode, TextNode, NodeList, InlineNode, \ + CommentNode, EmptyNode +from .util import fixup_text, empty, text, my_make_id, \ + repair_bad_inline_markup +from .filenamemap import includes_mapping + +class WriterError(Exception): + pass + + +class Indenter(object): + """ Context manager factory for indentation. 
""" + def __init__(self, writer): + class IndenterManager(object): + def __init__(self, indentlevel, flush, firstline): + self.indentlevel = indentlevel + self.flush = flush + self.firstline = firstline + + def __enter__(self): + writer.indentation += (self.indentlevel * ' ') + writer.indentfirstline = self.firstline + return self + + def __exit__(self, *ignored): + if self.flush: + writer.flush_par() + writer.indentation = writer.indentation[:-self.indentlevel] + + self.manager = IndenterManager + + def __call__(self, indentlevel=INDENT, flush=True, firstline=True): + return self.manager(indentlevel, flush, firstline) + + +class NoFlush(object): + """ Convenience context manager. """ + def __init__(self, writer): + self.writer = writer + + def __enter__(self): + self.writer.no_flushing += 1 + + def __exit__(self, *ignored): + self.writer.no_flushing -= 1 + + +class SectionMeta(object): + def __init__(self): + self.modname = '' + self.platform = '' + self.synopsis = [] + self.modauthors = [] + self.sectauthors = [] + + +class RestWriter(object): + """ Write ReST from a node tree. """ + + def __init__(self, fp, splitchap=False, toctree=None, deflang=None, labelprefix=''): + self.splitchap = splitchap # split output at chapters? + if splitchap: + self.fp = StringIO.StringIO() # dummy one + self.chapters = [self.fp] + else: + self.fp = fp # file pointer + self.toctree = toctree # entries for the TOC tree + self.deflang = deflang # default highlighting language + self.labelprefix = labelprefix # prefix for all label names + + # indentation tools + self.indentation = '' # current indentation string + self.indentfirstline = True # indent the first line of next paragraph? + self.indented = Indenter(self) # convenience context manager + + # paragraph flushing tools + self.flush_cb = None # callback run on next paragraph flush, used + # for properly separating field lists from + # the following paragraph + self.no_flushing = 0 # raise an error on paragraph flush? 
+ self.noflush = NoFlush(self) # convenience context manager + + # collected items to output later + self.curpar = [] # text in current paragraph + self.comments = [] # comments to be output after flushing + self.indexentries = [] # indexentries to be output before flushing + self.footnotes = [] # footnotes to be output at document end + self.warnings = [] # warnings while writing + + # specials + self.sectionlabel = '' # most recent \label command + self.thisclass = '' # most recent classdesc name + self.sectionmeta = None # current section metadata + self.noescape = 0 # don't escape text nodes + self.indexsubitem = '' # current \withsubitem text + + def write_document(self, rootnode): + """ Write a document, represented by a RootNode. """ + assert type(rootnode) is RootNode + + if self.deflang: + self.write_directive('highlightlang', self.deflang) + + self.visit_node(rootnode) + self.write_footnotes() + + def new_chapter(self): + """ Called if self.splitchap is True. Create a new file pointer + and set self.fp to it. """ + new_fp = StringIO.StringIO() + self.chapters.append(new_fp) + self.fp = new_fp + + def write(self, text='', nl=True, first=False): + """ Write a string to the output file. """ + if first: + self.fp.write((self.indentation if self.indentfirstline else '') + text) + self.indentfirstline = True + elif text: # don't write indentation only + self.fp.write(self.indentation + text) + if nl: + self.fp.write('\n') + + def write_footnotes(self): + """ Write the current footnotes, if any. """ + self.flush_par() + if self.footnotes: + self.write('.. rubric:: Footnotes\n') + footnotes = self.footnotes + self.footnotes = [] # first clear since indented() will flush + for footnode in footnotes: + self.write('.. [#] ', nl=False) + with self.indented(3, firstline=False): + self.visit_node(footnode) + + def write_directive(self, name, args='', node=None, spabove=False, spbelow=True): + """ Helper to write a ReST directive. 
""" + if spabove: + self.write() + self.write('.. %s::%s' % (name, args and ' '+args)) + if spbelow: + self.write() + with self.indented(): + if node is not None: + self.visit_node(node) + + def write_sectionmeta(self): + mod = self.sectionmeta + self.sectionmeta = None + if not mod: + return + if mod.modname: + self.write('.. module:: %s' % mod.modname) + if mod.platform: + self.write(' :platform: %s' % mod.platform) + if mod.synopsis: + self.write(' :synopsis: %s' % mod.synopsis[0]) + for line in mod.synopsis[1:]: + self.write(' %s' % line) + if mod.modauthors: + for author in mod.modauthors: + self.write('.. moduleauthor:: %s' % author) + if mod.sectauthors: + for author in mod.sectauthors: + self.write('.. sectionauthor:: %s' % author) + self.write() + self.write() + + indexentry_mapping = { + 'index': 'single', + 'indexii': 'pair', + 'indexiii': 'triple', + 'indexiv': 'quadruple', + 'stindex': 'statement', + 'ttindex': 'single', + 'obindex': 'object', + 'opindex': 'operator', + 'kwindex': 'keyword', + 'exindex': 'exception', + 'bifuncindex': 'builtin', + 'refmodindex': 'module', + 'refbimodindex': 'module', + 'refexmodindex': 'module', + 'refstmodindex': 'module', + } + + def get_indexentries(self, entries): + """ Return a list of lines for the index entries. """ + def format_entry(cmdname, args, subitem): + textargs = [] + for arg in args: + if isinstance(arg, TextNode): + textarg = text(arg) + else: + textarg = self.get_node_text(self.get_textonly_node(arg, warn=0)) + if ';' in textarg: + raise WriterError("semicolon in index args: " + textarg) + textarg += subitem + textarg = textarg.replace('!', '; ') + textargs.append(textarg) + return '%s: %s' % (self.indexentry_mapping[cmdname], + '; '.join(textarg for textarg in textargs + if not empty(arg))) + + ret = [] + if len(entries) == 1: + ret.append('.. index:: %s' % format_entry(*entries[0])) + else: + ret.append('.. 
index::') + for entry in entries: + ret.append(' %s' % format_entry(*entry)) + return ret + + def get_par(self, wrap, width=None): + """ Get the contents of the current paragraph. + Returns a list if wrap and not indent, else a string. """ + if not self.curpar: + if wrap: + return [] + else: + return '' + text = ''.join(self.curpar).lstrip() + text = repair_bad_inline_markup(text) + self.curpar = [] + if wrap: + # returns a list! + wrapper.width = width or WIDTH + return wrapper.wrap(text) + else: + return text + + no_warn_textonly = set(( + 'var', 'code', 'textrm', 'emph', 'keyword', 'textit', 'programopt', + 'cfunction', 'texttt', 'email', 'constant', + )) + + def get_textonly_node(self, node, cmd='', warn=1): + """ Return a similar Node or NodeList that only has TextNode subnodes. + + Warning values: + - 0: never warn + - 1: warn for markup losing information + """ + if cmd == 'code': + warn = 0 + def do(subnode): + if isinstance(subnode, TextNode): + return subnode + if isinstance(subnode, NodeList): + return NodeList(do(subsubnode) for subsubnode in subnode) + if isinstance(subnode, CommentNode): + # loses comments, but huh + return EmptyNode() + if isinstance(subnode, InlineNode): + if subnode.cmdname == 'optional': + # this is not mapped to ReST markup + return subnode + if len(subnode.args) == 1: + if warn == 1 and subnode.cmdname not in self.no_warn_textonly: + self.warnings.append('%r: Discarding %s markup in %r' % + (cmd, subnode.cmdname, node)) + return do(subnode.args[0]) + elif len(subnode.args) == 0: + # should only happen for IndexNodes which stay in + return subnode + elif len(subnode.args) == 2 and subnode.cmdname == 'refmodule': + if not warn: + return do(subnode.args[1]) + raise WriterError('get_textonly_node() failed for %r' % subnode) + return do(node) + + def get_node_text(self, node, wrap=False, width=None): + """ Write the node to a temporary paragraph and return the result + as a string. 
""" + with self.noflush: + self._old_curpar = self.curpar + self.curpar = [] + self.visit_node(node) + ret = self.get_par(wrap, width=width) + self.curpar = self._old_curpar + return ret + + def flush_par(self, nocb=False, nocomments=False): + """ Write the current paragraph to the output file. + Prepend index entries, append comments and footnotes. """ + if self.no_flushing: + raise WriterError('called flush_par() while noflush active') + if self.indexentries: + for line in self.get_indexentries(self.indexentries): + self.write(line) + self.write() + self.indexentries = [] + if self.flush_cb and not nocb: + self.flush_cb() + self.flush_cb = None + par = self.get_par(wrap=True) + if par: + for i, line in enumerate(par): + self.write(line, first=(i==0)) + self.write() + if self.comments and not nocomments: + for comment in self.comments: + self.write('.. % ' + comment) + self.write() + self.comments = [] + + def visit_wrapped(self, pre, node, post, noescape=False): + """ Write a node within a paragraph, wrapped with pre and post strings. """ + if noescape: + self.noescape += 1 + self.curpar.append(pre) + with self.noflush: + self.visit_node(node) + self.curpar.append(post) + if noescape: + self.noescape -= 1 + + def visit_node(self, node): + """ "Write" a node (appends to curpar or writes something). 
""" + visitfunc = getattr(self, 'visit_' + node.__class__.__name__, None) + if not visitfunc: + raise WriterError('no visit function for %s node' % node.__class__) + visitfunc(node) + + # ------------------------- node handlers ----------------------------- + + def visit_RootNode(self, node): + if node.params.get('title'): + title = self.get_node_text(node.params['title']) + hl = len(title) + self.write('*' * (hl+4)) + self.write(' %s ' % title) + self.write('*' * (hl+4)) + self.write() + + if node.params.get('author'): + self.write(':Author: %s%s' % + (self.get_node_text(node.params['author']), + (' <%s>' % self.get_node_text(node.params['authoremail']) + if 'authoremail' in node.params else ''))) + self.write() + + if node.params.get('date'): + self.write(':Date: %s' % self.get_node_text(node.params['date'])) + self.write() + + if node.params.get('release'): + self.write('.. |release| replace:: %s' % + self.get_node_text(node.params['release'])) + self.write() + + self.visit_NodeList(node.children) + + def visit_NodeList(self, nodelist): + for node in nodelist: + self.visit_node(node) + + def visit_CommentNode(self, node): + # no inline comments -> they are all output at the start of a new paragraph + self.comments.append(node.comment.strip()) + + sectchars = { + 'chapter': '*', + 'chapter*': '*', + 'section': '=', + 'subsection': '-', + 'subsubsection': '^', + 'paragraph': '"', + } + + sectdoubleline = [ + 'chapter', + 'chapter*', + ] + + def visit_SectioningNode(self, node): + self.flush_par() + self.sectionlabel = '' + self.thisclass = '' + self.write() + + if self.splitchap and node.cmdname.startswith('chapter'): + self.write_footnotes() + self.new_chapter() + + heading = self.get_node_text(node.args[0]).strip() + if self.sectionlabel: + self.write('.. 
_%s:\n' % self.sectionlabel) + hl = len(heading) + if node.cmdname in self.sectdoubleline: + self.write(self.sectchars[node.cmdname] * hl) + self.write(heading) + self.write(self.sectchars[node.cmdname] * hl) + self.write() + + def visit_EnvironmentNode(self, node): + self.flush_par() + envname = node.envname + if envname == 'notice': + type = text(node.args[0]) or 'note' + self.write_directive(type, '', node.content) + elif envname in ('seealso', 'seealso*'): + self.write_directive('seealso', '', node.content, spabove=True) + elif envname == 'abstract': + self.write_directive('topic', 'Abstract', node.content, spabove=True) + elif envname == 'quote': + with self.indented(): + self.visit_node(node.content) + self.write() + elif envname == 'quotation': + self.write_directive('epigraph', '', node.content, spabove=True) + else: + raise WriterError('no handler for %s environment' % envname) + + descmap = { + 'funcdesc': ('function', '0(1)'), + 'funcdescni': ('function', '0(1)'), + 'classdesc': ('class', '0(1)'), + 'classdesc*': ('class', '0'), + 'methoddesc': ('method', '0.1(2)'), + 'methoddescni': ('method', '0.1(2)'), + 'excdesc': ('exception', '0'), + 'excclassdesc': ('exception', '0(1)'), + 'datadesc': ('data', '0'), + 'datadescni': ('data', '0'), + 'memberdesc': ('attribute', '0.1'), + 'memberdescni': ('attribute', '0.1'), + 'opcodedesc': ('opcode', '0 (1)'), + + 'cfuncdesc': ('cfunction', '0 1(2)'), + 'cmemberdesc': ('cmember', '1 0.2'), + 'csimplemacrodesc': ('cmacro', '0'), + 'ctypedesc': ('ctype', '1'), + 'cvardesc': ('cvar', '0 1'), + } + + def _write_sig(self, spec, args): + # don't escape "*" in signatures + self.noescape += 1 + for c in spec: + if c.isdigit(): + self.visit_node(self.get_textonly_node(args[int(c)])) + else: + self.curpar.append(c) + self.noescape -= 1 + + def visit_DescEnvironmentNode(self, node): + envname = node.envname + if envname not in self.descmap: + raise WriterError('no handler for %s environment' % envname) + + self.flush_par() + 
# automatically fill in the class name if not given + if envname[:9] == 'classdesc' or envname[:12] == 'excclassdesc': + self.thisclass = text(node.args[0]) + elif envname[:10] in ('methoddesc', 'memberdesc') and not \ + text(node.args[0]): + if not self.thisclass: + raise WriterError('No current class for %s member' % + text(node.args[1])) + node.args[0] = TextNode(self.thisclass) + directivename, sigspec = self.descmap[envname] + self._write_sig(sigspec, node.args) + signature = self.get_par(wrap=False) + self.write() + self.write('.. %s:: %s' % (directivename, signature)) + if node.additional: + for cmdname, add in node.additional: + entry = self.descmap[cmdname.replace('line', 'desc')] + if envname[:10] in ('methoddesc', 'memberdesc') and not \ + text(add[0]): + if not self.thisclass: + raise WriterError('No current class for %s member' % + text(add[1])) + add[0] = TextNode(self.thisclass) + self._write_sig(entry[1], add) + signature = self.get_par(wrap=False) + self.write(' %s%s' % (' ' * (len(directivename) - 2), + signature)) + if envname.endswith('ni'): + self.write(' :noindex:') + self.write() + with self.indented(): + self.visit_node(node.content) + + + def visit_CommandNode(self, node): + cmdname = node.cmdname + if cmdname == 'label': + labelname = self.labelprefix + text(node.args[0]).lower() + if self.no_flushing: + # in section + self.sectionlabel = labelname + else: + self.flush_par() + self.write('.. 
_%s:\n' % labelname) + return + + elif cmdname in ('declaremodule', 'modulesynopsis', + 'moduleauthor', 'sectionauthor', 'platform'): + self.flush_par(nocb=True, nocomments=True) + if not self.sectionmeta: + self.sectionmeta = SectionMeta() + if cmdname == 'declaremodule': + self.sectionmeta.modname = text(node.args[2]) + elif cmdname == 'modulesynopsis': + self.sectionmeta.synopsis = self.get_node_text( + self.get_textonly_node(node.args[0], warn=0), wrap=True) + elif cmdname == 'moduleauthor': + email = text(node.args[1]) + self.sectionmeta.modauthors.append( + '%s%s' % (text(node.args[0]), (email and ' <%s>' % email))) + elif cmdname == 'sectionauthor': + email = text(node.args[1]) + self.sectionmeta.sectauthors.append( + '%s%s' % (text(node.args[0]), (email and ' <%s>' % email))) + elif cmdname == 'platform': + self.sectionmeta.platform = text(node.args[0]) + self.flush_cb = lambda: self.write_sectionmeta() + return + + self.flush_par() + if cmdname.startswith('see'): + i = 2 + if cmdname == 'seemodule': + self.write('Module :mod:`%s`' % text(node.args[1])) + elif cmdname == 'seelink': + linktext = self.get_node_text(node.args[1]) + self.write('`%s <%s>`_' % (linktext, text(node.args[0]))) + elif cmdname == 'seepep': + self.write(':pep:`%s` - %s' % (text(node.args[0]), + self.get_node_text(node.args[1]))) + elif cmdname == 'seerfc': + self.write(':rfc:`%s` - %s' % (text(node.args[0]), + text(node.args[1]))) + elif cmdname == 'seetitle': + if empty(node.args[0]): + self.write('%s' % text(node.args[1])) + else: + self.write('`%s <%s>`_' % (text(node.args[1]), + text(node.args[0]))) + elif cmdname == 'seeurl': + i = 1 + self.write('%s' % text(node.args[0])) + elif cmdname == 'seetext': + self.visit_node(node.args[0]) + return + with self.indented(): + self.visit_node(node.args[i]) + elif cmdname in ('versionchanged', 'versionadded'): + self.write('.. 
%s:: %s' % (cmdname, text(node.args[1]))) + if not empty(node.args[0]): + with self.indented(): + self.visit_node(node.args[0]) + self.curpar.append('.') + else: + self.write() + elif cmdname == 'deprecated': + self.write_directive('deprecated', text(node.args[0]), node.args[1], + spbelow=False) + elif cmdname == 'localmoduletable': + if self.toctree: + self.write_directive('toctree', '', spbelow=True, spabove=True) + with self.indented(): + for entry in self.toctree: + self.write(entry + '.rst') + else: + self.warnings.append('no toctree given, but \\localmoduletable in file') + elif cmdname == 'verbatiminput': + inclname = text(node.args[0]) + newname = includes_mapping.get(inclname, '../includes/' + inclname) + if newname is None: + self.write() + self.write('.. XXX includefile %s' % inclname) + return + self.write() + self.write('.. include:: %s' % newname) + self.write(' :literal:') + self.write() + elif cmdname == 'input': + inclname = text(node.args[0]) + newname = includes_mapping.get(inclname, None) + if newname is None: + self.write('X' 'XX: input{%s} :XX' 'X' % inclname) + return + self.write_directive('include', newname, spabove=True) + elif cmdname == 'centerline': + self.write_directive('centered', self.get_node_text(node.args[0]), + spabove=True, spbelow=True) + elif cmdname == 'XX' 'X': + self.visit_wrapped(r'**\*\*** ', node.args[0], ' **\*\***') + else: + raise WriterError('no handler for %s command' % cmdname) + + def visit_DescLineCommandNode(self, node): + # these have already been written as arguments of the corresponding + # DescEnvironmentNode + pass + + def visit_ParaSepNode(self, node): + self.flush_par() + + def visit_VerbatimNode(self, node): + if self.comments: + # these interfer with the literal block + self.flush_par() + if self.curpar: + last = self.curpar[-1].rstrip(' ') + if last.endswith(':'): + self.curpar[-1] = last + ':' + else: + self.curpar.append(' ::') + else: + self.curpar.append('::') + self.flush_par() + with 
self.indented(): + if isinstance(node.content, TextNode): + # verbatim + lines = textwrap.dedent(text(node.content).lstrip('\n')).split('\n') + if not lines: + return + else: + # alltt, possibly with inline formats + lines = self.get_node_text(self.get_textonly_node( + node.content, warn=0)).split('\n') + [''] + # discard leading blank links + while not lines[0].strip(): + del lines[0] + for line in lines: + self.write(line) + + note_re = re.compile('^\(\d\)$') + + def visit_TableNode(self, node): + self.flush_par() + lines = node.lines[:] + lines.insert(0, node.headings) + fmted_rows = [] + width = WIDTH - len(self.indentation) + realwidths = [0] * node.numcols + colwidth = (width / node.numcols) + 5 + # don't allow paragraphs in table cells for now + with self.noflush: + for line in lines: + cells = [] + for i, cell in enumerate(line): + par = self.get_node_text(cell, wrap=True, width=colwidth) + if len(par) == 1 and self.note_re.match(par[0].strip()): + # special case: escape "(1)" to avoid enumeration + par[0] = '\\' + par[0] + maxwidth = max(map(len, par)) if par else 0 + realwidths[i] = max(realwidths[i], maxwidth) + cells.append(par) + fmted_rows.append(cells) + + def writesep(char='-'): + out = ['+'] + for width in realwidths: + out.append(char * (width+2)) + out.append('+') + self.write(''.join(out)) + + def writerow(row): + lines = map(None, *row) + for line in lines: + out = ['|'] + for i, cell in enumerate(line): + if cell: + out.append(' ' + cell.ljust(realwidths[i]+1)) + else: + out.append(' ' * (realwidths[i] + 2)) + out.append('|') + self.write(''.join(out)) + + writesep('-') + writerow(fmted_rows[0]) + writesep('=') + for row in fmted_rows[1:]: + writerow(row) + writesep('-') + self.write() + + def visit_ItemizeNode(self, node): + self.flush_par() + for title, content in node.items: + if not empty(title): + # do it like in a description list + self.write(self.get_node_text(title)) + with self.indented(): + self.visit_node(content) + else: + 
self.curpar.append('* ') + with self.indented(2, firstline=False): + self.visit_node(content) + + def visit_EnumerateNode(self, node): + self.flush_par() + for title, content in node.items: + assert empty(title) + self.curpar.append('#. ') + with self.indented(3, firstline=False): + self.visit_node(content) + + def visit_DescriptionNode(self, node): + self.flush_par() + for title, content in node.items: + self.write(self.get_node_text(title)) + with self.indented(): + self.visit_node(content) + + visit_DefinitionsNode = visit_DescriptionNode + + def visit_ProductionListNode(self, node): + self.flush_par() + arg = text(node.arg) + self.write('.. productionlist::%s' % (' '+arg if arg else '')) + with self.indented(): + for item in node.items: + if not empty(item[0]): + lasttext = text(item[0]) + self.write('%s: %s' % ( + text(item[0]).ljust(len(lasttext)), + self.get_node_text(item[1]))) + self.write() + + def visit_EmptyNode(self, node): + pass + + def visit_TextNode(self, node): + if self.noescape: + self.curpar.append(node.text) + else: + self.curpar.append(fixup_text(node.text)) + + visit_NbspNode = visit_TextNode + visit_SimpleCmdNode = visit_TextNode + + def visit_BreakNode(self, node): + # XXX: linebreaks in ReST? + self.curpar.append(' --- ') + + def visit_IndexNode(self, node): + if node.cmdname == 'withsubitem': + self.indexsubitem = ' ' + text(node.indexargs[0]) + self.visit_node(node.indexargs[1]) + self.indexsubitem = '' + else: + self.indexentries.append((node.cmdname, node.indexargs, + self.indexsubitem)) + + # maps argumentless commands to text + simplecmd_mapping = { + 'NULL': '`NULL`', + 'shortversion': '|version|', + 'version': '|release|', + 'today': '|today|', + } + + # map LaTeX command names to roles: shorter names! 
+ role_mapping = { + 'cfunction': 'cfunc', + 'constant': 'const', + 'csimplemacro': 'cmacro', + 'exception': 'exc', + 'function': 'func', + 'grammartoken': 'token', + 'member': 'attr', + 'method': 'meth', + 'module': 'mod', + 'programopt': 'option', + # these mean: no change + 'cdata': '', + 'class': '', + 'command': '', + 'ctype': '', + 'data': '', # NEW + 'dfn': '', + 'envvar': '', + 'file': '', + 'filenq': '', + 'filevar': '', + 'guilabel': '', + 'kbd': '', + 'keyword': '', + 'mailheader': '', + 'makevar': '', + 'menuselection': '', + 'mimetype': '', + 'newsgroup': '', + 'option': '', + 'pep': '', + 'program': '', + 'ref': '', + 'rfc': '', + } + + # do not warn about nested inline markup in these roles + role_no_warn = set(( + 'cdata', 'cfunction', 'class', 'constant', 'csimplemacro', 'ctype', + 'data', 'exception', 'function', 'member', 'method', 'module', + )) + + def visit_InlineNode(self, node): + # XXX: no nested markup -- docutils doesn't support it + cmdname = node.cmdname + if not node.args: + self.curpar.append(self.simplecmd_mapping[cmdname]) + return + content = node.args[0] + if cmdname in ('code', 'bfcode', 'samp', 'texttt', 'regexp'): + self.visit_wrapped('``', self.get_textonly_node(content, 'code', + warn=1), '``', noescape=True) + elif cmdname in ('emph', 'textit'): + self.visit_wrapped('*', self.get_textonly_node(content, 'emph', + warn=1), '*') + elif cmdname in ('strong', 'textbf'): + self.visit_wrapped('**', self.get_textonly_node(content, 'strong', + warn=1), '**') + elif cmdname in ('b', 'textrm', 'email'): + self.visit_node(content) + elif cmdname in ('var', 'token'): + # \token appears in productionlists only + self.visit_wrapped('`', self.get_textonly_node(content, 'var', + warn=1), '`') + elif cmdname == 'ref': + self.curpar.append(':ref:`%s%s`' % (self.labelprefix, + text(node.args[0]).lower())) + elif cmdname == 'refmodule': + self.visit_wrapped(':mod:`', node.args[1], '`', noescape=True) + elif cmdname == 'optional': + 
self.visit_wrapped('[', content, ']') + elif cmdname == 'url': + self.visit_node(content) + elif cmdname == 'ulink': + target = text(node.args[1]) + if target.startswith('..'): + self.visit_wrapped('', content, ' (X' + 'XX reference: %s)' % target) + elif not target.startswith(('http:', 'mailto:')): + #self.warnings.append('Local \\ulink to %s, use \\ref instead' % target) + self.visit_wrapped('', content, ' (X' 'XX reference: %s)' % target) + else: + self.visit_wrapped('`', self.get_textonly_node(content, 'ulink', warn=1), + ' <%s>`_' % target) + elif cmdname == 'citetitle': + target = text(content) + if not target: + self.visit_node(node.args[1]) + elif target.startswith('..'): + self.visit_wrapped('', node.args[1], + ' (X' + 'XX reference: %s)' % target) + else: + self.visit_wrapped('`', self.get_textonly_node(node.args[1], + 'citetitle', warn=1), + ' <%s>`_' % target) + elif cmdname == 'character': + # ``'a'`` is not longer than :character:`a` + self.visit_wrapped("``'", content, "'``", noescape=True) + elif cmdname == 'manpage': + self.curpar.append(':manpage:`') + self.visit_node(self.get_textonly_node(content, warn=0)) + self.visit_wrapped('(', self.get_textonly_node(node.args[1], warn=0), ')') + self.curpar.append('`') + elif cmdname == 'footnote': + self.curpar.append(' [#]_') + self.footnotes.append(content) + elif cmdname == 'frac': + self.visit_wrapped('(', node.args[0], ')/') + self.visit_wrapped('(', node.args[1], ')') + elif cmdname == 'longprogramopt': + self.visit_wrapped(':option:`--', content, '`') + elif cmdname == '': + self.visit_node(content) + # stray commands from distutils + elif cmdname in ('argument name', 'value', 'attribute', 'option name'): + self.visit_wrapped('`', content, '`') + else: + self.visit_wrapped(':%s:`' % (self.role_mapping[cmdname] or cmdname), + self.get_textonly_node( + content, cmdname, warn=(cmdname not in self.role_no_warn)), '`') diff --git a/converter/scanner.py b/converter/scanner.py new file mode 100644 index 
000000000..53502d2ce --- /dev/null +++ b/converter/scanner.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +""" + scanner + ~~~~~~~ + + This library implements a regex based scanner. + + :copyright: 2006-2007 by Armin Ronacher, Georg Brandl. + :license: BSD license. +""" +import re + + +class EndOfText(RuntimeError): + """ + Raised if end of text is reached and the user + tried to call a match function. + """ + + +class Scanner(object): + """ + Simple scanner + + All method patterns are regular expression strings (not + compiled expressions!) + """ + + def __init__(self, text, flags=0): + """ + :param text: The text which should be scanned + :param flags: default regular expression flags + """ + self.data = text + self.data_length = len(text) + self.start_pos = 0 + self.pos = 0 + self.flags = flags + self.last = None + self.match = None + self._re_cache = {} + + def eos(self): + """`True` if the scanner reached the end of text.""" + return self.pos >= self.data_length + eos = property(eos, eos.__doc__) + + def check(self, pattern): + """ + Apply `pattern` on the current position and return + the match object. (Doesn't touch pos). Use this for + lookahead. + """ + if self.eos: + raise EndOfText() + if pattern not in self._re_cache: + self._re_cache[pattern] = re.compile(pattern, self.flags) + return self._re_cache[pattern].match(self.data, self.pos) + + def test(self, pattern): + """Apply a pattern on the current position and check + if it matches. Doesn't touch pos.""" + return self.check(pattern) is not None + + def scan(self, pattern): + """ + Scan the text for the given pattern and update pos/match + and related fields. The return value is a boolean that + indicates if the pattern matched. The matched value is + stored on the instance as ``match``, the last value is + stored as ``last``. ``start_pos`` is the position of the + pointer before the pattern was matched, ``pos`` is the + end position. 
+ """ + if self.eos: + raise EndOfText() + if pattern not in self._re_cache: + self._re_cache[pattern] = re.compile(pattern, self.flags) + self.last = self.match + m = self._re_cache[pattern].match(self.data, self.pos) + if m is None: + return False + self.start_pos = m.start() + self.pos = m.end() + self.match = m + return True + + def get_char(self): + """Scan exactly one char.""" + self.scan('.') + + def __repr__(self): + return '<%s %d/%d>' % ( + self.__class__.__name__, + self.pos, + self.data_length + ) diff --git a/converter/tokenizer.py b/converter/tokenizer.py new file mode 100644 index 000000000..053ecc0c4 --- /dev/null +++ b/converter/tokenizer.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +""" + Python documentation LaTeX file tokenizer + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + For more documentation, look into the ``restwriter.py`` file. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +import re + +from .scanner import Scanner + +class Tokenizer(Scanner): + """ Lex a Python doc LaTeX document. 
""" + + specials = { + '{': 'bgroup', + '}': 'egroup', + '[': 'boptional', + ']': 'eoptional', + '~': 'tilde', + '$': 'mathmode', + } + + @property + def mtext(self): + return self.match.group() + + def tokenize(self): + return TokenStream(self._tokenize()) + + def _tokenize(self): + lineno = 1 + while not self.eos: + if self.scan(r'\\verb([^a-zA-Z])(.*?)(\1)'): + # specialcase \verb here + yield lineno, 'command', 'verb', '\\verb' + yield lineno, 'text', self.match.group(1), self.match.group(1) + yield lineno, 'text', self.match.group(2), self.match.group(2) + yield lineno, 'text', self.match.group(3), self.match.group(3) + elif self.scan(r'\\([a-zA-Z]+\*?)[ \t]*'): + yield lineno, 'command', self.match.group(1), self.mtext + elif self.scan(r'\\.'): + yield lineno, 'command', self.mtext[1], self.mtext + elif self.scan(r'\\\n'): + yield lineno, 'text', self.mtext, self.mtext + lineno += 1 + elif self.scan(r'%(.*)\n[ \t]*'): + yield lineno, 'comment', self.match.group(1), self.mtext + lineno += 1 + elif self.scan(r'[{}\[\]~$]'): + yield lineno, self.specials[self.mtext], self.mtext, self.mtext + elif self.scan(r'(\n[ \t]*){2,}'): + lines = self.mtext.count('\n') + yield lineno, 'parasep', '\n' * lines, self.mtext + lineno += lines + elif self.scan(r'\n[ \t]*'): + yield lineno, 'text', ' ', self.mtext + lineno += 1 + elif self.scan(r'[^\\%}{\[\]~\n]+'): + yield lineno, 'text', self.mtext, self.mtext + else: + raise RuntimeError('unexpected text on line %d: %r' % + (lineno, self.data[self.pos:self.pos+100])) + + +class TokenStream(object): + """ + A token stream works like a normal generator just that + it supports peeking and pushing tokens back to the stream. + """ + + def __init__(self, generator): + self._generator = generator + self._pushed = [] + self.last = (1, 'initial', '') + + def __iter__(self): + return self + + def __nonzero__(self): + """ Are we at the end of the tokenstream? 
""" + if self._pushed: + return True + try: + self.push(self.next()) + except StopIteration: + return False + return True + + def pop(self): + """ Return the next token from the stream. """ + if self._pushed: + rv = self._pushed.pop() + else: + rv = self._generator.next() + self.last = rv + return rv + + next = pop + + def popmany(self, num=1): + """ Pop a list of tokens. """ + return [self.next() for i in range(num)] + + def peek(self): + """ Pop and push a token, return it. """ + token = self.next() + self.push(token) + return token + + def peekmany(self, num=1): + """ Pop and push a list of tokens. """ + tokens = self.popmany(num) + for tok in tokens: + self.push(tok) + return tokens + + def push(self, item): + """ Push a token back to the stream. """ + self._pushed.append(item) diff --git a/converter/util.py b/converter/util.py new file mode 100644 index 000000000..fe3360997 --- /dev/null +++ b/converter/util.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +""" + Python documentation conversion utils + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: 2007 by Georg Brandl. + :license: Python license. 
+""" + +import re + +from docutils.nodes import make_id + +from .docnodes import TextNode, EmptyNode, NodeList + + +def umlaut(cmd, c): + try: + if cmd == '"': + return {'o': u'ö', + 'a': u'ä', + 'u': u'ü', + 'i': u'ï', + 'O': u'Ö', + 'A': u'Ä', + 'U': u'Ü'}[c] + elif cmd == "'": + return {'a': u'á', + 'e': u'é'}[c] + elif cmd == '~': + return {'n': u'ñ'}[c] + elif cmd == 'c': + return {'c': u'ç'}[c] + elif cmd == '`': + return {'o': u'ò'}[c] + else: + from .latexparser import ParserError + raise ParserError('invalid umlaut \\%s' % cmd, 0) + except KeyError: + from .latexparser import ParserError + raise ParserError('unsupported umlaut \\%s%s' % (cmd, c), 0) + +def fixup_text(text): + return text.replace('``', '"').replace("''", '"').replace('`', "'").\ + replace('|', '\\|').replace('*', '\\*') + +def empty(node): + return (type(node) is EmptyNode) + +def text(node): + """ Return the text for a TextNode or raise an error. """ + if isinstance(node, TextNode): + return node.text + elif isinstance(node, NodeList): + restext = '' + for subnode in node: + restext += text(subnode) + return restext + from .restwriter import WriterError + raise WriterError('text() failed for %r' % node) + +markup_re = re.compile(r'(:[a-zA-Z0-9_-]+:)?`(.*?)`') + +def my_make_id(name): + """ Like make_id(), but strip roles first. 
""" + return make_id(markup_re.sub(r'\2', name)) + +alphanum = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' +wordchars_s = alphanum + u'_.-' +wordchars_e = alphanum + u'+`(-' +bad_markup_re = re.compile(r'(:[a-zA-Z0-9_-]+:)?(`{1,2})[ ]*(.+?)[ ]*(\2)') +quoted_code_re = re.compile(r'\\`(``.+?``)\'') + +def repair_bad_inline_markup(text): + # remove quoting from `\code{x}' + xtext = quoted_code_re.sub(r'\1', text) + + # special: the literal backslash + xtext = xtext.replace('``\\``', '\x03') + # special: literal backquotes + xtext = xtext.replace('``````', '\x02') + + ntext = [] + lasti = 0 + l = len(xtext) + for m in bad_markup_re.finditer(xtext): + ntext.append(xtext[lasti:m.start()]) + s, e = m.start(), m.end() + if s != 0 and xtext[s-1:s] in wordchars_s: + ntext.append('\\ ') + ntext.append((m.group(1) or '') + m.group(2) + m.group(3) + m.group(4)) + if e != l and xtext[e:e+1] in wordchars_e: + ntext.append('\\ ') + lasti = m.end() + ntext.append(xtext[lasti:]) + return ''.join(ntext).replace('\x02', '``````').replace('\x03', '``\\``') diff --git a/etc/inst.diff b/etc/inst.diff new file mode 100644 index 000000000..30f414048 --- /dev/null +++ b/etc/inst.diff @@ -0,0 +1,122 @@ +Index: inst/inst.tex +=================================================================== +--- inst/inst.tex (Revision 54633) ++++ inst/inst.tex (Arbeitskopie) +@@ -324,32 +324,6 @@ + section~\ref{custom-install} on custom installations. + + +-% This rather nasty macro is used to generate the tables that describe +-% each installation scheme. It's nasty because it takes two arguments +-% for each "slot" in an installation scheme, there will soon be more +-% than five of these slots, and TeX has a limit of 10 arguments to a +-% macro. Uh-oh. 
+- +-\newcommand{\installscheme}[8] +- {\begin{tableiii}{l|l|l}{textrm} +- {Type of file} +- {Installation Directory} +- {Override option} +- \lineiii{pure module distribution} +- {\filevar{#1}\filenq{#2}} +- {\longprogramopt{install-purelib}} +- \lineiii{non-pure module distribution} +- {\filevar{#3}\filenq{#4}} +- {\longprogramopt{install-platlib}} +- \lineiii{scripts} +- {\filevar{#5}\filenq{#6}} +- {\longprogramopt{install-scripts}} +- \lineiii{data} +- {\filevar{#7}\filenq{#8}} +- {\longprogramopt{install-data}} +- \end{tableiii}} +- +- + \section{Alternate Installation} + \label{alt-install} + +@@ -399,10 +373,23 @@ + The \longprogramopt{home} option defines the installation base + directory. Files are installed to the following directories under the + installation base as follows: +-\installscheme{home}{/lib/python} +- {home}{/lib/python} +- {home}{/bin} +- {home}{/share} ++\begin{tableiii}{l|l|l}{textrm} ++ {Type of file} ++ {Installation Directory} ++ {Override option} ++ \lineiii{pure module distribution} ++ {\filevar{home}\filenq{/lib/python}} ++ {\longprogramopt{install-purelib}} ++ \lineiii{non-pure module distribution} ++ {\filevar{home}\filenq{/lib/python}} ++ {\longprogramopt{install-platlib}} ++ \lineiii{scripts} ++ {\filevar{home}\filenq{/bin}} ++ {\longprogramopt{install-scripts}} ++ \lineiii{data} ++ {\filevar{home}\filenq{/share}} ++ {\longprogramopt{install-data}} ++\end{tableiii} + + + \versionchanged[The \longprogramopt{home} option used to be supported +@@ -452,10 +439,23 @@ + etc.) If \longprogramopt{exec-prefix} is not supplied, it defaults to + \longprogramopt{prefix}. 
Files are installed as follows: + +-\installscheme{prefix}{/lib/python2.\filevar{X}/site-packages} +- {exec-prefix}{/lib/python2.\filevar{X}/site-packages} +- {prefix}{/bin} +- {prefix}{/share} ++\begin{tableiii}{l|l|l}{textrm} ++ {Type of file} ++ {Installation Directory} ++ {Override option} ++ \lineiii{pure module distribution} ++ {\filevar{prefix}\filenq{/lib/python2.\filevar{X}/site-packages}} ++ {\longprogramopt{install-purelib}} ++ \lineiii{non-pure module distribution} ++ {\filevar{exec-prefix}\filenq{/lib/python2.\filevar{X}/site-packages}} ++ {\longprogramopt{install-platlib}} ++ \lineiii{scripts} ++ {\filevar{prefix}\filenq{/bin}} ++ {\longprogramopt{install-scripts}} ++ \lineiii{data} ++ {\filevar{prefix}\filenq{/share}} ++ {\longprogramopt{install-data}} ++\end{tableiii} + + There is no requirement that \longprogramopt{prefix} or + \longprogramopt{exec-prefix} actually point to an alternate Python +@@ -502,11 +502,24 @@ + The installation base is defined by the \longprogramopt{prefix} option; + the \longprogramopt{exec-prefix} option is not supported under Windows. 
+ Files are installed as follows: +-\installscheme{prefix}{} +- {prefix}{} +- {prefix}{\textbackslash{}Scripts} +- {prefix}{\textbackslash{}Data} + ++\begin{tableiii}{l|l|l}{textrm} ++ {Type of file} ++ {Installation Directory} ++ {Override option} ++ \lineiii{pure module distribution} ++ {\filevar{prefix}\filenq{}} ++ {\longprogramopt{install-purelib}} ++ \lineiii{non-pure module distribution} ++ {\filevar{prefix}\filenq{}} ++ {\longprogramopt{install-platlib}} ++ \lineiii{scripts} ++ {\filevar{prefix}\filenq{\textbackslash{}Scripts}} ++ {\longprogramopt{install-scripts}} ++ \lineiii{data} ++ {\filevar{prefix}\filenq{\textbackslash{}Data}} ++ {\longprogramopt{install-data}} ++\end{tableiii} + + + \section{Custom Installation} diff --git a/sphinx-build.py b/sphinx-build.py new file mode 100644 index 000000000..ab356f6f2 --- /dev/null +++ b/sphinx-build.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +""" + Sphinx - Python documentation toolchain + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +import sys + +if __name__ == '__main__': + from sphinx import main + try: + sys.exit(main(sys.argv)) + except Exception: + import traceback + traceback.print_exc() + import pdb + pdb.post_mortem(sys.exc_traceback) diff --git a/sphinx-web.py b/sphinx-web.py new file mode 100644 index 000000000..80820a1ce --- /dev/null +++ b/sphinx-web.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +""" + Sphinx - Python documentation webserver + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: 2007 by Armin Ronacher, Georg Brandl. + :license: Python license. 
+""" +import os +import sys +import getopt + +import sphinx +from sphinx.web.application import setup_app +from sphinx.web.serve import run_simple + +try: + from werkzeug.debug import DebuggedApplication +except ImportError: + DebuggedApplication = lambda x, y: x + + +def main(argv): + opts, args = getopt.getopt(argv[1:], "dhf:") + opts = dict(opts) + if len(args) != 1 or '-h' in opts: + print 'usage: %s [-d] [-f cfg.py] ' % argv[0] + print ' -d: debug mode, use werkzeug debugger if installed' + print ' -f: use "cfg.py" file instead of doc_root/webconf.py' + return 2 + + conffile = opts.get('-f', os.path.join(args[0], 'webconf.py')) + config = {} + execfile(conffile, config) + + port = config.get('listen_port', 3000) + hostname = config.get('listen_addr', 'localhost') + debug = ('-d' in opts) or (hostname == 'localhost') + + config['data_root_path'] = args[0] + config['debug'] = debug + + def make_app(): + app = setup_app(config, check_superuser=True) + if debug: + app = DebuggedApplication(app, True) + return app + + if os.environ.get('RUN_MAIN') != 'true': + print '* Sphinx %s- Python documentation web application' % \ + sphinx.__version__.replace('$', '').replace('Revision:', 'rev.') + if debug: + print '* Running in debug mode' + + run_simple(hostname, port, make_app, use_reloader=debug) + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/sphinx/__init__.py b/sphinx/__init__.py new file mode 100644 index 000000000..96923a05c --- /dev/null +++ b/sphinx/__init__.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +""" + Sphinx + ~~~~~~ + + The Python documentation toolchain. + + :copyright: 2007 by Georg Brandl. + :license: Python license. 
+""" + +import sys +import getopt +from os import path + +from .builder import builders +from .console import nocolor + +__version__ = '$Revision: 5369 $' + + +def usage(argv, msg=None): + if msg: + print >>sys.stderr, msg + print >>sys.stderr + print >>sys.stderr, """\ +usage: %s [options] sourcedir outdir [filenames...]" +options: -b -- builder to use (one of %s) + -a -- write all files; default is to only write new and changed files + -O -- give option to to the builder (-O help for list) + -D -- override a setting in sourcedir/conf.py + -N -- do not do colored output +modi: +* without -a and without filenames, write new and changed files. +* with -a, write all files. +* with filenames, write these.""" % (argv[0], ', '.join(builders)) + + +def main(argv): + try: + opts, args = getopt.getopt(argv[1:], 'ab:O:D:N') + srcdirname = path.abspath(args[0]) + if not path.isdir(srcdirname): + print >>sys.stderr, 'Error: Cannot find source directory.' + return 1 + if not path.isfile(path.join(srcdirname, 'conf.py')): + print >>sys.stderr, 'Error: Source directory doesn\'t contain conf.py file.' + return 1 + outdirname = path.abspath(args[1]) + if not path.isdir(outdirname): + print >>sys.stderr, 'Error: Cannot find output directory.' + return 1 + except (IndexError, getopt.error): + usage(argv) + return 1 + + filenames = args[2:] + err = 0 + for filename in filenames: + if not path.isfile(filename): + print >>sys.stderr, 'Cannot find file %r.' 
% filename + err = 1 + if err: + return 1 + + builder = all_files = None + opt_help = False + options = {} + confoverrides = {} + for opt, val in opts: + if opt == '-b': + if val not in builders: + usage(argv, 'Invalid builder value specified.') + return 1 + builder = val + elif opt == '-a': + if filenames: + usage(argv, 'Cannot combine -a option and filenames.') + return 1 + all_files = True + elif opt == '-O': + if val == 'help': + opt_help = True + continue + if '=' in val: + key, val = val.split('=') + try: + val = int(val) + except: pass + else: + key, val = val, True + options[key] = val + elif opt == '-D': + key, val = val.split('=') + try: + val = int(val) + except: pass + confoverrides[key] = val + elif opt == '-N': + nocolor() + + if builder is None: + print 'No builder selected, using default: html' + builder = 'html' + + builderobj = builders[builder] + + if opt_help: + print 'Options recognized by the %s builder:' % builder + for optname, description in builderobj.option_spec.iteritems(): + print ' * %s: %s' % (optname, description) + return 0 + + builderobj = builderobj(srcdirname, outdirname, options, + status_stream=sys.stdout, + warning_stream=sys.stderr, + confoverrides=confoverrides) + if all_files: + builderobj.build_all() + elif filenames: + builderobj.build_specific(filenames) + else: + builderobj.build_update() + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/sphinx/_jinja.py b/sphinx/_jinja.py new file mode 100644 index 000000000..3a61801f2 --- /dev/null +++ b/sphinx/_jinja.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +""" + sphinx._jinja + ~~~~~~~~~~~~~ + + Jinja glue. + + :copyright: 2007 by Georg Brandl. + :license: Python license. 
+""" +from __future__ import absolute_import + +import sys +from os import path + +sys.path.insert(0, path.dirname(__file__)) + +from jinja import Environment, FileSystemLoader diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py new file mode 100644 index 000000000..bdb13091a --- /dev/null +++ b/sphinx/addnodes.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +""" + sphinx.addnodes + ~~~~~~~~~~~~~~~ + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +from docutils import nodes + +# index markup +class index(nodes.Invisible, nodes.Inline, nodes.TextElement): pass + +# description units (classdesc, funcdesc etc.) +class desc(nodes.Admonition, nodes.Element): pass +class desc_content(nodes.General, nodes.Element): pass +class desc_signature(nodes.Part, nodes.Inline, nodes.TextElement): pass +class desc_classname(nodes.Part, nodes.Inline, nodes.TextElement): pass +class desc_name(nodes.Part, nodes.Inline, nodes.TextElement): pass +class desc_parameterlist(nodes.Part, nodes.Inline, nodes.TextElement): pass +class desc_parameter(nodes.Part, nodes.Inline, nodes.TextElement): pass +class desc_optional(nodes.Part, nodes.Inline, nodes.TextElement): pass + +# refcount annotation +class refcount(nodes.emphasis): pass + +# \versionadded, \versionchanged, \deprecated +class versionmodified(nodes.Admonition, nodes.TextElement): pass + +# seealso +class seealso(nodes.Admonition, nodes.Element): pass + +# productionlist +class productionlist(nodes.Admonition, nodes.Element): pass +class production(nodes.Part, nodes.Inline, nodes.TextElement): pass + +# toc tree +class toctree(nodes.General, nodes.Element): pass + +# centered +class centered(nodes.Part, nodes.Element): pass + +# pending xref +class pending_xref(nodes.Element): pass + +# compact paragraph -- never makes a

+class compact_paragraph(nodes.paragraph): pass + +# sets the highlighting language for literal blocks +class highlightlang(nodes.Element): pass + +# make them known to docutils. this is needed, because the HTMl writer +# will choke at some point if these are not added +nodes._add_node_class_names("""index desc desc_content desc_signature + desc_classname desc_name desc_parameterlist desc_parameter desc_optional + centered versionmodified seealso productionlist production toctree + pending_xref compact_paragraph highlightlang""".split()) diff --git a/sphinx/builder.py b/sphinx/builder.py new file mode 100644 index 000000000..a482a6013 --- /dev/null +++ b/sphinx/builder.py @@ -0,0 +1,608 @@ +# -*- coding: utf-8 -*- +""" + sphinx.builder + ~~~~~~~~~~~~~~ + + Builder classes for different output formats. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" +from __future__ import with_statement + +import os +import sys +import time +import types +import codecs +import shutil +import cPickle as pickle +import cStringIO as StringIO +from os import path + +from docutils.io import StringOutput, DocTreeInput +from docutils.core import publish_parts +from docutils.utils import new_document +from docutils.readers import doctree +from docutils.frontend import OptionParser + +from .util import (get_matching_files, attrdict, status_iterator, + ensuredir, get_category, relative_uri) +from .writer import HTMLWriter +from .console import bold, purple, green +from .htmlhelp import build_hhx +from .environment import BuildEnvironment +from .highlighting import pygments, get_stylesheet + +# side effect: registers roles and directives +from . import roles +from . 
class relpath_to(object):
    """Callable that computes URIs relative to a given page's target URI.

    ``filename`` is already a *target* URI: every visible call site passes
    ``self.get_target_uri(...)`` (see write_file and finish in the HTML
    builder).  The previous code applied ``get_target_uri`` a second time
    here, which mangles the URI (e.g. ``'index.html'`` -> ``'index..html'``),
    so the value is now stored as-is.
    """
    def __init__(self, builder, filename):
        # `filename` is a ready-made target URI -- do not convert it again
        self.baseuri = filename
        self.builder = builder
    def __call__(self, otheruri, resource=False):
        if not resource:
            # non-resource URIs are source filenames; map them to output URIs
            otheruri = self.builder.get_target_uri(otheruri)
        return relative_uri(self.baseuri, otheruri)
    def load_env(self):
        """Set up the build environment (``self.env``).

        Does nothing if an environment was already supplied to the
        constructor.  Unless the ``freshenv`` option is set, tries to load
        the pickled environment from the output directory; on any failure
        (or with ``freshenv``) a fresh BuildEnvironment is created.
        Returns None (despite the original wording, no boolean is returned).
        """
        if self.env:
            return
        if not self.options.freshenv:
            try:
                self.msg('trying to load pickled env...', nonl=True)
                self.env = BuildEnvironment.frompickle(
                    path.join(self.outdir, ENV_PICKLE_FILENAME))
                self.msg('done', nobold=True)
            except Exception, err:
                # any unpickling problem (missing file, version skew, ...)
                # just means we rebuild the environment from scratch
                self.msg('failed: %s' % err, nobold=True)
                self.env = BuildEnvironment(self.srcdir,
                                            path.join(self.outdir, '.doctrees'))
        else:
            self.env = BuildEnvironment(self.srcdir,
                                        path.join(self.outdir, '.doctrees'))
+ dirlen = len(self.srcdir) + 1 + to_write = [path.abspath(filename)[dirlen:] for filename in source_filenames] + self.load_env() + self.build(to_write, + summary='%d source files given on command line' % len(to_write)) + + def build_update(self): + """Only rebuild files changed or added since last build.""" + self.load_env() + to_build = list(self.get_outdated_files()) + if not to_build: + self.msg('no files are out of date, exiting.') + return + self.build(to_build, + summary='%d source files that are out of date' % len(to_build)) + + def build(self, filenames, summary=None): + if summary: + self.msg('building [%s]:' % self.name, nonl=1) + self.msg(summary, nobold=1) + + # while reading, collect all warnings from docutils + with collect_env_warnings(self): + self.msg('reading, updating environment:', nonl=1) + iterator = self.env.update(self.config) + self.msg(iterator.next(), nobold=1) + for filename in iterator: + self.msg(purple(filename), nonl=1, nobold=1) + self.msg() + + # save the environment + self.msg('pickling the env...', nonl=True) + self.env.topickle(path.join(self.outdir, ENV_PICKLE_FILENAME)) + self.msg('done', nobold=True) + + # global actions + self.msg('checking consistency...') + self.env.check_consistency() + self.msg('creating index...') + self.env.create_index(self) + + self.prepare_writing() + + if filenames: + # add all TOC files that may have changed + filenames_set = set(filenames) + for filename in filenames: + for tocfilename in self.env.files_to_rebuild.get(filename, []): + filenames_set.add(tocfilename) + filenames_set.add('contents.rst') + else: + # build all + filenames_set = set(self.env.all_files) + + # write target files + with collect_env_warnings(self): + self.msg('writing output...') + for filename in status_iterator(sorted(filenames_set), green, + stream=self.status_stream): + doctree = self.env.get_and_resolve_doctree(filename, self) + self.write_file(filename, doctree) + + # finish (write style files etc.) 
    def render_partial(self, node):
        """Utility: Render a lone doctree node.

        Wraps *node* in a fresh document and runs it through docutils'
        ``publish_parts`` with the HTML writer; returns the parts dict
        (callers in this class use the ``'title'`` and ``'fragment'``
        entries).
        """
        # the document name is irrelevant here; it only serves as a dummy
        # source path for the new document
        doc = new_document('foo')
        doc.append(node)
        return publish_parts(
            doc,
            source_class=DocTreeInput,
            reader=doctree.Reader(),
            writer=HTMLWriter(self.config),
            settings_overrides={'output_encoding': 'unicode'}
        )
    def write_file(self, filename, doctree):
        """Render one source file's resolved doctree and hand it off.

        Builds the HTML body via the doc writer, computes the template
        context (title, body, TOC, prev/next/parent relations) and passes
        it to ``index_file()`` and ``handle_file()``.
        """
        destination = StringOutput(encoding='utf-8')
        doctree.settings = self.docsettings

        # renders into self.docwriter.parts (picked out below); the return
        # value itself is not used
        output = self.docwriter.write(doctree, destination)
        self.docwriter.assemble_parts()

        # determine previous/next/parent relations from the toctree;
        # related is (parent, previous, next) -- TODO confirm ordering
        # against BuildEnvironment.toctree_relations
        prev = next = None
        parents = []
        related = self.env.toctree_relations.get(filename)
        if related:
            prev = {'link': self.get_relative_uri(filename, related[1]),
                    'title': self.render_partial(self.env.titles[related[1]])['title']}
            next = {'link': self.get_relative_uri(filename, related[2]),
                    'title': self.render_partial(self.env.titles[related[2]])['title']}
        while related:
            # walk up the chain of toctree parents, collecting breadcrumbs
            parents.append(
                {'link': self.get_relative_uri(filename, related[0]),
                 'title': self.render_partial(self.env.titles[related[0]])['title']})
            related = self.env.toctree_relations.get(related[0])
        if parents:
            parents.pop() # remove link to "contents.rst"; we have a generic
                          # "back to index" link already
            parents.reverse()

        title = self.env.titles.get(filename)
        if title:
            title = self.render_partial(title)['title']
        else:
            title = ''
        self.globalcontext['titles'][filename] = title
        # assumes source filenames end in '.rst'; the '.txt' copy is used
        # for the "show source" link
        sourcename = filename[:-4] + '.txt'
        context = dict(
            title = title,
            sourcename = sourcename,
            pathto = relpath_to(self, self.get_target_uri(filename)),
            body = self.docwriter.parts['fragment'],
            toc = self.render_partial(self.env.get_toc_for(filename))['fragment'],
            # only display a TOC if there's more than one item to show
            display_toc = (self.env.toc_num_entries[filename] > 1),
            parents = parents,
            prev = prev,
            next = next,
        )

        self.index_file(filename, doctree, title)
        self.handle_file(filename, context)
lines for each index letter, used to distribute + # the entries into two columns + indexcounts = [] + for key, entries in self.env.index: + indexcounts.append(sum(1 + len(subitems) for _, (_, subitems) in entries)) + + genindexcontext = dict( + genindexentries = self.env.index, + genindexcounts = indexcounts, + current_page_name = 'genindex', + pathto = relpath_to(self, self.get_target_uri('genindex.rst')), + ) + self.handle_file('genindex.rst', genindexcontext, 'genindex') + + # the global module index + + # the sorted list of all modules, for the global module index + modules = sorted(((mn, (self.get_relative_uri('modindex.rst', fn) + + '#module-' + mn, sy, pl)) + for (mn, (fn, sy, pl)) in self.env.modules.iteritems()), + key=lambda x: x[0].lower()) + # collect all platforms + platforms = set() + # sort out collapsable modules + modindexentries = [] + pmn = '' + cg = 0 # collapse group + fl = '' # first letter + for mn, (fn, sy, pl) in modules: + pl = pl.split(', ') if pl else [] + platforms.update(pl) + if fl != mn[0].lower() and mn[0] != '_': + modindexentries.append(['', False, 0, False, mn[0].upper(), '', []]) + tn = mn.partition('.')[0] + if tn != mn: + # submodule + if pmn == tn: + # first submodule - make parent collapsable + modindexentries[-1][1] = True + elif not pmn.startswith(tn): + # submodule without parent in list, add dummy entry + cg += 1 + modindexentries.append([tn, True, cg, False, '', '', []]) + else: + cg += 1 + modindexentries.append([mn, False, cg, (tn != mn), fn, sy, pl]) + pmn = mn + fl = mn[0].lower() + platforms = sorted(platforms) + + modindexcontext = dict( + modindexentries = modindexentries, + platforms = platforms, + current_page_name = 'modindex', + pathto = relpath_to(self, self.get_target_uri('modindex.rst')), + ) + self.handle_file('modindex.rst', modindexcontext, 'modindex') + + # the index page + indexcontext = dict( + pathto = relpath_to(self, self.get_target_uri('index.rst')), + current_page_name = 'index', + ) + 
self.handle_file('index.rst', indexcontext, 'index') + + # the search page + searchcontext = dict( + pathto = relpath_to(self, self.get_target_uri('search.rst')), + current_page_name = 'search', + ) + self.handle_file('search.rst', searchcontext, 'search') + + if not self.options.nostyle: + self.msg('copying style files...') + # copy style files + styledirname = path.join(path.dirname(__file__), 'style') + ensuredir(path.join(self.outdir, 'style')) + for filename in os.listdir(styledirname): + if not filename.startswith('.'): + shutil.copyfile(path.join(styledirname, filename), + path.join(self.outdir, 'style', filename)) + # add pygments style file + f = open(path.join(self.outdir, 'style', 'pygments.css'), 'w') + if pygments: + f.write(get_stylesheet()) + f.close() + + # dump the search index + self.handle_finish() + + # --------- these are overwritten by the Web builder + + def get_target_uri(self, source_filename): + return source_filename[:-4] + '.html' + + def get_outdated_files(self): + for filename in get_matching_files( + self.srcdir, '*.rst', exclude=set(self.config.get('unused_files', ()))): + try: + targetmtime = path.getmtime(path.join(self.outdir, + filename[:-4] + '.html')) + except: + targetmtime = 0 + if path.getmtime(path.join(self.srcdir, filename)) > targetmtime: + yield filename + + def index_file(self, filename, doctree, title): + # only index pages with title + if self.indexer is not None and title: + category = get_category(filename) + if category is not None: + self.indexer.feed(self.get_target_uri(filename)[:-5], # strip '.html' + category, title, doctree) + + def handle_file(self, filename, context, templatename='page'): + ctx = self.globalcontext.copy() + ctx.update(context) + output = self.templates[templatename].render(ctx) + outfilename = path.join(self.outdir, filename[:-4] + '.html') + ensuredir(path.dirname(outfilename)) # normally different from self.outdir + try: + with codecs.open(outfilename, 'w', 'utf-8') as fp: + 
fp.write(output) + except (IOError, OSError), err: + print >>self.warning_stream, "Error writing file %s: %s" % (outfilename, err) + if self.copysource and context.get('sourcename'): + # copy the source file for the "show source" link + shutil.copyfile(path.join(self.srcdir, filename), + path.join(self.outdir, context['sourcename'])) + + def handle_finish(self): + if self.indexer is not None: + self.msg('dumping search index...') + f = open(path.join(self.outdir, 'searchindex.json'), 'w') + self.indexer.dump(f, 'json') + f.close() + + +class WebHTMLBuilder(StandaloneHTMLBuilder): + """ + Builds HTML docs usable with the web-based doc server. + """ + name = 'web' + + # doesn't use the standalone specific options + option_spec = Builder.option_spec.copy() + option_spec.update({ + 'nostyle': 'Don\'t copy style and script files', + 'nosearchindex': 'Don\'t create a search index for the online search', + }) + + def init(self): + # Nothing to do here. + pass + + def get_outdated_files(self): + for filename in get_matching_files( + self.srcdir, '*.rst', exclude=set(self.config.get('unused_files', ()))): + try: + targetmtime = path.getmtime(path.join(self.outdir, + filename[:-4] + '.fpickle')) + except: + targetmtime = 0 + if path.getmtime(path.join(self.srcdir, filename)) > targetmtime: + yield filename + + def get_target_uri(self, source_filename): + if source_filename == 'index.rst': + return '' + if source_filename.endswith('/index.rst'): + return source_filename[:-9] # up to / + return source_filename[:-4] + '/' + + def index_file(self, filename, doctree, title): + # only index pages with title and category + if self.indexer is not None and title: + category = get_category(filename) + if category is not None: + self.indexer.feed(filename, category, title, doctree) + + def handle_file(self, filename, context, templatename='page'): + outfilename = path.join(self.outdir, filename[:-4] + '.fpickle') + ensuredir(path.dirname(outfilename)) + context.pop('pathto', None) # 
can't be pickled + with file(outfilename, 'wb') as fp: + pickle.dump(context, fp, 2) + + # if there is a source file, copy the source file for the "show source" link + if context.get('sourcename'): + source_name = path.join(self.outdir, 'sources', context['sourcename']) + ensuredir(path.dirname(source_name)) + shutil.copyfile(path.join(self.srcdir, filename), source_name) + + def handle_finish(self): + # dump the global context + outfilename = path.join(self.outdir, 'globalcontext.pickle') + with file(outfilename, 'wb') as fp: + pickle.dump(self.globalcontext, fp, 2) + + if self.indexer is not None: + self.msg('dumping search index...') + f = open(path.join(self.outdir, 'searchindex.pickle'), 'w') + self.indexer.dump(f, 'pickle') + f.close() + # touch 'last build' file, used by the web application to determine + # when to reload its environment and clear the cache + open(path.join(self.outdir, LAST_BUILD_FILENAME), 'w').close() + # copy configuration file if not present + if not path.isfile(path.join(self.outdir, 'webconf.py')): + shutil.copyfile(path.join(path.dirname(__file__), 'web', 'webconf.py'), + path.join(self.outdir, 'webconf.py')) + + +class HTMLHelpBuilder(StandaloneHTMLBuilder): + """ + Builder that also outputs Windows HTML help project, contents and index files. + Adapted from the original Doc/tools/prechm.py. 
codes = {}

def nocolor():
    """Disable colored output by forgetting every escape code."""
    codes.clear()

def colorize(name, text):
    """Wrap *text* in the escape code registered for *name*, plus a reset.

    Unknown names (or a cleared table, after nocolor()) degrade gracefully
    to the bare text.
    """
    return '%s%s%s' % (codes.get(name, ''), text, codes.get('reset', ''))

def create_color_func(name):
    """Install a module-level helper that colorizes its argument as *name*."""
    def inner(text):
        return colorize(name, text)
    globals()[name] = inner

# non-color SGR attributes
_attrs = {
    'reset': '39;49;00m',
    'bold': '01m',
    'faint': '02m',
    'standout': '03m',
    'underline': '04m',
    'blink': '05m',
}

for name, value in _attrs.items():
    codes[name] = '\x1b[' + value

# foreground colors: the list position determines the SGR color number
# (30 + index); the second name of each pair is the bright ("01") variant
_colors = [
    ('black', 'darkgray'),
    ('darkred', 'red'),
    ('darkgreen', 'green'),
    ('brown', 'yellow'),
    ('darkblue', 'blue'),
    ('purple', 'fuchsia'),
    ('turquoise', 'teal'),
    ('lightgray', 'white'),
]

for i, (dark, light) in enumerate(_colors):
    codes[dark] = '\x1b[%im' % (i + 30)
    codes[light] = '\x1b[%i;01m' % (i + 30)

# one helper function per registered code name (bold(), green(), ...)
for name in codes:
    create_color_func(name)
+""" + +import re +import string +from os import path + +from docutils import nodes +from docutils.parsers.rst import directives, roles +from docutils.parsers.rst.directives import admonitions + +from . import addnodes + +# ------ index markup -------------------------------------------------------------- + +entrytypes = [ + 'single', 'pair', 'triple', 'quadruple', + 'module', 'keyword', 'operator', 'object', 'exception', 'statement', 'builtin', +] + +def index_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + arguments = arguments[0].split('\n') + env = state.document.settings.env + targetid = 'index-%s' % env.index_num + env.index_num += 1 + targetnode = nodes.target('', '', ids=[targetid]) + state.document.note_explicit_target(targetnode) + indexnode = addnodes.index() + indexnode['entries'] = arguments + for entry in arguments: + try: + type, string = entry.split(':', 1) + env.note_index_entry(type.strip(), string.strip(), + targetid, string.strip()) + except ValueError: + continue + return [indexnode, targetnode] + +index_directive.arguments = (1, 0, 1) +directives.register_directive('index', index_directive) + +# ------ information units --------------------------------------------------------- + +def desc_index_text(desctype, currmodule, name): + if desctype == 'function': + if not currmodule: + return '%s() (built-in function)' % name + return '%s() (in module %s)' % (name, currmodule) + elif desctype == 'data': + if not currmodule: + return '%s (built-in variable)' % name + return '%s (in module %s)' % (name, currmodule) + elif desctype == 'class': + return '%s (class in %s)' % (name, currmodule) + elif desctype == 'exception': + return name + elif desctype == 'method': + try: + clsname, methname = name.rsplit('.', 1) + except: + if currmodule: + return '%s() (in module %s)' % (name, currmodule) + else: + return '%s()' % name + if currmodule: + return '%s() (%s.%s method)' % (methname, currmodule, 
clsname) + else: + return '%s() (%s method)' % (methname, clsname) + elif desctype == 'attribute': + try: + clsname, attrname = name.rsplit('.', 1) + except: + if currmodule: + return '%s (in module %s)' % (name, currmodule) + else: + return name + if currmodule: + return '%s (%s.%s attribute)' % (attrname, currmodule, clsname) + else: + return '%s (%s attribute)' % (attrname, clsname) + elif desctype == 'opcode': + return '%s (opcode)' % name + elif desctype == 'cfunction': + return '%s (C function)' % name + elif desctype == 'cmember': + return '%s (C member)' % name + elif desctype == 'cmacro': + return '%s (C macro)' % name + elif desctype == 'ctype': + return '%s (C type)' % name + elif desctype == 'cvar': + return '%s (C variable)' % name + else: + raise ValueError("unhandled descenv: %s" % desctype) + + +# ------ functions to parse a Python or C signature and create desc_* nodes. + +py_sig_re = re.compile(r'''^([\w.]*\.)? # class names + (\w+) \s* # thing name + (?: \((.*)\) )? $ # optionally arguments + ''', re.VERBOSE) + +py_paramlist_re = re.compile(r'([\[\],])') # split at '[', ']' and ',' + +def parse_py_signature(signode, sig, desctype, currclass): + """ + Transform a python signature into RST nodes. Returns (signode, fullname). + Return the fully qualified name of the thing. + + If inside a class, the current class name is handled intelligently: + * it is stripped from the displayed name if present + * it is added to the full name (return value) if not present + """ + m = py_sig_re.match(sig) + if m is None: raise ValueError + classname, name, arglist = m.groups() + + if currclass: + if classname and classname.startswith(currclass): + fullname = classname + name + classname = classname[len(currclass):].lstrip('.') + elif classname: + fullname = currclass + '.' + classname + name + else: + fullname = currclass + '.' 
+ name + else: + fullname = classname + name if classname else name + + if classname: + signode += addnodes.desc_classname(classname, classname) + signode += addnodes.desc_name(name, name) + if not arglist: + if desctype in ('function', 'method'): + # for callables, add an empty parameter list + signode += addnodes.desc_parameterlist() + return fullname + signode += addnodes.desc_parameterlist() + + stack = [signode[-1]] + arglist = arglist.replace('`', '').replace(r'\ ', '') # remove markup + for token in py_paramlist_re.split(arglist): + if token == '[': + opt = addnodes.desc_optional() + stack[-1] += opt + stack.append(opt) + elif token == ']': + try: stack.pop() + except IndexError: raise ValueError + elif not token or token == ',' or token.isspace(): + pass + else: + token = token.strip() + stack[-1] += addnodes.desc_parameter(token, token) + if len(stack) != 1: raise ValueError + return fullname + + +c_sig_re = re.compile( + r'''^([^(]*?) # return type + (\w+) \s* # thing name + (?: \((.*)\) )? $ # optionally arguments + ''', re.VERBOSE) +c_funcptr_sig_re = re.compile( + r'''^([^(]+?) 
# return type + (\( [^()]+ \)) \s* # name in parentheses + \( (.*) \) $ # arguments + ''', re.VERBOSE) + +# RE to split at word boundaries +wsplit_re = re.compile(r'(\W+)') + +# These C types aren't described in the reference, so don't try to create +# a cross-reference to them +stopwords = set(('const', 'void', 'char', 'int', 'long', 'FILE', 'struct')) + +def parse_c_type(node, ctype): + # add cross-ref nodes for all words + for part in filter(None, wsplit_re.split(ctype)): + tnode = nodes.Text(part, part) + if part[0] in string.letters+'_' and part not in stopwords: + pnode = addnodes.pending_xref( + '', reftype='ctype', reftarget=part, modname=None, classname=None) + pnode += tnode + node += pnode + else: + node += tnode + +def parse_c_signature(signode, sig, desctype): + """Transform a C-language signature into RST nodes.""" + # first try the function pointer signature regex, it's more specific + m = c_funcptr_sig_re.match(sig) + if m is None: + m = c_sig_re.match(sig) + if m is None: + raise ValueError('no match') + rettype, name, arglist = m.groups() + + parse_c_type(signode, rettype) + signode += addnodes.desc_name(name, name) + if not arglist: + if desctype == 'cfunction': + # for functions, add an empty parameter list + signode += addnodes.desc_parameterlist() + return name + + paramlist = addnodes.desc_parameterlist() + arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup + # this messes up function pointer types, but not too badly ;) + args = arglist.split(',') + for arg in args: + arg = arg.strip() + param = addnodes.desc_parameter('', '', noemph=True) + try: + ctype, argname = arg.rsplit(' ', 1) + except ValueError: + # no argument name given, only the type + parse_c_type(param, arg) + else: + parse_c_type(param, ctype) + param += nodes.emphasis(' '+argname, ' '+argname) + paramlist += param + signode += paramlist + return name + + +opcode_sig_re = re.compile(r'(\w+(?:\+\d)?)\s*\((.*)\)') + +def parse_opcode_signature(signode, sig, 
def add_refcount_annotation(env, node, name):
    """Append a return-value refcount annotation to *node*.

    Looks up *name* in the environment's refcount table; for entries that
    return an object pointer, adds a note saying whether the reference is
    new, borrowed, or always NULL.  Returns None.
    """
    entry = env.refcounts.get(name)
    if not entry or entry.result_type not in ("PyObject*", "PyVarObject*"):
        # unknown function, or a non-object return type: nothing to say
        return
    if entry.result_refs is None:
        text = 'Return value: ' + "Always NULL."
    elif entry.result_refs:
        text = 'Return value: ' + "New" + " reference."
    else:
        text = 'Return value: ' + "Borrowed" + " reference."
    node += addnodes.refcount(text, text)
def version_directive(name, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine):
    """Handle versionadded/versionchanged/deprecated directives.

    The directive name itself becomes the node's ``type``; the first
    argument is the version number, the optional second argument is an
    inline explanation.  The change is also recorded in the build
    environment via ``note_versionchange``.
    """
    node = addnodes.versionmodified()
    node['type'] = name
    node['version'] = arguments[0]
    if len(arguments) == 2:
        # parse the explanation text as inline markup
        inodes, messages = state.inline_text(arguments[1], lineno+1)
        node.extend(inodes)
        if content:
            # note: directive body content is only parsed when a second
            # argument (the explanation) was given
            state.nested_parse(content, content_offset, node)
        ret = [node] + messages
    else:
        ret = [node]
    env = state.document.settings.env
    # record the change in the build environment
    env.note_versionchange(node['type'], node['version'], node)
    return ret
roles._roles[''] + return [node] + messages + +productionlist_directive.content = 0 +productionlist_directive.arguments = (1, 0, 1) +directives.register_directive('productionlist', productionlist_directive) + +# ------ section metadata ---------------------------------------------------------- + +def module_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + env = state.document.settings.env + modname = arguments[0].strip() + env.currmodule = modname + env.note_module(modname, options.get('synopsis', ''), options.get('platform', '')) + ret = [] + targetnode = nodes.target('', '', ids=['module-' + modname]) + state.document.note_explicit_target(targetnode) + ret.append(targetnode) + if 'platform' in options: + node = nodes.paragraph() + node += nodes.emphasis('Platforms: ', 'Platforms: ') + node += nodes.Text(options['platform'], options['platform']) + ret.append(node) + # the synopsis isn't printed; in fact, it is only used in the modindex currently + env.note_index_entry('single', '%s (module)' % modname, 'module-' + modname, + modname) + return ret + +module_directive.arguments = (1, 0, 0) +module_directive.options = {'platform': lambda x: x, + 'synopsis': lambda x: x} +directives.register_directive('module', module_directive) + + +def author_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + # The author directives aren't included in the built document + return [] + +author_directive.arguments = (1, 0, 1) +directives.register_directive('sectionauthor', author_directive) +directives.register_directive('moduleauthor', author_directive) + + +# ------ toctree directive --------------------------------------------------------- + +def toctree_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + env = state.document.settings.env + dirname = path.dirname(env.filename) + + subnode = addnodes.toctree() + 
includefiles = filter(None, content) + # absolutize filenames + includefiles = map(lambda x: path.normpath(path.join(dirname, x)), includefiles) + subnode['includefiles'] = includefiles + subnode['maxdepth'] = options.get('maxdepth', -1) + return [subnode] + +toctree_directive.content = 1 +toctree_directive.options = {'maxdepth': int} +directives.register_directive('toctree', toctree_directive) + + +# ------ centered directive --------------------------------------------------------- + +def centered_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + if not arguments: + return [] + subnode = addnodes.centered() + inodes, messages = state.inline_text(arguments[0], lineno) + subnode.extend(inodes) + return [subnode] + messages + +centered_directive.arguments = (1, 0, 1) +directives.register_directive('centered', centered_directive) + + +# ------ highlightlanguage directive ------------------------------------------------ + +def highlightlang_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + return [addnodes.highlightlang(lang=arguments[0].strip())] + +highlightlang_directive.content = 0 +highlightlang_directive.arguments = (1, 0, 0) +directives.register_directive('highlightlang', + highlightlang_directive) diff --git a/sphinx/environment.py b/sphinx/environment.py new file mode 100644 index 000000000..a70f5fb6f --- /dev/null +++ b/sphinx/environment.py @@ -0,0 +1,840 @@ +# -*- coding: utf-8 -*- +""" + sphinx.environment + ~~~~~~~~~~~~~~~~~~ + + Global creation environment. + + :copyright: 2007 by Georg Brandl. + :license: Python license. 
+""" +from __future__ import with_statement + +import re +import os +import time +import heapq +import hashlib +import difflib +import itertools +import cPickle as pickle +from os import path +from string import uppercase + +from docutils import nodes +from docutils.io import FileInput +from docutils.core import publish_doctree +from docutils.utils import Reporter +from docutils.readers import standalone +from docutils.transforms import Transform +from docutils.transforms.parts import ContentsFilter +from docutils.transforms.universal import FilterMessages + +from . import addnodes +from .util import get_matching_files +from .refcounting import Refcounts + +default_settings = { + 'embed_stylesheet': False, + 'cloak_email_addresses': True, + 'pep_base_url': 'http://www.python.org/dev/peps/', + 'input_encoding': 'utf-8', + 'doctitle_xform': False, + 'sectsubtitle_xform': False, +} + +# This is increased every time a new environment attribute is added +# to properly invalidate pickle files. +ENV_VERSION = 9 + + +def walk_depth(node, depth, maxdepth): + """Utility: Cut a TOC at a specified depth.""" + for subnode in node.children[:]: + if isinstance(subnode, (addnodes.compact_paragraph, nodes.list_item)): + walk_depth(subnode, depth, maxdepth) + elif isinstance(subnode, nodes.bullet_list): + if depth > maxdepth: + subnode.parent.replace(subnode, []) + else: + walk_depth(subnode, depth+1, maxdepth) + + +default_substitutions = set([ + 'version', + 'release', + 'today', +]) + + +class DefaultSubstitutions(Transform): + """ + Replace some substitutions if they aren't defined in the document. 
+ """ + # run before the default Substitutions + default_priority = 210 + + def apply(self): + config = self.document.settings.env.config + # only handle those not otherwise defined in the document + to_handle = default_substitutions - set(self.document.substitution_defs) + for ref in self.document.traverse(nodes.substitution_reference): + refname = ref['refname'] + if refname in to_handle: + text = config.get(refname, '') + if refname == 'today' and not text: + # special handling: can also specify a strftime format + text = time.strftime(config.get('today_fmt', '%B %d, %Y')) + ref.replace_self(nodes.Text(text, text)) + + +class MoveModuleTargets(Transform): + """ + Move module targets to their nearest enclosing section title. + """ + default_priority = 210 + + def apply(self): + for node in self.document.traverse(nodes.target): + if not node['ids']: + continue + if node['ids'][0].startswith('module-') and \ + node.parent.__class__ is nodes.section: + node.parent['ids'] = node['ids'] + node.parent.remove(node) + + +class MyStandaloneReader(standalone.Reader): + """ + Add our own Substitutions transform. + """ + def get_transforms(self): + tf = standalone.Reader.get_transforms(self) + return tf + [DefaultSubstitutions, MoveModuleTargets, + FilterMessages] + + +class MyContentsFilter(ContentsFilter): + """ + Used with BuildEnvironment.add_toc_from() to discard cross-file links + within table-of-contents link nodes. + """ + def visit_pending_xref(self, node): + self.parent.append(nodes.literal(node['reftarget'], node['reftarget'])) + raise nodes.SkipNode + + +class BuildEnvironment: + """ + The environment in which the ReST files are translated. + Stores an inventory of cross-file targets and provides doctree + transformations to resolve links to them. + + Not all doctrees are stored in the environment, only those of files + containing a "toctree" directive, because they have to change if sections + are edited in other files. This keeps the environment size moderate. 
+ """ + + # --------- ENVIRONMENT PERSISTENCE ---------------------------------------- + + @staticmethod + def frompickle(filename): + with open(filename, 'rb') as picklefile: + env = pickle.load(picklefile) + if env.version != ENV_VERSION: + raise IOError('env version not current') + return env + + def topickle(self, filename): + # remove unpicklable attributes + wstream = self.warning_stream + self.set_warning_stream(None) + with open(filename, 'wb') as picklefile: + pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL) + # reset stream + self.set_warning_stream(wstream) + + # --------- ENVIRONMENT INITIALIZATION ------------------------------------- + + def __init__(self, srcdir, doctreedir): + self.doctreedir = doctreedir + self.srcdir = srcdir + self.config = {} + + # read the refcounts file + self.refcounts = Refcounts.fromfile( + path.join(self.srcdir, 'data', 'refcounts.dat')) + + # the docutils settings for building + self.settings = default_settings.copy() + self.settings['env'] = self + + # the stream to write warning messages to + self.warning_stream = None + + # this is to invalidate old pickles + self.version = ENV_VERSION + + # Build times -- to determine changed files + # Also use this as an inventory of all existing and built filenames. 
+ self.all_files = {} # filename -> (mtime, md5) at the time of build + + # File metadata + self.metadata = {} # filename -> dict of metadata items + + # TOC inventory + self.titles = {} # filename -> title node + self.tocs = {} # filename -> table of contents nodetree + self.toc_num_entries = {} # filename -> number of real entries + # used to determine when to show the TOC in a sidebar + # (don't show if it's only one item) + self.toctree_relations = {} # filename -> ["parent", "previous", "next"] filename + # for navigating in the toctree + self.files_to_rebuild = {} # filename -> list of files (containing its TOCs) + # to rebuild too + + # X-ref target inventory + self.descrefs = {} # fullname -> filename, desctype + self.filemodules = {} # filename -> [modules] + self.modules = {} # modname -> filename, synopsis, platform + self.tokens = {} # tokenname -> filename + self.labels = {} # labelname -> filename, labelid + + # Other inventories + self.indexentries = {} # filename -> list of + # (type, string, target, aliasname) + self.versionchanges = {} # version -> list of + # (type, filename, module, descname, content) + + # These are set while parsing a file + self.filename = None # current file name + self.currmodule = None # current module name + self.currclass = None # current class name + self.currdesc = None # current descref name + self.index_num = 0 # autonumber for index targets + + def set_warning_stream(self, stream): + self.warning_stream = stream + self.settings['warning_stream'] = stream + + def clear_file(self, filename): + """Remove all traces of a source file in the inventory.""" + if filename in self.all_files: + self.all_files.pop(filename, None) + self.metadata.pop(filename, None) + self.titles.pop(filename, None) + self.tocs.pop(filename, None) + self.toc_num_entries.pop(filename, None) + self.files_to_rebuild.pop(filename, None) + + for fullname, (fn, _) in self.descrefs.items(): + if fn == filename: + del self.descrefs[fullname] + 
self.filemodules.pop(filename, None) + for modname, (fn, _, _) in self.modules.items(): + if fn == filename: + del self.modules[modname] + for tokenname, fn in self.tokens.items(): + if fn == filename: + del self.tokens[tokenname] + for labelname, (fn, _, _) in self.labels.items(): + if fn == filename: + del self.labels[labelname] + self.indexentries.pop(filename, None) + for version, changes in self.versionchanges.items(): + new = [change for change in changes if change[1] != filename] + changes[:] = new + + def get_outdated_files(self, config): + """ + Return (removed, changed) iterables. + """ + all_source_files = list(get_matching_files( + self.srcdir, '*.rst', exclude=set(config.get('unused_files', ())))) + + # clear all files no longer present + removed = set(self.all_files) - set(all_source_files) + + if config != self.config: + # config values affect e.g. substitutions + changed = all_source_files + else: + changed = [] + for filename in all_source_files: + if filename not in self.all_files: + changed.append(filename) + else: + # if the doctree file is not there, rebuild + if not path.isfile(path.join(self.doctreedir, + filename[:-3] + 'doctree')): + changed.append(filename) + continue + mtime, md5 = self.all_files[filename] + newmtime = path.getmtime(path.join(self.srcdir, filename)) + if newmtime == mtime: + continue + # check the MD5 + with file(path.join(self.srcdir, filename), 'rb') as f: + newmd5 = hashlib.md5(f.read()).digest() + if newmd5 != md5: + changed.append(filename) + + return removed, changed + + def update(self, config): + """ + (Re-)read all files new or changed since last update. + Yields a summary and then filenames as it processes them. 
+ """ + removed, changed = self.get_outdated_files(config) + msg = '%s removed, %s changed' % (len(removed), len(changed)) + if self.config != config: + msg = '[config changed] ' + msg + yield msg + + self.config = config + + # clear all files no longer present + for filename in removed: + self.clear_file(filename) + + # re-read the refcount file + self.refcounts = Refcounts.fromfile( + path.join(self.srcdir, 'data', 'refcounts.dat')) + + # read all new and changed files + for filename in changed: + yield filename + self.read_file(filename) + + # --------- SINGLE FILE BUILDING ------------------------------------------- + + def read_file(self, filename, src_path=None, save_parsed=True): + """Parse a file and add/update inventory entries for the doctree. + If srcpath is given, read from a different source file.""" + # remove all inventory entries for that file + self.clear_file(filename) + + if src_path is None: + src_path = path.join(self.srcdir, filename) + + self.filename = filename + doctree = publish_doctree(None, src_path, FileInput, + settings_overrides=self.settings, + reader=MyStandaloneReader()) + self.process_metadata(filename, doctree) + self.create_title_from(filename, doctree) + self.note_labels_from(filename, doctree) + self.build_toc_from(filename, doctree) + + # calculate the MD5 of the file at time of build + with file(src_path, 'rb') as f: + md5 = hashlib.md5(f.read()).digest() + self.all_files[filename] = (path.getmtime(src_path), md5) + + # make it picklable + doctree.reporter = None + doctree.transformer = None + doctree.settings.env = None + doctree.settings.warning_stream = None + + # cleanup + self.filename = None + self.currmodule = None + self.currclass = None + + if save_parsed: + # save the parsed doctree + doctree_filename = path.join(self.doctreedir, filename[:-3] + 'doctree') + dirname = path.dirname(doctree_filename) + if not path.isdir(dirname): + os.makedirs(dirname) + with file(doctree_filename, 'wb') as f: + pickle.dump(doctree, 
f, pickle.HIGHEST_PROTOCOL) + else: + return doctree + + def process_metadata(self, filename, doctree): + """ + Process the docinfo part of the doctree as metadata. + """ + self.metadata[filename] = md = {} + docinfo = doctree[0] + if docinfo.__class__ is not nodes.docinfo: + # nothing to see here + return + for node in docinfo: + if node.__class__ is nodes.author: + # handled specially by docutils + md['author'] = node.astext() + elif node.__class__ is nodes.field: + name, body = node + md[name.astext()] = body.astext() + del doctree[0] + + def create_title_from(self, filename, document): + """ + Add a title node to the document (just copy the first section title), + and store that title in the environment. + """ + for node in document.traverse(nodes.section): + titlenode = nodes.title() + visitor = MyContentsFilter(document) + node[0].walkabout(visitor) + titlenode += visitor.get_entry_text() + self.titles[filename] = titlenode + return + + def note_labels_from(self, filename, document): + for name, explicit in document.nametypes.iteritems(): + if not explicit: + continue + labelid = document.nameids[name] + node = document.ids[labelid] + if not isinstance(node, nodes.section): + # e.g. 
desc-signatures + continue + sectname = node[0].astext() # node[0] == title node + if name in self.labels: + print >>self.warning_stream, \ + ('WARNING: duplicate label %s, ' % name + + 'in %s and %s' % (self.labels[name][0], filename)) + self.labels[name] = filename, labelid, sectname + + def note_toctree(self, filename, toctreenode): + """Note a TOC tree directive in a document and gather information about + file relations from it.""" + includefiles = toctreenode['includefiles'] + includefiles_len = len(includefiles) + for i, includefile in enumerate(includefiles): + # the "previous" file for the first toctree item is the parent + previous = includefiles[i-1] if i > 0 else filename + # the "next" file for the last toctree item is the parent again + next = includefiles[i+1] if i < includefiles_len-1 else filename + self.toctree_relations[includefile] = [filename, previous, next] + # note that if the included file is rebuilt, this one must be + # too (since the TOC of the included file could have changed) + self.files_to_rebuild.setdefault(includefile, set()).add(filename) + + + def build_toc_from(self, filename, document): + """Build a TOC from the doctree and store it in the inventory.""" + numentries = [0] # nonlocal again... 
+ + def build_toc(node): + entries = [] + for subnode in node: + if isinstance(subnode, addnodes.toctree): + # just copy the toctree node which is then resolved + # in self.resolve_toctrees + item = subnode.copy() + entries.append(item) + # do the inventory stuff + self.note_toctree(filename, subnode) + continue + if not isinstance(subnode, nodes.section): + continue + title = subnode[0] + # copy the contents of the section title, but without references + # and unnecessary stuff + visitor = MyContentsFilter(document) + title.walkabout(visitor) + nodetext = visitor.get_entry_text() + if not numentries[0]: + # for the very first toc entry, don't add an anchor + # as it is the file's title anyway + anchorname = '' + else: + anchorname = '#' + subnode['ids'][0] + numentries[0] += 1 + reference = nodes.reference('', '', refuri=filename, + anchorname=anchorname, + *nodetext) + para = addnodes.compact_paragraph('', '', reference) + item = nodes.list_item('', para) + item += build_toc(subnode) + entries.append(item) + if entries: + return nodes.bullet_list('', *entries) + return [] + toc = build_toc(document) + if toc: + self.tocs[filename] = toc + else: + self.tocs[filename] = nodes.bullet_list('') + self.toc_num_entries[filename] = numentries[0] + + def get_toc_for(self, filename): + """Return a TOC nodetree -- for use on the same page only!""" + toc = self.tocs[filename].deepcopy() + for node in toc.traverse(nodes.reference): + node['refuri'] = node['anchorname'] + return toc + + # ------- + # these are called from docutils directives and therefore use self.filename + # + def note_descref(self, fullname, desctype): + if fullname in self.descrefs: + print >>self.warning_stream, \ + ('WARNING: duplicate canonical description name %s, ' % fullname + + 'in %s and %s' % (self.descrefs[fullname][0], self.filename)) + self.descrefs[fullname] = (self.filename, desctype) + + def note_module(self, modname, synopsis, platform): + self.modules[modname] = (self.filename, synopsis, 
platform) + self.filemodules.setdefault(self.filename, []).append(modname) + + def note_token(self, tokenname): + self.tokens[tokenname] = self.filename + + + def note_index_entry(self, type, string, targetid, aliasname): + self.indexentries.setdefault(self.filename, []).append( + (type, string, targetid, aliasname)) + + def note_versionchange(self, type, version, node): + self.versionchanges.setdefault(version, []).append( + (type, self.filename, self.currmodule, self.currdesc, node.deepcopy())) + # ------- + + # --------- RESOLVING REFERENCES AND TOCTREES ------------------------------ + + def get_doctree(self, filename): + """Read the doctree for a file from the pickle and return it.""" + doctree_filename = path.join(self.doctreedir, filename[:-3] + 'doctree') + with file(doctree_filename, 'rb') as f: + doctree = pickle.load(f) + doctree.reporter = Reporter(filename, 2, 4, stream=self.warning_stream) + return doctree + + def get_and_resolve_doctree(self, filename, builder, doctree=None): + """Read the doctree from the pickle, resolve cross-references and + toctrees and return it.""" + if doctree is None: + doctree = self.get_doctree(filename) + + # resolve all pending cross-references + self.resolve_references(doctree, filename, builder) + + # now, resolve all toctree nodes + def _entries_from_toctree(toctreenode): + """Return TOC entries for a toctree node.""" + includefiles = map(str, toctreenode['includefiles']) + + entries = [] + for includefile in includefiles: + try: + toc = self.tocs[includefile].deepcopy() + except KeyError, err: + # this is raised if the included file does not exist + print >>self.warning_stream, 'WARNING: %s: toctree contains ' \ + 'ref to nonexisting file %r' % (filename, includefile) + else: + for toctreenode in toc.traverse(addnodes.toctree): + toctreenode.parent.replace_self( + _entries_from_toctree(toctreenode)) + entries.append(toc) + if entries: + return addnodes.compact_paragraph('', '', *entries) + return [] + + for 
toctreenode in doctree.traverse(addnodes.toctree): + maxdepth = toctreenode.get('maxdepth', -1) + newnode = _entries_from_toctree(toctreenode) + # prune the tree to maxdepth + if maxdepth > 0: + walk_depth(newnode, 1, maxdepth) + toctreenode.replace_self(newnode) + + # set the target paths in the toctrees (they are not known + # at TOC generation time) + for node in doctree.traverse(nodes.reference): + if node.hasattr('anchorname'): + # a TOC reference + node['refuri'] = builder.get_relative_uri( + filename, node['refuri']) + node['anchorname'] + + return doctree + + + def resolve_references(self, doctree, docfilename, builder): + for node in doctree.traverse(addnodes.pending_xref): + contnode = node[0].deepcopy() + newnode = None + + typ = node['reftype'] + target = node['reftarget'] + modname = node['modname'] + clsname = node['classname'] + + if typ == 'ref': + filename, labelid, sectname = self.labels.get(target, ('','','')) + if not filename: + newnode = doctree.reporter.system_message( + 2, 'undefined label: %s' % target) + print >>self.warning_stream, \ + '%s: undefined label: %s' % (docfilename, target) + else: + newnode = nodes.reference('', '') + if filename == docfilename: + newnode['refid'] = labelid + else: + newnode['refuri'] = builder.get_relative_uri( + docfilename, filename) + '#' + labelid + newnode.append(nodes.emphasis(sectname, sectname)) + elif typ == 'token': + filename = self.tokens.get(target, '') + if not filename: + newnode = contnode + else: + newnode = nodes.reference('', '') + if filename == docfilename: + newnode['refid'] = 'grammar-token-' + target + else: + newnode['refuri'] = builder.get_relative_uri( + docfilename, filename) + '#grammar-token-' + target + newnode.append(contnode) + elif typ == 'mod': + filename, synopsis, platform = self.modules.get(target, ('','','')) + # just link to an anchor if there are multiple modules in one file + # because the anchor is generally below the heading which is ugly + # but can't be helped 
easily + anchor = '' + if not filename or filename == docfilename: + # don't link to self + newnode = contnode + else: + if len(self.filemodules[filename]) > 1: + anchor = '#' + 'module-' + target + newnode = nodes.reference('', '') + newnode['refuri'] = ( + builder.get_relative_uri(docfilename, filename) + anchor) + newnode.append(contnode) + else: + name, desc = self.find_desc(modname, clsname, target, typ) + if not desc: + newnode = contnode + else: + newnode = nodes.reference('', '') + if desc[0] == docfilename: + newnode['refid'] = name + else: + newnode['refuri'] = ( + builder.get_relative_uri(docfilename, desc[0]) + + '#' + name) + newnode.append(contnode) + + if newnode: + node.replace_self(newnode) + + def create_index(self, builder, _fixre=re.compile(r'(.*) ([(][^()]*[)])')): + """Create the real index from the collected index entries.""" + new = {} + + def add_entry(word, subword, dic=new): + entry = dic.get(word) + if not entry: + dic[word] = entry = [[], {}] + if subword: + add_entry(subword, '', dic=entry[1]) + else: + entry[0].append(builder.get_relative_uri('genindex.rst', fn) + + '#' + tid) + + for fn, entries in self.indexentries.iteritems(): + # new entry types must be listed in directives.py! + for type, string, tid, alias in entries: + if type == 'single': + entry, _, subentry = string.partition('!') + add_entry(entry, subentry) + elif type == 'pair': + first, second = map(lambda x: x.strip(), string.split(';', 1)) + add_entry(first, second) + add_entry(second, first) + elif type == 'triple': + first, second, third = map(lambda x: x.strip(), string.split(';', 2)) + add_entry(first, second+' '+third) + add_entry(second, third+', '+first) + add_entry(third, first+' '+second) +# this is a bit ridiculous... 
+# elif type == 'quadruple': +# first, second, third, fourth = \ +# map(lambda x: x.strip(), string.split(';', 3)) +# add_entry(first, '%s %s %s' % (second, third, fourth)) +# add_entry(second, '%s %s, %s' % (third, fourth, first)) +# add_entry(third, '%s, %s %s' % (fourth, first, second)) +# add_entry(fourth, '%s %s %s' % (first, second, third)) + elif type in ('module', 'keyword', 'operator', 'object', + 'exception', 'statement'): + add_entry(string, type) + add_entry(type, string) + elif type == 'builtin': + add_entry(string, 'built-in function') + add_entry('built-in function', string) + else: + print >>self.warning_stream, \ + "unknown index entry type %r in %s" % (type, fn) + + newlist = new.items() + newlist.sort(key=lambda t: t[0].lower()) + + # fixup entries: transform + # func() (in module foo) + # func() (in module bar) + # into + # func() + # (in module foo) + # (in module bar) + oldkey = '' + oldsubitems = None + i = 0 + while i < len(newlist): + key, (targets, subitems) = newlist[i] + # cannot move if it hassubitems; structure gets too complex + if not subitems: + m = _fixre.match(key) + if m: + if oldkey == m.group(1): + # prefixes match: add entry as subitem of the previous entry + oldsubitems.setdefault(m.group(2), [[], {}])[0].extend(targets) + del newlist[i] + continue + oldkey = m.group(1) + else: + oldkey = key + oldsubitems = subitems + i += 1 + + # group the entries by letter + def keyfunc((k, v), ltrs=uppercase+'_'): + # hack: mutate the subitems dicts to a list in the keyfunc + v[1] = sorted((si, se) for (si, (se, void)) in v[1].iteritems()) + # now calculate the key + letter = k[0].upper() + if letter in ltrs: + return letter + else: + # get all other symbols under one heading + return 'Symbols' + self.index = [(key, list(group)) for (key, group) in + itertools.groupby(newlist, keyfunc)] + + def check_consistency(self): + """Do consistency checks.""" + + for filename in self.all_files: + if filename not in self.toctree_relations: + if 
filename == 'contents.rst': + # the master file is not included anywhere ;) + continue + self.warning_stream.write( + 'WARNING: %s isn\'t included in any toctree\n' % filename) + + # --------- QUERYING ------------------------------------------------------- + + def find_desc(self, modname, classname, name, type): + """Find a description node matching "name", perhaps using + the given module and/or classname.""" + # skip parens + if name[-2:] == '()': + name = name[:-2] + + # don't add module and class names for C things + if type[0] == 'c' and type not in ('class', 'const'): + # skip trailing star and whitespace + name = name.rstrip(' *') + if name in self.descrefs and self.descrefs[name][1][0] == 'c': + return name, self.descrefs[name] + return None, None + + if name in self.descrefs: + newname = name + elif modname and modname + '.' + name in self.descrefs: + newname = modname + '.' + name + elif modname and classname and \ + modname + '.' + classname + '.' + name in self.descrefs: + newname = modname + '.' + classname + '.' + name + # special case: builtin exceptions have module "exceptions" set + elif type == 'exc' and '.' not in name and \ + 'exceptions.' + name in self.descrefs: + newname = 'exceptions.' + name + # special case: object methods + elif type in ('func', 'meth') and '.' not in name and \ + 'object.' + name in self.descrefs: + newname = 'object.' + name + else: + return None, None + return newname, self.descrefs[newname] + + def find_keyword(self, keyword, avoid_fuzzy=False, cutoff=0.6, n=20): + """ + Find keyword matches for a keyword. If there's an exact match, just return + it, else return a list of fuzzy matches if avoid_fuzzy isn't True. + + Keywords searched are: first modules, then descrefs. 
+ + Returns: None if nothing found + (type, filename, anchorname) if exact match found + list of (quality, type, filename, anchorname, description) if fuzzy + """ + + if keyword in self.modules: + filename, title, system = self.modules[keyword] + return 'module', filename, 'module-' + keyword + if keyword in self.descrefs: + filename, ref_type = self.descrefs[keyword] + return ref_type, filename, keyword + # special cases + if '.' not in keyword: + # exceptions are documented in the exceptions module + if 'exceptions.'+keyword in self.descrefs: + filename, ref_type = self.descrefs['exceptions.'+keyword] + return ref_type, filename, 'exceptions.'+keyword + # special methods are documented as object methods + if 'object.'+keyword in self.descrefs: + filename, ref_type = self.descrefs['object.'+keyword] + return ref_type, filename, 'object.'+keyword + + if avoid_fuzzy: + return + + # find fuzzy matches + s = difflib.SequenceMatcher() + s.set_seq2(keyword.lower()) + + def possibilities(): + for title, (fn, desc, _) in self.modules.iteritems(): + yield ('module', fn, 'module-'+title, desc) + for title, (fn, desctype) in self.descrefs.iteritems(): + yield (desctype, fn, title, '') + + def dotsearch(string): + parts = string.lower().split('.') + for idx in xrange(0, len(parts)): + yield '.'.join(parts[idx:]) + + result = [] + for type, filename, title, desc in possibilities(): + best_res = 0 + for part in dotsearch(title): + s.set_seq1(part) + if s.real_quick_ratio() >= cutoff and \ + s.quick_ratio() >= cutoff and \ + s.ratio() >= cutoff and \ + s.ratio() > best_res: + best_res = s.ratio() + if best_res: + result.append((best_res, type, filename, title, desc)) + + return heapq.nlargest(n, result) + + def get_real_filename(self, filename): + """ + Pass this function a filename without .rst extension to get the real + filename. This also resolves the special `index.rst` files. If the file + does not exist the return value will be `None`. 
+ """ + for rstname in filename + '.rst', filename + path.sep + 'index.rst': + if rstname in self.all_files: + return rstname diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py new file mode 100644 index 000000000..a44eb7668 --- /dev/null +++ b/sphinx/highlighting.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +""" + sphinx.highlighting + ~~~~~~~~~~~~~~~~~~~ + + Highlight code blocks using Pygments. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +import cgi +from collections import defaultdict + +try: + import pygments + from pygments import highlight + from pygments.lexers import PythonLexer, PythonConsoleLexer, CLexer, \ + TextLexer, RstLexer + from pygments.formatters import HtmlFormatter + from pygments.filters import ErrorToken + from pygments.style import Style + from pygments.styles.friendly import FriendlyStyle + from pygments.token import Generic, Comment +except ImportError: + pygments = None +else: + class PythonDocStyle(Style): + """ + Like friendly, but a bit darker to enhance contrast on the green background. + """ + + background_color = '#eeffcc' + default_style = '' + + styles = FriendlyStyle.styles + styles.update({ + Generic.Output: 'italic #333', + Comment: 'italic #408090', + }) + + lexers = defaultdict(TextLexer, + none = TextLexer(), + python = PythonLexer(), + pycon = PythonConsoleLexer(), + rest = RstLexer(), + c = CLexer(), + ) + for _lexer in lexers.values(): + _lexer.add_filter('raiseonerror') + + fmter = HtmlFormatter(style=PythonDocStyle) + + +def highlight_block(source, lang): + if not pygments: + return '

' + cgi.escape(source) + '
\n' + if lang == 'python': + if source.startswith('>>>'): + lexer = lexers['pycon'] + else: + lexer = lexers['python'] + else: + lexer = lexers[lang] + try: + return highlight(source, lexer, fmter) + except ErrorToken: + # this is most probably not Python, so let it pass textonly + return '
' + cgi.escape(source) + '
\n' + +def get_stylesheet(): + return fmter.get_style_defs() diff --git a/sphinx/htmlhelp.py b/sphinx/htmlhelp.py new file mode 100644 index 000000000..a5b1f4f8a --- /dev/null +++ b/sphinx/htmlhelp.py @@ -0,0 +1,188 @@ +# -*- coding: utf-8 -*- +""" + sphinx.htmlhelp + ~~~~~~~~~~~~~~~ + + Build HTML help support files. + Adapted from the original Doc/tools/prechm.py. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" +from __future__ import with_statement + +import os +import cgi +from os import path + +from docutils import nodes + +from . import addnodes + +# Project file (*.hhp) template. 'outname' is the file basename (like +# the pythlp in pythlp.hhp); 'version' is the doc version number (like +# the 2.2 in Python 2.2). +# The magical numbers in the long line under [WINDOWS] set most of the +# user-visible features (visible buttons, tabs, etc). +# About 0x10384e: This defines the buttons in the help viewer. The +# following defns are taken from htmlhelp.h. Not all possibilities +# actually work, and not all those that work are available from the Help +# Workshop GUI. In particular, the Zoom/Font button works and is not +# available from the GUI. 
The ones we're using are marked with 'x': +# +# 0x000002 Hide/Show x +# 0x000004 Back x +# 0x000008 Forward x +# 0x000010 Stop +# 0x000020 Refresh +# 0x000040 Home x +# 0x000080 Forward +# 0x000100 Back +# 0x000200 Notes +# 0x000400 Contents +# 0x000800 Locate x +# 0x001000 Options x +# 0x002000 Print x +# 0x004000 Index +# 0x008000 Search +# 0x010000 History +# 0x020000 Favorites +# 0x040000 Jump 1 +# 0x080000 Jump 2 +# 0x100000 Zoom/Font x +# 0x200000 TOC Next +# 0x400000 TOC Prev + +project_template = '''\ +[OPTIONS] +Compiled file=%(outname)s.chm +Contents file=%(outname)s.hhc +Default Window=%(outname)s +Default topic=index.html +Display compile progress=No +Full text search stop list file=%(outname)s.stp +Full-text search=Yes +Index file=%(outname)s.hhk +Language=0x409 +Title=Python %(version)s Documentation + +[WINDOWS] +%(outname)s="Python %(version)s Documentation","%(outname)s.hhc","%(outname)s.hhk",\ +"index.html","index.html",,,,,0x63520,220,0x10384e,[0,0,1024,768],,,,,,,0 + +[FILES] +''' + +contents_header = '''\ + + + + + + + + + + +
    +''' + +contents_footer = '''\ +
+''' + +object_sitemap = '''\ + + + + +''' + +# List of words the full text search facility shouldn't index. This +# becomes file outname.stp. Note that this list must be pretty small! +# Different versions of the MS docs claim the file has a maximum size of +# 256 or 512 bytes (including \r\n at the end of each line). +# Note that "and", "or", "not" and "near" are operators in the search +# language, so no point indexing them even if we wanted to. +stopwords = """ +a and are as at +be but by +for +if in into is it +near no not +of on or +such +that the their then there these they this to +was will with +""".split() + + +def build_hhx(builder, outdir, outname): + builder.msg('dumping stopword list...') + with open(path.join(outdir, outname+'.stp'), 'w') as f: + for word in sorted(stopwords): + print >>f, word + + builder.msg('writing project file...') + with open(path.join(outdir, outname+'.hhp'), 'w') as f: + f.write(project_template % {'outname': outname, + 'version': builder.config['version']}) + if not outdir.endswith(os.sep): + outdir += os.sep + olen = len(outdir) + for root, dirs, files in os.walk(outdir): + for fn in files: + if fn.endswith(('.html', '.css', '.js')): + print >>f, path.join(root, fn)[olen:].replace('/', '\\') + + builder.msg('writing TOC file...') + with open(path.join(outdir, outname+'.hhc'), 'w') as f: + f.write(contents_header) + # special books + f.write('
  • ' + object_sitemap % ('Main page', 'index.html')) + f.write('
  • ' + object_sitemap % ('Global Module Index', 'modindex.html')) + # the TOC + toc = builder.env.get_and_resolve_doctree('contents.rst', builder) + def write_toc(node, ullevel=0): + if isinstance(node, nodes.list_item): + f.write('
  • ') + for subnode in node: + write_toc(subnode, ullevel) + elif isinstance(node, nodes.reference): + f.write(object_sitemap % (cgi.escape(node.astext()), + node['refuri'])) + elif isinstance(node, nodes.bullet_list): + if ullevel != 0: + f.write('
      \n') + for subnode in node: + write_toc(subnode, ullevel+1) + if ullevel != 0: + f.write('
    \n') + elif isinstance(node, addnodes.compact_paragraph): + for subnode in node: + write_toc(subnode, ullevel) + elif isinstance(node, nodes.section): + write_toc(node[1], ullevel) + elif isinstance(node, nodes.document): + write_toc(node[0], ullevel) + write_toc(toc) + f.write(contents_footer) + + builder.msg('writing index file...') + with open(path.join(outdir, outname+'.hhk'), 'w') as f: + f.write('
      \n') + def write_index(title, refs, subitems): + if refs: + f.write('
    • ') + f.write(object_sitemap % (cgi.escape(title), refs[0])) + for ref in refs[1:]: + f.write(object_sitemap % ('[Link]', ref)) + if subitems: + f.write('
        ') + for subitem in subitems: + write_index(subitem[0], subitem[1], []) + f.write('
      ') + for (key, group) in builder.env.index: + for title, (refs, subitems) in group: + write_index(title, refs, subitems) + f.write('
    \n') diff --git a/sphinx/json.py b/sphinx/json.py new file mode 100644 index 000000000..95f09b4d0 --- /dev/null +++ b/sphinx/json.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +""" + sphinx.json + ~~~~~~~~~~~ + + Minimal JSON module that generates small dumps. + + This is not fully JSON compliant but enough for the searchindex. + And the generated files are smaller than the simplejson ones. + + Uses the basestring encode function from simplejson. + + :copyright: 2007 by Armin Ronacher, Bob Ippolito. + :license: Python license. +""" + +import re + +ESCAPE = re.compile(r'[\x00-\x19\\"\b\f\n\r\t]') +ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') +ESCAPE_DICT = { + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + ESCAPE_DICT.setdefault(chr(i), '\\u%04x' % (i,)) + + +def encode_basestring_ascii(s): + def replace(match): + s = match.group(0) + try: + return ESCAPE_DICT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 0xdc00 | (n & 0x3ff) + return '\\u%04x\\u%04x' % (s1, s2) + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + + +def dump_json(obj, key=False): + if key: + if not isinstance(obj, basestring): + obj = str(obj) + return encode_basestring_ascii(obj) + if obj is None: + return 'null' + elif obj is True or obj is False: + return obj and 'true' or 'false' + elif isinstance(obj, (int, long, float)): + return str(obj) + elif isinstance(obj, dict): + return '{%s}' % ','.join('%s:%s' % ( + dump_json(key, True), + dump_json(value) + ) for key, value in obj.iteritems()) + elif isinstance(obj, (tuple, list, set)): + return '[%s]' % ','.join(dump_json(x) for x in obj) + elif isinstance(obj, basestring): + return encode_basestring_ascii(obj) + raise TypeError(type(obj)) diff --git a/sphinx/refcounting.py b/sphinx/refcounting.py new file mode 100644 index 
000000000..3f3b92462 --- /dev/null +++ b/sphinx/refcounting.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +""" + sphinx.refcounting + ~~~~~~~~~~~~~~~~~~ + + Handle reference counting annotations, based on refcount.py + and anno-api.py. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" +from __future__ import with_statement + + +class RCEntry: + def __init__(self, name): + self.name = name + self.args = [] + self.result_type = '' + self.result_refs = None + + +class Refcounts(dict): + @classmethod + def fromfile(cls, filename): + d = cls() + with open(filename, 'r') as fp: + for line in fp: + line = line.strip() + if line[:1] in ("", "#"): + # blank lines and comments + continue + parts = line.split(":", 4) + if len(parts) != 5: + raise ValueError("Wrong field count in %r" % line) + function, type, arg, refcount, comment = parts + # Get the entry, creating it if needed: + try: + entry = d[function] + except KeyError: + entry = d[function] = RCEntry(function) + if not refcount or refcount == "null": + refcount = None + else: + refcount = int(refcount) + # Update the entry with the new parameter or the result information. + if arg: + entry.args.append((arg, type, refcount)) + else: + entry.result_type = type + entry.result_refs = refcount + return d diff --git a/sphinx/roles.py b/sphinx/roles.py new file mode 100644 index 000000000..fa194c76a --- /dev/null +++ b/sphinx/roles.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +""" + sphinx.roles + ~~~~~~~~~~~~ + + Handlers for additional ReST roles. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +import re + +from docutils import nodes, utils +from docutils.parsers.rst import roles + +from . 
import addnodes + +ws_re = re.compile(r'\s+') + +generic_docroles = { + 'command' : nodes.strong, + 'dfn' : nodes.emphasis, + 'file' : nodes.emphasis, + 'filenq' : nodes.emphasis, + 'filevar' : nodes.emphasis, + 'guilabel' : nodes.strong, + 'kbd' : nodes.literal, + 'keyword' : nodes.literal, + 'mailheader' : nodes.emphasis, + 'makevar' : nodes.Text, + 'manpage' : nodes.emphasis, + 'mimetype' : nodes.emphasis, + 'newsgroup' : nodes.emphasis, + 'option' : nodes.emphasis, + 'program' : nodes.strong, + 'regexp' : nodes.literal, +} + +for rolename, nodeclass in generic_docroles.iteritems(): + roles.register_generic_role(rolename, nodeclass) + + +def indexmarkup_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): + env = inliner.document.settings.env + text = utils.unescape(text) + targetid = 'index-%s' % env.index_num + env.index_num += 1 + targetnode = nodes.target('', '', ids=[targetid]) + inliner.document.note_explicit_target(targetnode) + if typ == 'envvar': + env.note_index_entry('single', '%s' % text, + targetid, text) + env.note_index_entry('single', 'environment variables!%s' % text, + targetid, text) + textnode = nodes.strong(text, text) + return [targetnode, textnode], [] + elif typ == 'pep': + env.note_index_entry('single', 'Python Enhancement Proposals!PEP %s' % text, + targetid, 'PEP %s' % text) + try: + pepnum = int(text) + except ValueError: + msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno) + prb = inliner.problematic(rawtext, rawtext, msg) + return [prb], [msg] + ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum + sn = nodes.strong('PEP '+text, 'PEP '+text) + rn = nodes.reference('', '', refuri=ref) + rn += sn + return [targetnode, rn], [] + elif typ == 'rfc': + env.note_index_entry('single', 'RFC!RFC %s' % text, + targetid, 'RFC %s' % text) + try: + rfcnum = int(text) + except ValueError: + msg = inliner.reporter.error('invalid RFC number %s' % text, line=lineno) + prb = 
inliner.problematic(rawtext, rawtext, msg) + return [prb], [msg] + ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum + sn = nodes.strong('RFC '+text, 'RFC '+text) + rn = nodes.reference('', '', refuri=ref) + rn += sn + return [targetnode, rn], [] + +roles.register_canonical_role('envvar', indexmarkup_role) +roles.register_local_role('pep', indexmarkup_role) +roles.register_local_role('rfc', indexmarkup_role) + + +# default is `literal` +innernodetypes = { + 'ref': nodes.emphasis, + 'token': nodes.strong, +} + +def xfileref_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): + env = inliner.document.settings.env + text = utils.unescape(text) + # 'token' is the default role inside 'productionlist' directives + if typ == '': + typ = 'token' + if env.config.get('strip_trailing_parentheses', False): + if text[-2:] == '()': + text = text[:-2] + pnode = addnodes.pending_xref(rawtext) + pnode['reftype'] = typ + pnode['reftarget'] = ws_re.sub('', text) + pnode['modname'] = env.currmodule + pnode['classname'] = env.currclass + pnode += innernodetypes.get(typ, nodes.literal)(rawtext, text, classes=['xref']) + return [pnode], [] + + +def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): + return [nodes.emphasis(rawtext, text.replace('-->', u'\N{TRIANGULAR BULLET}'))], [] + + +specific_docroles = { + 'data': xfileref_role, + 'exc': xfileref_role, + 'func': xfileref_role, + 'class': xfileref_role, + 'const': xfileref_role, + 'attr': xfileref_role, + 'meth': xfileref_role, + + 'cfunc' : xfileref_role, + 'cdata' : xfileref_role, + 'ctype' : xfileref_role, + 'cmacro' : xfileref_role, + + 'mod' : xfileref_role, + + 'ref': xfileref_role, + 'token' : xfileref_role, + + 'menuselection' : menusel_role, +} + +for rolename, func in specific_docroles.iteritems(): + roles.register_canonical_role(rolename, func) diff --git a/sphinx/search.py b/sphinx/search.py new file mode 100644 index 000000000..4507bcbca --- /dev/null +++ 
b/sphinx/search.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +""" + sphinx.search + ~~~~~~~~~~~~~ + + Create a search index for offline search. + + :copyright: 2007 by Armin Ronacher. + :license: Python license. +""" +import re +import pickle + +from collections import defaultdict +from docutils.nodes import Text, NodeVisitor +from .stemmer import PorterStemmer +from .json import dump_json + + +word_re = re.compile(r'\w+(?u)') + + +class Stemmer(PorterStemmer): + """ + All those porter stemmer implementations look hideous. + make at least the stem method nicer. + """ + + def stem(self, word): + return PorterStemmer.stem(self, word, 0, len(word) - 1) + + +class WordCollector(NodeVisitor): + """ + A special visitor that collects words for the `IndexBuilder`. + """ + + def __init__(self, document): + NodeVisitor.__init__(self, document) + self.found_words = [] + + def dispatch_visit(self, node): + if node.__class__ is Text: + self.found_words.extend(word_re.findall(node.astext())) + + +class IndexBuilder(object): + """ + Helper class that creates a searchindex based on the doctrees + passed to the `feed` method. + """ + formats = { + 'json': dump_json, + 'pickle': pickle.dumps + } + + def __init__(self): + self._filenames = {} + self._mapping = {} + self._titles = {} + self._categories = {} + self._stemmer = Stemmer() + + def dump(self, stream, format): + """Dump the freezed index to a stream.""" + stream.write(self.formats[format](self.freeze())) + + def freeze(self): + """ + Create a useable data structure. You can pass this output + to the `SearchFrontend` to search the index. 
+ """ + return [ + [k for k, v in sorted(self._filenames.items(), + key=lambda x: x[1])], + dict(item for item in sorted(self._categories.items(), + key=lambda x: x[0])), + [v for k, v in sorted(self._titles.items(), + key=lambda x: x[0])], + dict(item for item in sorted(self._mapping.items(), + key=lambda x: x[0])), + ] + + def feed(self, filename, category, title, doctree): + """Feed a doctree to the index.""" + file_id = self._filenames.setdefault(filename, len(self._filenames)) + self._titles[file_id] = title + visitor = WordCollector(doctree) + doctree.walk(visitor) + self._categories.setdefault(category, set()).add(file_id) + for word in word_re.findall(title) + visitor.found_words: + self._mapping.setdefault(self._stemmer.stem(word.lower()), + set()).add(file_id) + + +class SearchFrontend(object): + """ + This class acts as a frontend for the search index. It can search + a searchindex as provided by `IndexBuilder`. + """ + + def __init__(self, index): + self.filenames, self.areas, self.titles, self.words = index + self._stemmer = Stemmer() + + def query(self, required, excluded, areas): + file_map = defaultdict(set) + for word in required: + if word not in self.words: + break + for fid in self.words[word]: + file_map[fid].add(word) + + return sorted(((self.filenames[fid], self.titles[fid]) + for fid, words in file_map.iteritems() + if len(words) == len(required) and + any(fid in self.areas.get(area, ()) for area in areas) and not + any(fid in self.words.get(word, ()) for word in excluded) + ), key=lambda x: x[1].lower()) + + def search(self, searchstring, areas): + required = set() + excluded = set() + for word in searchstring.split(): + if word.startswith('-'): + storage = excluded + word = word[1:] + else: + storage = required + storage.add(self._stemmer.stem(word.lower())) + + return self.query(required, excluded, areas) diff --git a/sphinx/smartypants.py b/sphinx/smartypants.py new file mode 100644 index 000000000..c833ae9f0 --- /dev/null +++ 
b/sphinx/smartypants.py @@ -0,0 +1,263 @@ +r""" +This is based on SmartyPants.py by `Chad Miller`_. + +Copyright and License +===================== + +SmartyPants_ license:: + + Copyright (c) 2003 John Gruber + (http://daringfireball.net/) + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + * Neither the name "SmartyPants" nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + This software is provided by the copyright holders and contributors "as + is" and any express or implied warranties, including, but not limited + to, the implied warranties of merchantability and fitness for a + particular purpose are disclaimed. In no event shall the copyright + owner or contributors be liable for any direct, indirect, incidental, + special, exemplary, or consequential damages (including, but not + limited to, procurement of substitute goods or services; loss of use, + data, or profits; or business interruption) however caused and on any + theory of liability, whether in contract, strict liability, or tort + (including negligence or otherwise) arising in any way out of the use + of this software, even if advised of the possibility of such damage. + + +smartypants.py license:: + + smartypants.py is a derivative work of SmartyPants. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + This software is provided by the copyright holders and contributors "as + is" and any express or implied warranties, including, but not limited + to, the implied warranties of merchantability and fitness for a + particular purpose are disclaimed. In no event shall the copyright + owner or contributors be liable for any direct, indirect, incidental, + special, exemplary, or consequential damages (including, but not + limited to, procurement of substitute goods or services; loss of use, + data, or profits; or business interruption) however caused and on any + theory of liability, whether in contract, strict liability, or tort + (including negligence or otherwise) arising in any way out of the use + of this software, even if advised of the possibility of such damage. + +.. _Chad Miller: http://web.chad.org/ +""" + +import re + + +def sphinx_smarty_pants(t): + t = t.replace('"', '"') + t = educateDashesOldSchool(t) + t = educateQuotes(t) + t = t.replace('"', '"') + return t + +# Constants for quote education. + +punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]""" +close_class = r"""[^\ \t\r\n\[\{\(\-]""" +dec_dashes = r"""–|—""" + +# Special case if the very first character is a quote +# followed by punctuation at a non-word-break. Close the quotes by brute force: +single_quote_start_re = re.compile(r"""^'(?=%s\\B)""" % (punct_class,)) +double_quote_start_re = re.compile(r"""^"(?=%s\\B)""" % (punct_class,)) + +# Special case for double sets of quotes, e.g.: +#

    He said, "'Quoted' words in a larger quote."

    +double_quote_sets_re = re.compile(r""""'(?=\w)""") +single_quote_sets_re = re.compile(r"""'"(?=\w)""") + +# Special case for decade abbreviations (the '80s): +decade_abbr_re = re.compile(r"""\b'(?=\d{2}s)""") + +# Get most opening double quotes: +opening_double_quotes_regex = re.compile(r""" + ( + \s | # a whitespace char, or +   | # a non-breaking space entity, or + -- | # dashes, or + &[mn]dash; | # named dash entities + %s | # or decimal entities + &\#x201[34]; # or hex + ) + " # the quote + (?=\w) # followed by a word character + """ % (dec_dashes,), re.VERBOSE) + +# Double closing quotes: +closing_double_quotes_regex = re.compile(r""" + #(%s)? # character that indicates the quote should be closing + " + (?=\s) + """ % (close_class,), re.VERBOSE) + +closing_double_quotes_regex_2 = re.compile(r""" + (%s) # character that indicates the quote should be closing + " + """ % (close_class,), re.VERBOSE) + +# Get most opening single quotes: +opening_single_quotes_regex = re.compile(r""" + ( + \s | # a whitespace char, or +   | # a non-breaking space entity, or + -- | # dashes, or + &[mn]dash; | # named dash entities + %s | # or decimal entities + &\#x201[34]; # or hex + ) + ' # the quote + (?=\w) # followed by a word character + """ % (dec_dashes,), re.VERBOSE) + +closing_single_quotes_regex = re.compile(r""" + (%s) + ' + (?!\s | s\b | \d) + """ % (close_class,), re.VERBOSE) + +closing_single_quotes_regex_2 = re.compile(r""" + (%s) + ' + (\s | s\b) + """ % (close_class,), re.VERBOSE) + +def educateQuotes(str): + """ + Parameter: String. + + Returns: The string, with "educated" curly quote HTML entities. + + Example input: "Isn't this fun?" + Example output: “Isn’t this fun?” + """ + + # Special case if the very first character is a quote + # followed by punctuation at a non-word-break. Close the quotes by brute force: + str = single_quote_start_re.sub("’", str) + str = double_quote_start_re.sub("”", str) + + # Special case for double sets of quotes, e.g.: + #

    He said, "'Quoted' words in a larger quote."

    + str = double_quote_sets_re.sub("“‘", str) + str = single_quote_sets_re.sub("‘“", str) + + # Special case for decade abbreviations (the '80s): + str = decade_abbr_re.sub("’", str) + + str = opening_single_quotes_regex.sub(r"\1‘", str) + str = closing_single_quotes_regex.sub(r"\1’", str) + str = closing_single_quotes_regex_2.sub(r"\1’\2", str) + + # Any remaining single quotes should be opening ones: + str = str.replace("'", "‘") + + str = opening_double_quotes_regex.sub(r"\1“", str) + str = closing_double_quotes_regex.sub(r"”", str) + str = closing_double_quotes_regex_2.sub(r"\1”", str) + + # Any remaining quotes should be opening ones. + str = str.replace('"', "“") + + return str + + +def educateBackticks(str): + """ + Parameter: String. + Returns: The string, with ``backticks'' -style double quotes + translated into HTML curly quote entities. + Example input: ``Isn't this fun?'' + Example output: “Isn't this fun?” + """ + return str.replace("``", "“").replace("''", "”") + + +def educateSingleBackticks(str): + """ + Parameter: String. + Returns: The string, with `backticks' -style single quotes + translated into HTML curly quote entities. + + Example input: `Isn't this fun?' + Example output: ‘Isn’t this fun?’ + """ + return str.replace('`', "‘").replace("'", "’") + + +def educateDashesOldSchool(str): + """ + Parameter: String. + + Returns: The string, with each instance of "--" translated to + an en-dash HTML entity, and each "---" translated to + an em-dash HTML entity. + """ + return str.replace('---', "—").replace('--', "–") + + +def educateDashesOldSchoolInverted(str): + """ + Parameter: String. + + Returns: The string, with each instance of "--" translated to + an em-dash HTML entity, and each "---" translated to + an en-dash HTML entity. Two reasons why: First, unlike the + en- and em-dash syntax supported by + EducateDashesOldSchool(), it's compatible with existing + entries written before SmartyPants 1.1, back when "--" was + only used for em-dashes. 
Second, em-dashes are more + common than en-dashes, and so it sort of makes sense that + the shortcut should be shorter to type. (Thanks to Aaron + Swartz for the idea.) + """ + return str.replace('---', "–").replace('--', "—") + + + +def educateEllipses(str): + """ + Parameter: String. + Returns: The string, with each instance of "..." translated to + an ellipsis HTML entity. + + Example input: Huh...? + Example output: Huh…? + """ + return str.replace('...', "…").replace('. . .', "…") + + +__author__ = "Chad Miller " +__version__ = "1.5_1.5: Sat, 13 Aug 2005 15:50:24 -0400" +__url__ = "http://wiki.chad.org/SmartyPantsPy" +__description__ = \ + "Smart-quotes, smart-ellipses, and smart-dashes for weblog entries in pyblosxom" diff --git a/sphinx/stemmer.py b/sphinx/stemmer.py new file mode 100644 index 000000000..9ba617cf1 --- /dev/null +++ b/sphinx/stemmer.py @@ -0,0 +1,344 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" + sphinx.stemmer + ~~~~~~~~~~~~~~ + + Porter Stemming Algorithm + + This is the Porter stemming algorithm, ported to Python from the + version coded up in ANSI C by the author. It may be be regarded + as canonical, in that it follows the algorithm presented in + + Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14, + no. 3, pp 130-137, + + only differing from it at the points maked --DEPARTURE-- below. + + See also http://www.tartarus.org/~martin/PorterStemmer + + The algorithm as described in the paper could be exactly replicated + by adjusting the points of DEPARTURE, but this is barely necessary, + because (a) the points of DEPARTURE are definitely improvements, and + (b) no encoding of the Porter stemmer I have seen is anything like + as exact as this version, even with the points of DEPARTURE! + + Release 1: January 2001 + + :copyright: 2001 by Vivake Gupta . + :license: Public Domain (?). +""" + +class PorterStemmer(object): + + def __init__(self): + """The main part of the stemming algorithm starts here. 
+ b is a buffer holding a word to be stemmed. The letters are in b[k0], + b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is + readjusted downwards as the stemming progresses. Zero termination is + not in fact used in the algorithm. + + Note that only lower case sequences are stemmed. Forcing to lower case + should be done before stem(...) is called. + """ + + self.b = "" # buffer for word to be stemmed + self.k = 0 + self.k0 = 0 + self.j = 0 # j is a general offset into the string + + def cons(self, i): + """cons(i) is TRUE <=> b[i] is a consonant.""" + if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' \ + or self.b[i] == 'o' or self.b[i] == 'u': + return 0 + if self.b[i] == 'y': + if i == self.k0: + return 1 + else: + return (not self.cons(i - 1)) + return 1 + + def m(self): + """m() measures the number of consonant sequences between k0 and j. + if c is a consonant sequence and v a vowel sequence, and <..> + indicates arbitrary presence, + + gives 0 + vc gives 1 + vcvc gives 2 + vcvcvc gives 3 + .... + """ + n = 0 + i = self.k0 + while 1: + if i > self.j: + return n + if not self.cons(i): + break + i = i + 1 + i = i + 1 + while 1: + while 1: + if i > self.j: + return n + if self.cons(i): + break + i = i + 1 + i = i + 1 + n = n + 1 + while 1: + if i > self.j: + return n + if not self.cons(i): + break + i = i + 1 + i = i + 1 + + def vowelinstem(self): + """vowelinstem() is TRUE <=> k0,...j contains a vowel""" + for i in range(self.k0, self.j + 1): + if not self.cons(i): + return 1 + return 0 + + def doublec(self, j): + """doublec(j) is TRUE <=> j,(j-1) contain a double consonant.""" + if j < (self.k0 + 1): + return 0 + if (self.b[j] != self.b[j-1]): + return 0 + return self.cons(j) + + def cvc(self, i): + """cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant + and also if the second c is not w,x or y. this is used when trying to + restore an e at the end of a short e.g. 
+ + cav(e), lov(e), hop(e), crim(e), but + snow, box, tray. + """ + if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or not self.cons(i-2): + return 0 + ch = self.b[i] + if ch == 'w' or ch == 'x' or ch == 'y': + return 0 + return 1 + + def ends(self, s): + """ends(s) is TRUE <=> k0,...k ends with the string s.""" + length = len(s) + if s[length - 1] != self.b[self.k]: # tiny speed-up + return 0 + if length > (self.k - self.k0 + 1): + return 0 + if self.b[self.k-length+1:self.k+1] != s: + return 0 + self.j = self.k - length + return 1 + + def setto(self, s): + """setto(s) sets (j+1),...k to the characters in the string s, readjusting k.""" + length = len(s) + self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:] + self.k = self.j + length + + def r(self, s): + """r(s) is used further down.""" + if self.m() > 0: + self.setto(s) + + def step1ab(self): + """step1ab() gets rid of plurals and -ed or -ing. e.g. + + caresses -> caress + ponies -> poni + ties -> ti + caress -> caress + cats -> cat + + feed -> feed + agreed -> agree + disabled -> disable + + matting -> mat + mating -> mate + meeting -> meet + milling -> mill + messing -> mess + + meetings -> meet + """ + if self.b[self.k] == 's': + if self.ends("sses"): + self.k = self.k - 2 + elif self.ends("ies"): + self.setto("i") + elif self.b[self.k - 1] != 's': + self.k = self.k - 1 + if self.ends("eed"): + if self.m() > 0: + self.k = self.k - 1 + elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem(): + self.k = self.j + if self.ends("at"): self.setto("ate") + elif self.ends("bl"): self.setto("ble") + elif self.ends("iz"): self.setto("ize") + elif self.doublec(self.k): + self.k = self.k - 1 + ch = self.b[self.k] + if ch == 'l' or ch == 's' or ch == 'z': + self.k = self.k + 1 + elif (self.m() == 1 and self.cvc(self.k)): + self.setto("e") + + def step1c(self): + """step1c() turns terminal y to i when there is another vowel in the stem.""" + if (self.ends("y") and self.vowelinstem()): + self.b = 
self.b[:self.k] + 'i' + self.b[self.k+1:]
+
+    def step2(self):
+        """step2() maps double suffices to single ones.
+        so -ization ( = -ize plus -ation) maps to -ize etc. note that the
+        string before the suffix must give m() > 0.
+        """
+        if self.b[self.k - 1] == 'a':
+            if self.ends("ational"): self.r("ate")
+            elif self.ends("tional"): self.r("tion")
+        elif self.b[self.k - 1] == 'c':
+            if self.ends("enci"): self.r("ence")
+            elif self.ends("anci"): self.r("ance")
+        elif self.b[self.k - 1] == 'e':
+            if self.ends("izer"): self.r("ize")
+        elif self.b[self.k - 1] == 'l':
+            if self.ends("bli"): self.r("ble") # --DEPARTURE--
+            # To match the published algorithm, replace this phrase with
+            #   if self.ends("abli"): self.r("able")
+            elif self.ends("alli"): self.r("al")
+            elif self.ends("entli"): self.r("ent")
+            elif self.ends("eli"): self.r("e")
+            elif self.ends("ousli"): self.r("ous")
+        elif self.b[self.k - 1] == 'o':
+            if self.ends("ization"): self.r("ize")
+            elif self.ends("ation"): self.r("ate")
+            elif self.ends("ator"): self.r("ate")
+        elif self.b[self.k - 1] == 's':
+            if self.ends("alism"): self.r("al")
+            elif self.ends("iveness"): self.r("ive")
+            elif self.ends("fulness"): self.r("ful")
+            elif self.ends("ousness"): self.r("ous")
+        elif self.b[self.k - 1] == 't':
+            if self.ends("aliti"): self.r("al")
+            elif self.ends("iviti"): self.r("ive")
+            elif self.ends("biliti"): self.r("ble")
+        elif self.b[self.k - 1] == 'g': # --DEPARTURE--
+            if self.ends("logi"): self.r("log")
+        # To match the published algorithm, delete this phrase
+
+    def step3(self):
+        """step3() deals with -ic-, -full, -ness etc.
+        similar strategy to step2."""
+        if self.b[self.k] == 'e':
+            if self.ends("icate"): self.r("ic")
+            elif self.ends("ative"): self.r("")
+            elif self.ends("alize"): self.r("al")
+        elif self.b[self.k] == 'i':
+            if self.ends("iciti"): self.r("ic")
+        elif self.b[self.k] == 'l':
+            if self.ends("ical"): self.r("ic")
+            elif self.ends("ful"): self.r("")
+        elif self.b[self.k] == 's':
+            if self.ends("ness"): self.r("")
+
+    def step4(self):
+        """step4() takes off -ant, -ence etc., in context vcvc."""
+        if self.b[self.k - 1] == 'a':
+            if self.ends("al"): pass
+            else: return
+        elif self.b[self.k - 1] == 'c':
+            if self.ends("ance"): pass
+            elif self.ends("ence"): pass
+            else: return
+        elif self.b[self.k - 1] == 'e':
+            if self.ends("er"): pass
+            else: return
+        elif self.b[self.k - 1] == 'i':
+            if self.ends("ic"): pass
+            else: return
+        elif self.b[self.k - 1] == 'l':
+            if self.ends("able"): pass
+            elif self.ends("ible"): pass
+            else: return
+        elif self.b[self.k - 1] == 'n':
+            if self.ends("ant"): pass
+            elif self.ends("ement"): pass
+            elif self.ends("ment"): pass
+            elif self.ends("ent"): pass
+            else: return
+        elif self.b[self.k - 1] == 'o':
+            if self.ends("ion") and (self.b[self.j] == 's' \
+                                     or self.b[self.j] == 't'): pass
+            elif self.ends("ou"): pass
+            # takes care of -ous
+            else: return
+        elif self.b[self.k - 1] == 's':
+            if self.ends("ism"): pass
+            else: return
+        elif self.b[self.k - 1] == 't':
+            if self.ends("ate"): pass
+            elif self.ends("iti"): pass
+            else: return
+        elif self.b[self.k - 1] == 'u':
+            if self.ends("ous"): pass
+            else: return
+        elif self.b[self.k - 1] == 'v':
+            if self.ends("ive"): pass
+            else: return
+        elif self.b[self.k - 1] == 'z':
+            if self.ends("ize"): pass
+            else: return
+        else:
+            return
+        if self.m() > 1:
+            self.k = self.j
+
+    def step5(self):
+        """step5() removes a final -e if m() > 1, and changes -ll to -l if
+        m() > 1.
+        """
+        self.j = self.k
+        if self.b[self.k] == 'e':
+            a = self.m()
+            if a > 1 or (a == 1 and not self.cvc(self.k-1)):
+                self.k = self.k - 1
+        if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
+            self.k = self.k -1
+
+    def stem(self, p, i, j):
+        """In stem(p, i, j), p is the word as a string, and the part to be
+        stemmed runs from p[i] to p[j] inclusive. Typically i is zero and
+        j is the offset to the last character of the string. The stemmer
+        adjusts the characters p[i] ... p[j] and returns the stemmed
+        string; stemming never increases word length. (Mentions of char
+        pointers and 'extern' elsewhere are inherited from the original C
+        implementation of the Porter algorithm.)
+        """
+        # copy the parameters into instance attributes
+        self.b = p
+        self.k = j
+        self.k0 = i
+        if self.k <= self.k0 + 1:
+            return self.b # --DEPARTURE--
+
+        # With this line, strings of length 1 or 2 don't go through the
+        # stemming process, although no mention is made of this in the
+        # published algorithm. Remove the line to match the published
+        # algorithm.
+ + self.step1ab() + self.step1c() + self.step2() + self.step3() + self.step4() + self.step5() + return self.b[self.k0:self.k+1] diff --git a/sphinx/style/admin.css b/sphinx/style/admin.css new file mode 100644 index 000000000..a25b77fa1 --- /dev/null +++ b/sphinx/style/admin.css @@ -0,0 +1,162 @@ +/** + * Sphinx Admin Panel + */ + +div.admin { + margin: 0 -20px -30px -20px; + padding: 0 20px 10px 20px; + background-color: #f2f2f2; + color: black; +} + +div.admin a { + color: #333; + text-decoration: underline; +} + +div.admin a:hover { + color: black; +} + +div.admin h1, +div.admin h2 { + background-color: #555; + border-bottom: 1px solid #222; + color: white; +} + +div.admin form form { + display: inline; +} + +div.admin input, div.admin textarea { + font-family: 'Bitstream Vera Sans', 'Arial', sans-serif; + font-size: 13px; + color: #333; + padding: 2px; + background-color: #fff; + border: 1px solid #aaa; +} + +div.admin input[type="reset"], +div.admin input[type="submit"] { + cursor: pointer; + font-weight: bold; + padding: 2px; +} + +div.admin input[type="reset"]:hover, +div.admin input[type="submit"]:hover { + border: 1px solid #333; +} + +div.admin div.actions { + margin: 10px 0 0 0; + padding: 5px; + background-color: #aaa; + border: 1px solid #777; +} + +div.admin div.error { + margin: 10px 0 0 0; + padding: 5px; + border: 2px solid #222; + background-color: #ccc; + font-weight: bold; +} + +div.admin div.dialog { + background-color: #ccc; + margin: 10px 0 10px 0; +} + +div.admin div.dialog h2 { + margin: 0; + font-size: 18px; + padding: 4px 10px 4px 10px; +} + +div.admin div.dialog div.text { + padding: 10px; +} + +div.admin div.dialog div.buttons { + padding: 5px 10px 5px 10px; +} + +div.admin table.mapping { + width: 100%; + border: 1px solid #999; + border-collapse: collapse; + background-color: #aaa; +} + +div.admin table.mapping th { + background-color: #ddd; + border-bottom: 1px solid #888; + padding: 5px; +} + +div.admin table.mapping 
th.recent_comments { + background-color: #c5cba4; +} + +div.admin table.mapping, +div.admin table.mapping a { + color: black; +} + +div.admin table.mapping td { + border: 1px solid #888; + border-left: none; + border-right: none; + text-align: left; + line-height: 24px; + padding: 0 5px 0 5px; +} + +div.admin table.mapping tr:hover { + background-color: #888; +} + +div.admin table.mapping td.username { + width: 180px; +} + +div.admin table.mapping td.pub_date { + font-style: italic; + text-align: right; +} + +div.admin table.mapping td.groups input { + width: 100%; +} + +div.admin table.mapping td.actions input { + padding: 0; +} + +div.admin table.mapping .actions { + text-align: right; + width: 70px; +} + +div.admin table.mapping span.meta { + font-size: 11px; + color: #222; +} + +div.admin table.mapping span.meta a { + color: #222; +} + +div.admin div.detail_form dt { + clear: both; + float: left; + width: 110px; +} + +div.admin div.detail_form textarea { + width: 98%; + height: 160px; +} diff --git a/sphinx/style/comment.png b/sphinx/style/comment.png new file mode 100644 index 000000000..5219131f2 Binary files /dev/null and b/sphinx/style/comment.png differ diff --git a/sphinx/style/default.css b/sphinx/style/default.css new file mode 100644 index 000000000..b9e2c9202 --- /dev/null +++ b/sphinx/style/default.css @@ -0,0 +1,764 @@ +/** + * Python Doc Design + */ + +body { + font-family: 'Bitstream Vera Sans', 'Arial', sans-serif; + font-size: 13px; + background-color: #11303d; + color: #000; + margin: 0; + padding: 0; +} + +/* :::: LAYOUT :::: */ + +div.document { + background-color: #1c4e63; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 230px; +} + +div.body { + background-color: white; + padding: 0 20px 30px 20px; +} + +div.sidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sidebar { + float: left; + width: 230px; + margin-left: -100%; +} + +div.clearer { + clear: both; +} + +div.footer { + color: #fff; 
+ width: 100%; + padding: 9px 0 9px 0; + text-align: center; +} + +div.footer a { + color: #fff; + text-decoration: underline; +} + +div.related { + background-color: #133f52; + color: #fff; + width: 100%; + height: 30px; + line-height: 30px; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +div.related a { + color: white; +} + +/* ::: TOC :::: */ +div.sidebar h3 { + font-family: 'Trebuchet MS', sans-serif; + color: white; + font-size: 24px; + font-weight: normal; + margin: 0; + padding: 0; +} + +div.sidebar h4 { + font-family: 'Trebuchet MS', sans-serif; + color: white; + font-size: 16px; + font-weight: normal; + margin: 5px 0 0 0; + padding: 0; +} + +div.sidebar p { + color: white; +} + +div.sidebar p.topless { + margin: 5px 10px 10px 10px; +} + +div.sidebar ul { + margin: 10px; + padding: 0; + list-style: none; + color: white; +} + +div.sidebar ul ul, +div.sidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sidebar a { + color: #98dbcc; +} + +div.sidebar form { + margin-top: 10px; +} + +div.sidebar input { + border: 1px solid #98dbcc; + font-family: 'Bitstream Vera Sans', 'Arial', sans-serif; + font-size: 1em; +} + +/* :::: MODULE CLOUD :::: */ +div.modulecloud { + margin: -5px 10px 5px 10px; + padding: 10px; + font-size: 110%; + line-height: 160%; + border: 1px solid #cbe7e5; + background-color: #f2fbfd; +} + +div.modulecloud a { + padding: 0 5px 0 5px; +} + +/* :::: SEARCH :::: */ +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + 
text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* :::: COMMON FORM STYLES :::: */ + +div.actions { + padding: 5px 10px 5px 10px; + border-top: 1px solid #cbe7e5; + border-bottom: 1px solid #cbe7e5; + background-color: #e0f6f4; +} + +form dl { + color: #333; +} + +form dt { + clear: both; + float: left; + min-width: 110px; + margin-right: 10px; + padding-top: 2px; +} + +input#homepage { + display: none; +} + +div.error { + margin: 5px 20px 0 0; + padding: 5px; + border: 1px solid #d00; + font-weight: bold; +} + +/* :::: INLINE COMMENTS :::: */ + +div.inlinecomments { + position: absolute; + right: 20px; +} + +div.inlinecomments a.bubble { + display: block; + float: right; + background-image: url(style/comment.png); + background-repeat: no-repeat; + width: 25px; + height: 25px; + text-align: center; + padding-top: 3px; + font-size: 12px; + line-height: 14px; + font-weight: bold; + color: black; +} + +div.inlinecomments a.bubble span { + display: none; +} + +div.inlinecomments a.emptybubble { + background-image: url(style/nocomment.png); +} + +div.inlinecomments a.bubble:hover { + background-image: url(style/hovercomment.png); + text-decoration: none; + color: #3ca0a4; +} + +div.inlinecomments div.comments { + float: right; + margin: 25px 5px 0 0; + max-width: 50em; + min-width: 30em; + border: 1px solid #2eabb0; + background-color: #f2fbfd; + z-index: 150; +} + +div#comments { + border: 1px solid #2eabb0; +} + +div#comments div.nocomments { + padding: 10px; + font-weight: bold; +} + +div.inlinecomments div.comments h3, +div#comments h3 { + margin: 0; + padding: 0; + background-color: #2eabb0; + color: white; + border: none; + padding: 3px; +} + +div.inlinecomments div.comments div.actions { + padding: 4px; + margin: 0; + border-top: none; +} + +div#comments div.comment { + margin: 10px; + border: 1px solid #2eabb0; +} + +div.inlinecomments div.comment h4, +div.commentwindow div.comment h4, +div#comments div.comment h4 { + 
margin: 10px 0 0 0; + background-color: #2eabb0; + color: white; + border: none; + padding: 1px 4px 1px 4px; +} + +div#comments div.comment h4 { + margin: 0; +} + +div#comments div.comment h4 a { + color: #d5f4f4; +} + +div.inlinecomments div.comment div.text, +div.commentwindow div.comment div.text, +div#comments div.comment div.text { + margin: -5px 0 -5px 0; + padding: 0 10px 0 10px; +} + +div.inlinecomments div.comment div.meta, +div.commentwindow div.comment div.meta, +div#comments div.comment div.meta { + text-align: right; + padding: 2px 10px 2px 0; + font-size: 95%; + color: #538893; + border-top: 1px solid #cbe7e5; + background-color: #e0f6f4; +} + +div.commentwindow { + position: absolute; + width: 500px; + border: 1px solid #cbe7e5; + background-color: #f2fbfd; + display: none; + z-index: 130; +} + +div.commentwindow h3 { + margin: 0; + background-color: #2eabb0; + color: white; + border: none; + padding: 5px; + font-size: 22px; + cursor: pointer; +} + +div.commentwindow div.actions { + margin: 10px -10px 0 -10px; + padding: 4px 10px 4px 10px; + color: #538893; +} + +div.commentwindow div.actions input { + border: 1px solid #2eabb0; + background-color: white; + color: #135355; + cursor: pointer; +} + +div.commentwindow div.form { + padding: 0 10px 0 10px; +} + +div.commentwindow div.form input, +div.commentwindow div.form textarea { + border: 1px solid #3c9ea2; + background-color: white; + color: black; +} + +div.commentwindow div.error { + margin: 10px 5px 10px 5px; + background-color: #fbe5dc; + display: none; +} + +div.commentwindow div.form textarea { + width: 99%; +} + +div.commentwindow div.preview { + margin: 10px 0 10px 0; + background-color: #70d0d4; + padding: 0 1px 1px 25px; +} + +div.commentwindow div.preview h4 { + margin: 0 0 -5px -20px; + padding: 4px 0 0 4px; + color: white; + font-size: 18px; +} + +div.commentwindow div.preview div.comment { + background-color: #f2fbfd; +} + +div.commentwindow div.preview div.comment h4 { + margin: 10px 
0 0 0!important; + padding: 1px 4px 1px 4px!important; + font-size: 16px; +} + +/* :::: SUGGEST CHANGES :::: */ +div#suggest-changes-box input, div#suggest-changes-box textarea { + border: 1px solid #ccc; + background-color: white; + color: black; +} + +div#suggest-changes-box textarea { + width: 99%; + height: 400px; +} + + +/* :::: PREVIEW :::: */ +div.preview { + background-image: url(style/preview.png); + padding: 0 20px 20px 20px; + margin-bottom: 30px; +} + + +/* :::: INDEX PAGE :::: */ + +table.contentstable { + width: 90%; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.5em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; +} + +/* :::: INDEX STYLES :::: */ + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable dl, table.indextable dd { + margin-top: 0; + margin-bottom: 0; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +form.pfform { + margin: 10px 0 20px 0; +} + +/* :::: GLOBAL STYLES :::: */ + +.docwarning { + background-color: #ffe4e4; + padding: 10px; + margin: 0 -20px 0 -20px; + border-bottom: 1px solid #f66; +} + +p.subhead { + font-weight: bold; + margin-top: 20px; +} + +a { + color: #355f7c; + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Trebuchet MS', sans-serif; + background-color: #f2f2f2; + font-weight: normal; + color: #20435c; + border-bottom: 1px solid #ccc; + margin: 20px -20px 10px -20px; + padding: 3px 0 3px 10px; +} + +div.body h1 { margin-top: 0; font-size: 30px; } +div.body h2 { font-size: 25px; } +div.body h3 { font-size: 21px; } +div.body h4 { font-size: 18px; } +div.body h5 { font-size: 14px; } +div.body h6 { font-size: 12px; } + +a.headerlink, 
+a.headerlink, +a.headerlink, +a.headerlink, +a.headerlink, +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; + visibility: hidden; +} + +*:hover > a.headerlink, +*:hover > a.headerlink, +*:hover > a.headerlink, +*:hover > a.headerlink, +*:hover > a.headerlink, +*:hover > a.headerlink { + visibility: visible; +} + +a.headerlink:hover, +a.headerlink:hover, +a.headerlink:hover, +a.headerlink:hover, +a.headerlink:hover, +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li { + text-align: justify; + line-height: 130%; +} + +div.body td { + text-align: left; +} + +ul.fakelist { + list-style: none; + margin: 10px 0 10px 20px; + padding: 0; +} + +/* "Footnotes" heading */ +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +/* Admonitions */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 10px 10px 0px 10px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dd { + margin-bottom: 10px; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #f66; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +p.admonition-title { + margin: 0px 0px 5px 0px; + font-weight: bold; + font-size: 1.1em; +} + +table.docutils { + border: 0; +} + +table.docutils td, table.docutils th { + margin: 2px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.field-list td, table.field-list th { + border: 0 !important; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +dl { + margin-bottom: 15px; + clear: both; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.refcount { + color: #060; +} + +dt:target, +.highlight { + background-color: #fbe54e; +} + +th { + 
text-align: left; + padding-right: 5px; +} + +pre { + font-family: 'Bitstream Vera Sans Mono', monospace; + padding: 5px; + background-color: #efc; + color: #333; + border: 1px solid #ac9; + border-left: none; + border-right: none; +} + +tt { + font-family: 'Bitstream Vera Sans Mono', monospace; + background-color: #ecf0f3; + padding: 1px; +} + +tt.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +tt.descclassname { + background-color: transparent; +} + +tt.xref, a tt { + background-color: transparent; + font-weight: bold; +} + +.footnote:target { background-color: #ffa } + +h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.versionmodified { + font-style: italic; +} + +form.comment { + margin: 0; + padding: 10px 30px 10px 30px; + background-color: #eee; +} + +form.comment h3 { + background-color: #326591; + color: white; + margin: -10px -30px 10px -30px; + padding: 5px; + font-size: 1.4em; +} + +form.comment input, +form.comment textarea { + border: 1px solid #ccc; + padding: 2px; + font-family: 'Bitstream Vera Sans', 'Verdana', sans-serif; + font-size: 13px; +} + +form.comment input[type="text"] { + width: 240px; +} + +form.comment textarea { + width: 100%; + height: 200px; + margin-bottom: 10px; +} + +/* :::: PRINT :::: */ +@media print { + div.documentwrapper { + width: 100%; + } + + div.body { + margin: 0; + } + + div.sidebar, + div.related, + div.footer, + div#comments div.new-comment-box, + #top-link { + display: none; + } +} diff --git a/sphinx/style/doctools.js b/sphinx/style/doctools.js new file mode 100644 index 000000000..3945713f5 --- /dev/null +++ b/sphinx/style/doctools.js @@ -0,0 +1,349 @@ +/// XXX: make it cross browser + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger + */ +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", 
"dir", "dirxml", + "group", "groupEnd", "time", "timeEnd", "count", "trace", "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {} +} + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +} + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s == 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +} + +/** + * small function to check if an array contains + * a given item. + */ +jQuery.contains = function(arr, item) { + for (var i = 0; i < arr.length; i++) { + if (arr[i] == item) + return true; + } + return false; +} + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node) { + if (node.nodeType == 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && !jQuery.className.has(node.parentNode, className)) { + var span = document.createElement("span"); + span.className = className; + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this) + }); + } + } + return this.each(function() { + highlight(this); + }); +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.addContextElements(); + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initModIndex(); + this.initComments(); + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + for (var i = 1; i <= 6; i++) { + $('h' + i + '[@id]').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', 'Permalink to this headline'). + appendTo(this); + }); + } + $('dt[@id]').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', 'Permalink to this definition'). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash && $.browser.mozilla) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlight'); + }); + }, 10); + $('
  • ') + .appendTo($('.sidebar .this-page-menu')); + } + }, + + /** + * init the modindex toggle buttons + */ + initModIndex : function() { + $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + console.log($('tr.cg-' + idnum).toggle()); + if (src.substr(-9) == 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', '').click(); + }, + + /** + * init the inline comments + */ + initComments : function() { + $('.inlinecomments div.actions').each(function() { + this.innerHTML += ' | '; + $(this).append($('hide comments').click(function() { + $(this).parent().parent().toggle(); + return false; + })); + }); + $('.inlinecomments .comments').hide(); + $('.inlinecomments a.bubble').each(function() { + $(this).click($(this).is('.emptybubble') ? function() { + var params = $.getQueryParameters(this.href); + Documentation.newComment(params.target[0]); + return false; + } : function() { + $('.comments', $(this).parent().parent()[0]).toggle(); + return false; + }); + }); + $('#comments div.actions a.newcomment').click(function() { + Documentation.newComment(); + return false; + }); + if (document.location.hash.match(/^#comment-/)) + $('.inlinecomments .comments ' + document.location.hash) + .parent().toggle(); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('.sidebar .this-page-menu li.highlight-link').fadeOut(300); + $('span.highlight').removeClass('highlight'); + }, + + /** + * show the comment window for a certain id or the whole page. 
+ */ + newComment : function(id) { + Documentation.CommentWindow.openFor(id || ''); + }, + + /** + * write a new comment from within a comment view box + */ + newCommentFromBox : function(link) { + var params = $.getQueryParameters(link.href); + $(link).parent().parent().fadeOut('slow'); + this.newComment(params.target); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this == '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + /** + * class that represents the comment window + */ + CommentWindow : (function() { + var openWindows = {}; + + var Window = function(sectionID) { + this.url = Documentation.makeURL('@comments/' + Documentation.getCurrentURL() + + '/?target=' + $.urlencode(sectionID) + '&mode=ajax'); + this.sectionID = sectionID; + + this.root = $('
    '); + this.root.appendTo($('body')); + this.title = $('

    New Comment

    ').appendTo(this.root); + this.body = $('
    please wait...
    ').appendTo(this.root); + this.resizeHandle = $('
    ').appendTo(this.root); + + this.root.Draggable({ + handle: this.title[0], + }); + + this.root.css({ + left: window.innerWidth / 2 - $(this.root).width() / 2, + top: window.scrollY + (window.innerHeight / 2 - 150) + }); + this.root.fadeIn('slow'); + this.updateView(); + }; + + Window.prototype.updateView = function(data) { + var self = this; + function update(data) { + if (data.posted) { + document.location.hash = '#comment-' + data.commentID; + document.location.reload(); + } + else { + self.body.html(data.body); + $('div.actions', self.body).append($('') + .attr('type', 'button') + .attr('value', 'Close') + .click(function() { self.close(); }) + ); + $('div.actions input[@name="preview"]') + .attr('type', 'button') + .click(function() { self.submitForm($('form', self.body)[0], true); }); + $('form', self.body).bind("submit", function() { + self.submitForm(this); + return false; + }); + + if (data.error) { + self.root.Highlight(1000, '#aadee1'); + $('div.error', self.root).slideDown(500); + } + } + } + + if (typeof data == 'undefined') + $.getJSON(this.url, function(json) { update(json); }); + else + $.ajax({ + url: this.url, + type: 'POST', + dataType: 'json', + data: data, + success: function(json) { update(json); } + }); + } + + Window.prototype.getFormValue = function(name) { + return $('*[@name="' + name + '"]', this.body)[0].value; + } + + Window.prototype.submitForm = function(form, previewMode) { + this.updateView({ + author: form.author.value, + author_mail: form.author_mail.value, + title: form.title.value, + comment_body: form.comment_body.value, + preview: previewMode ? 
'yes' : '' + }); + } + + Window.prototype.close = function() { + var self = this; + delete openWindows[this.sectionID]; + this.root.fadeOut('slow', function() { + self.root.remove(); + }); + } + + Window.openFor = function(sectionID) { + if (sectionID in openWindows) + return openWindows[sectionID]; + return new Window(sectionID); + } + + return Window; + })() +}; + + +$(document).ready(function() { + Documentation.init(); +}); diff --git a/sphinx/style/file.png b/sphinx/style/file.png new file mode 100644 index 000000000..d18082e39 Binary files /dev/null and b/sphinx/style/file.png differ diff --git a/sphinx/style/hovercomment.png b/sphinx/style/hovercomment.png new file mode 100644 index 000000000..5f2461f80 Binary files /dev/null and b/sphinx/style/hovercomment.png differ diff --git a/sphinx/style/interface.js b/sphinx/style/interface.js new file mode 100644 index 000000000..91a0d4f65 --- /dev/null +++ b/sphinx/style/interface.js @@ -0,0 +1,8 @@ +/* + * Interface elements for jQuery - http://interface.eyecon.ro + * + * Copyright (c) 2006 Stefan Petre + * Dual licensed under the MIT (MIT-LICENSE.txt) + * and GPL (GPL-LICENSE.txt) licenses. 
+ */ + eval(function(p,a,c,k,e,d){e=function(c){return(c35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('k.1a={2R:u(e){D x=0;D y=0;D 5H=I;D es=e.18;if(k(e).B(\'19\')==\'1n\'){62=es.3j;9C=es.Y;es.3j=\'2O\';es.19=\'2E\';es.Y=\'1O\';5H=1b}D el=e;7o(el){x+=el.8n+(el.4Y&&!k.3h.7N?T(el.4Y.5a)||0:0);y+=el.8t+(el.4Y&&!k.3h.7N?T(el.4Y.4Z)||0:0);el=el.dr}el=e;7o(el&&el.4S&&el.4S.5Z()!=\'2e\'){x-=el.3g||0;y-=el.2V||0;el=el.3e}if(5H){es.19=\'1n\';es.Y=9C;es.3j=62}E{x:x,y:y}},bN:u(el){D x=0,y=0;7o(el){x+=el.8n||0;y+=el.8t||0;el=el.dr}E{x:x,y:y}},2p:u(e){D w=k.B(e,\'Z\');D h=k.B(e,\'V\');D 1D=0;D hb=0;D es=e.18;if(k(e).B(\'19\')!=\'1n\'){1D=e.4b;hb=e.63}P{62=es.3j;9C=es.Y;es.3j=\'2O\';es.19=\'2E\';es.Y=\'1O\';1D=e.4b;hb=e.63;es.19=\'1n\';es.Y=9C;es.3j=62}E{w:w,h:h,1D:1D,hb:hb}},82:u(el){E{1D:el.4b||0,hb:el.63||0}},bq:u(e){D h,w,de;if(e){w=e.8k;h=e.8z}P{de=1j.4J;w=1V.d0||9B.d0||(de&&de.8k)||1j.2e.8k;h=1V.d1||9B.d1||(de&&de.8z)||1j.2e.8z}E{w:w,h:h}},6W:u(e){D t,l,w,h,iw,ih;if(e&&e.9A.5Z()!=\'2e\'){t=e.2V;l=e.3g;w=e.cY;h=e.cW;iw=0;ih=0}P{if(1j.4J&&1j.4J.2V){t=1j.4J.2V;l=1j.4J.3g;w=1j.4J.cY;h=1j.4J.cW}P if(1j.2e){t=1j.2e.2V;l=1j.2e.3g;w=1j.2e.cY;h=1j.2e.cW}iw=9B.d0||1j.4J.8k||1j.2e.8k||0;ih=9B.d1||1j.4J.8z||1j.2e.8z||0}E{t:t,l:l,w:w,h:h,iw:iw,ih:ih}},c8:u(e,7C){D el=k(e);D t=el.B(\'5o\')||\'\';D r=el.B(\'5p\')||\'\';D b=el.B(\'5m\')||\'\';D l=el.B(\'5k\')||\'\';if(7C)E{t:T(t)||0,r:T(r)||0,b:T(b)||0,l:T(l)};P E{t:t,r:r,b:b,l:l}},aj:u(e,7C){D el=k(e);D t=el.B(\'66\')||\'\';D r=el.B(\'6j\')||\'\';D b=el.B(\'5M\')||\'\';D l=el.B(\'4X\')||\'\';if(7C)E{t:T(t)||0,r:T(r)||0,b:T(b)||0,l:T(l)};P E{t:t,r:r,b:b,l:l}},6h:u(e,7C){D el=k(e);D t=el.B(\'4Z\')||\'\';D r=el.B(\'6k\')||\'\';D b=el.B(\'6g\')||\'\';D l=el.B(\'5a\')||\'\';if(7C)E{t:T(t)||0,r:T(r)||0,b:T(b)||0,l:T(l)||0};P 
E{t:t,r:r,b:b,l:l}},44:u(2l){D x=2l.hI||(2l.hK+(1j.4J.3g||1j.2e.3g))||0;D y=2l.hL||(2l.hM+(1j.4J.2V||1j.2e.2V))||0;E{x:x,y:y}},cS:u(54,cT){cT(54);54=54.77;7o(54){k.1a.cS(54,cT);54=54.hU}},i1:u(54){k.1a.cS(54,u(el){1Y(D 1p in el){if(2h el[1p]===\'u\'){el[1p]=U}}})},i3:u(el,1N){D 5C=$.1a.6W();D d3=$.1a.2p(el);if(!1N||1N==\'4i\')$(el).B({Q:5C.t+((14.3v(5C.h,5C.ih)-5C.t-d3.hb)/2)+\'S\'});if(!1N||1N==\'4a\')$(el).B({O:5C.l+((14.3v(5C.w,5C.iw)-5C.l-d3.1D)/2)+\'S\'})},i0:u(el,dP){D 1Q=$(\'1U[@2M*="95"]\',el||1j),95;1Q.1B(u(){95=q.2M;q.2M=dP;q.18.69="aw:ax.ay.hZ(2M=\'"+95+"\')"})}};[].3F||(7b.hV.3F=u(v,n){n=(n==U)?0:n;D m=q.1h;1Y(D i=n;i-cd?r:(2m(k.B(2i,2z))||0);49=49==\'3Y\'?(7M==\'1n\'?\'22\':\'2G\'):49;M[49]=1b;2k[2z]=49==\'22\'?[0,2i.6u[2z]]:[2i.6u[2z],0];if(2z!=\'1J\')y[2z]=2k[2z][0]+(2z!=\'3B\'&&2z!=\'8h\'?\'S\':\'\');P k.1p(y,"1J",2k[2z][0])}P{2k[2z]=[2m(k.3M(2i,2z)),2m(49)||0]}}P if(k.fx.d9[2z])2k[2z]=[k.fx.6H(k.3M(2i,2z)),k.fx.6H(49)];P if(/^6X$|92$|2B$|9I$|cD$/i.43(2z)){D m=49.4v(/\\s+/g,\' \').4v(/7K\\s*\\(\\s*/g,\'7K(\').4v(/\\s*,\\s*/g,\',\').4v(/\\s*\\)/g,\')\').bU(/([^\\s]+)/g);3m(2z){1e\'6X\':1e\'92\':1e\'cD\':1e\'9I\':m[3]=m[3]||m[1]||m[0];m[2]=m[2]||m[0];m[1]=m[1]||m[0];1Y(D i=0;iM.1m+z.9x){6c(z.2H);z.2H=U;1Y(p in 2k){if(p=="1J")k.1p(y,"1J",2k[p][1]);P if(2h 2k[p][1]==\'8i\')y[p]=\'7K(\'+2k[p][1].r+\',\'+2k[p][1].g+\',\'+2k[p][1].b+\')\';P y[p]=2k[p][1]+(p!=\'3B\'&&p!=\'8h\'?\'S\':\'\')}if(M.2G||M.22)1Y(D p in 2i.6u)if(p=="1J")k.1p(y,p,2i.6u[p]);P y[p]="";y.19=M.2G?\'1n\':(7M!=\'1n\'?7M:\'2E\');y.2Y=eH;2i.5R=U;if(k.eI(M.23))M.23.1F(2i)}P{D n=t-q.9x;D 8x=n/M.1m;1Y(p in 2k){if(2h 2k[p][1]==\'8i\'){y[p]=\'7K(\'+T(k.G[M.G](8x,n,2k[p][0].r,(2k[p][1].r-2k[p][0].r),M.1m))+\',\'+T(k.G[M.G](8x,n,2k[p][0].g,(2k[p][1].g-2k[p][0].g),M.1m))+\',\'+T(k.G[M.G](8x,n,2k[p][0].b,(2k[p][1].b-2k[p][0].b),M.1m))+\')\'}P{D cG=k.G[M.G](8x,n,2k[p][0],(2k[p][1]-2k[p][0]),M.1m);if(p=="1J")k.1p(y,"1J",cG);P 
y[p]=cG+(p!=\'3B\'&&p!=\'8h\'?\'S\':\'\')}}}};z.2H=6I(u(){z.2D()},13);2i.5R=z},cv:u(2i,2D){if(2D)2i.5R.9x-=kM;P{1V.6c(2i.5R.2H);2i.5R=U;k.2L(2i,"fx")}}});k.cu=u(5S){D 5u={};if(2h 5S==\'5g\'){5S=5S.5Z().7h(\';\');1Y(D i=0;i<5S.1h;i++){7H=5S[i].7h(\':\');if(7H.1h==2){5u[k.eP(7H[0].4v(/\\-(\\w)/g,u(m,c){E c.kn()}))]=k.eP(7H[1])}}}E 5u};k.12={1c:U,F:U,58:u(){E q.1B(u(){if(q.9q){q.A.5e.3p(\'5b\',k.12.cU);q.A=U;q.9q=I;if(k.3h.4I){q.d4="fQ"}P{q.18.kk=\'\';q.18.ej=\'\';q.18.e6=\'\'}}})},cU:u(e){if(k.12.F!=U){k.12.9w(e);E I}D C=q.3Z;k(1j).1H(\'3H\',k.12.d6).1H(\'61\',k.12.9w);C.A.1s=k.1a.44(e);C.A.4t=C.A.1s;C.A.7W=I;C.A.ki=q!=q.3Z;k.12.F=C;if(C.A.5i&&q!=q.3Z){ce=k.1a.2R(C.3e);cf=k.1a.2p(C);cg={x:T(k.B(C,\'O\'))||0,y:T(k.B(C,\'Q\'))||0};dx=C.A.4t.x-ce.x-cf.1D/2-cg.x;dy=C.A.4t.y-ce.y-cf.hb/2-cg.y;k.3d.59(C,[dx,dy])}E k.7Z||I},dT:u(e){D C=k.12.F;C.A.7W=1b;D 9p=C.18;C.A.7i=k.B(C,\'19\');C.A.4m=k.B(C,\'Y\');if(!C.A.c4)C.A.c4=C.A.4m;C.A.2c={x:T(k.B(C,\'O\'))||0,y:T(k.B(C,\'Q\'))||0};C.A.9l=0;C.A.9m=0;if(k.3h.4I){D cl=k.1a.6h(C,1b);C.A.9l=cl.l||0;C.A.9m=cl.t||0}C.A.1C=k.21(k.1a.2R(C),k.1a.2p(C));if(C.A.4m!=\'2y\'&&C.A.4m!=\'1O\'){9p.Y=\'2y\'}k.12.1c.5t();D 5s=C.dn(1b);k(5s).B({19:\'2E\',O:\'3c\',Q:\'3c\'});5s.18.5o=\'0\';5s.18.5p=\'0\';5s.18.5m=\'0\';5s.18.5k=\'0\';k.12.1c.1R(5s);D 
3X=k.12.1c.K(0).18;if(C.A.cO){3X.Z=\'ao\';3X.V=\'ao\'}P{3X.V=C.A.1C.hb+\'S\';3X.Z=C.A.1C.1D+\'S\'}3X.19=\'2E\';3X.5o=\'3c\';3X.5p=\'3c\';3X.5m=\'3c\';3X.5k=\'3c\';k.21(C.A.1C,k.1a.2p(5s));if(C.A.2S){if(C.A.2S.O){C.A.2c.x+=C.A.1s.x-C.A.1C.x-C.A.2S.O;C.A.1C.x=C.A.1s.x-C.A.2S.O}if(C.A.2S.Q){C.A.2c.y+=C.A.1s.y-C.A.1C.y-C.A.2S.Q;C.A.1C.y=C.A.1s.y-C.A.2S.Q}if(C.A.2S.2N){C.A.2c.x+=C.A.1s.x-C.A.1C.x-C.A.1C.hb+C.A.2S.2N;C.A.1C.x=C.A.1s.x-C.A.1C.1D+C.A.2S.2N}if(C.A.2S.4l){C.A.2c.y+=C.A.1s.y-C.A.1C.y-C.A.1C.hb+C.A.2S.4l;C.A.1C.y=C.A.1s.y-C.A.1C.hb+C.A.2S.4l}}C.A.2x=C.A.2c.x;C.A.2r=C.A.2c.y;if(C.A.8g||C.A.2o==\'96\'){89=k.1a.6h(C.3e,1b);C.A.1C.x=C.8n+(k.3h.4I?0:k.3h.7N?-89.l:89.l);C.A.1C.y=C.8t+(k.3h.4I?0:k.3h.7N?-89.t:89.t);k(C.3e).1R(k.12.1c.K(0))}if(C.A.2o){k.12.bP(C);C.A.5J.2o=k.12.bH}if(C.A.5i){k.3d.bO(C)}3X.O=C.A.1C.x-C.A.9l+\'S\';3X.Q=C.A.1C.y-C.A.9m+\'S\';3X.Z=C.A.1C.1D+\'S\';3X.V=C.A.1C.hb+\'S\';k.12.F.A.9n=I;if(C.A.gx){C.A.5J.67=k.12.bI}if(C.A.3B!=I){k.12.1c.B(\'3B\',C.A.3B)}if(C.A.1J){k.12.1c.B(\'1J\',C.A.1J);if(1V.7a){k.12.1c.B(\'69\',\'9V(1J=\'+C.A.1J*2b+\')\')}}if(C.A.7w){k.12.1c.2Z(C.A.7w);k.12.1c.K(0).77.18.19=\'1n\'}if(C.A.4A)C.A.4A.1F(C,[5s,C.A.2c.x,C.A.2c.y]);if(k.1x&&k.1x.8W>0){k.1x.ea(C)}if(C.A.4j==I){9p.19=\'1n\'}E I},bP:u(C){if(C.A.2o.1K==b5){if(C.A.2o==\'96\'){C.A.24=k.21({x:0,y:0},k.1a.2p(C.3e));D 84=k.1a.6h(C.3e,1b);C.A.24.w=C.A.24.1D-84.l-84.r;C.A.24.h=C.A.24.hb-84.t-84.b}P if(C.A.2o==\'1j\'){D cM=k.1a.bq();C.A.24={x:0,y:0,w:cM.w,h:cM.h}}}P if(C.A.2o.1K==7b){C.A.24={x:T(C.A.2o[0])||0,y:T(C.A.2o[1])||0,w:T(C.A.2o[2])||0,h:T(C.A.2o[3])||0}}C.A.24.dx=C.A.24.x-C.A.1C.x;C.A.24.dy=C.A.24.y-C.A.1C.y},9o:u(F){if(F.A.8g||F.A.2o==\'96\'){k(\'2e\',1j).1R(k.12.1c.K(0))}k.12.1c.5t().2G().B(\'1J\',1);if(1V.7a){k.12.1c.B(\'69\',\'9V(1J=2b)\')}},9w:u(e){k(1j).3p(\'3H\',k.12.d6).3p(\'61\',k.12.9w);if(k.12.F==U){E}D F=k.12.F;k.12.F=U;if(F.A.7W==I){E I}if(F.A.48==1b){k(F).B(\'Y\',F.A.4m)}D 
9p=F.18;if(F.5i){k.12.1c.B(\'94\',\'8C\')}if(F.A.7w){k.12.1c.4p(F.A.7w)}if(F.A.6o==I){if(F.A.fx>0){if(!F.A.1N||F.A.1N==\'4a\'){D x=11 k.fx(F,{1m:F.A.fx},\'O\');x.1L(F.A.2c.x,F.A.8c)}if(!F.A.1N||F.A.1N==\'4i\'){D y=11 k.fx(F,{1m:F.A.fx},\'Q\');y.1L(F.A.2c.y,F.A.8j)}}P{if(!F.A.1N||F.A.1N==\'4a\')F.18.O=F.A.8c+\'S\';if(!F.A.1N||F.A.1N==\'4i\')F.18.Q=F.A.8j+\'S\'}k.12.9o(F);if(F.A.4j==I){k(F).B(\'19\',F.A.7i)}}P if(F.A.fx>0){F.A.9n=1b;D dh=I;if(k.1x&&k.1t&&F.A.48){dh=k.1a.2R(k.1t.1c.K(0))}k.12.1c.5K({O:dh?dh.x:F.A.1C.x,Q:dh?dh.y:F.A.1C.y},F.A.fx,u(){F.A.9n=I;if(F.A.4j==I){F.18.19=F.A.7i}k.12.9o(F)})}P{k.12.9o(F);if(F.A.4j==I){k(F).B(\'19\',F.A.7i)}}if(k.1x&&k.1x.8W>0){k.1x.ed(F)}if(k.1t&&F.A.48){k.1t.dp(F)}if(F.A.2T&&(F.A.8c!=F.A.2c.x||F.A.8j!=F.A.2c.y)){F.A.2T.1F(F,F.A.bQ||[0,0,F.A.8c,F.A.8j])}if(F.A.3S)F.A.3S.1F(F);E I},bI:u(x,y,dx,dy){if(dx!=0)dx=T((dx+(q.A.gx*dx/14.3R(dx))/2)/q.A.gx)*q.A.gx;if(dy!=0)dy=T((dy+(q.A.gy*dy/14.3R(dy))/2)/q.A.gy)*q.A.gy;E{dx:dx,dy:dy,x:0,y:0}},bH:u(x,y,dx,dy){dx=14.3D(14.3v(dx,q.A.24.dx),q.A.24.w+q.A.24.dx-q.A.1C.1D);dy=14.3D(14.3v(dy,q.A.24.dy),q.A.24.h+q.A.24.dy-q.A.1C.hb);E{dx:dx,dy:dy,x:0,y:0}},d6:u(e){if(k.12.F==U||k.12.F.A.9n==1b){E}D F=k.12.F;F.A.4t=k.1a.44(e);if(F.A.7W==I){46=14.dm(14.5Y(F.A.1s.x-F.A.4t.x,2)+14.5Y(F.A.1s.y-F.A.4t.y,2));if(460){k.1x.a3(F)}E I},2s:u(o){if(!k.12.1c){k(\'2e\',1j).1R(\'<26 id="dW">\');k.12.1c=k(\'#dW\');D el=k.12.1c.K(0);D 4P=el.18;4P.Y=\'1O\';4P.19=\'1n\';4P.94=\'8C\';4P.dV=\'1n\';4P.2Y=\'2O\';if(1V.7a){el.d4="en"}P{4P.kh=\'1n\';4P.e6=\'1n\';4P.ej=\'1n\'}}if(!o){o={}}E q.1B(u(){if(q.9q||!k.1a)E;if(1V.7a){q.kf=u(){E I};q.kj=u(){E I}}D el=q;D 
5e=o.3y?k(q).kp(o.3y):k(q);if(k.3h.4I){5e.1B(u(){q.d4="en"})}P{5e.B(\'-kE-7l-8Z\',\'1n\');5e.B(\'7l-8Z\',\'1n\');5e.B(\'-ko-7l-8Z\',\'1n\')}q.A={5e:5e,6o:o.6o?1b:I,4j:o.4j?1b:I,48:o.48?o.48:I,5i:o.5i?o.5i:I,8g:o.8g?o.8g:I,3B:o.3B?T(o.3B)||0:I,1J:o.1J?2m(o.1J):I,fx:T(o.fx)||U,6p:o.6p?o.6p:I,5J:{},1s:{},4A:o.4A&&o.4A.1K==2C?o.4A:I,3S:o.3S&&o.3S.1K==2C?o.3S:I,2T:o.2T&&o.2T.1K==2C?o.2T:I,1N:/4i|4a/.43(o.1N)?o.1N:I,6m:o.6m?T(o.6m)||0:0,2S:o.2S?o.2S:I,cO:o.cO?1b:I,7w:o.7w||I};if(o.5J&&o.5J.1K==2C)q.A.5J.7l=o.5J;if(o.4x&&o.4x.1K==2C)q.A.4x=o.4x;if(o.2o&&((o.2o.1K==b5&&(o.2o==\'96\'||o.2o==\'1j\'))||(o.2o.1K==7b&&o.2o.1h==4))){q.A.2o=o.2o}if(o.2K){q.A.2K=o.2K}if(o.67){if(2h o.67==\'kl\'){q.A.gx=T(o.67)||1;q.A.gy=T(o.67)||1}P if(o.67.1h==2){q.A.gx=T(o.67[0])||1;q.A.gy=T(o.67[1])||1}}if(o.3z&&o.3z.1K==2C){q.A.3z=o.3z}q.9q=1b;5e.1B(u(){q.3Z=el});5e.1H(\'5b\',k.12.cU)})}};k.fn.21({a4:k.12.58,6Y:k.12.2s});k.1x={ee:u(5r,5y,7j,7g){E 5r<=k.12.F.A.2x&&(5r+7j)>=(k.12.F.A.2x+k.12.F.A.1C.w)&&5y<=k.12.F.A.2r&&(5y+7g)>=(k.12.F.A.2r+k.12.F.A.1C.h)?1b:I},by:u(5r,5y,7j,7g){E!(5r>(k.12.F.A.2x+k.12.F.A.1C.w)||(5r+7j)(k.12.F.A.2r+k.12.F.A.1C.h)||(5y+7g)k.12.F.A.4t.x&&5yk.12.F.A.4t.y?1b:I},5l:I,3W:{},8W:0,3J:{},ea:u(C){if(k.12.F==U){E}D i;k.1x.3W={};D cZ=I;1Y(i in k.1x.3J){if(k.1x.3J[i]!=U){D 1k=k.1x.3J[i].K(0);if(k(k.12.F).is(\'.\'+1k.1i.a)){if(1k.1i.m==I){1k.1i.p=k.21(k.1a.2R(1k),k.1a.82(1k));1k.1i.m=1b}if(1k.1i.ac){k.1x.3J[i].2Z(1k.1i.ac)}k.1x.3W[i]=k.1x.3J[i];if(k.1t&&1k.1i.s&&k.12.F.A.48){1k.1i.el=k(\'.\'+1k.1i.a,1k);C.18.19=\'1n\';k.1t.c5(1k);1k.1i.9Z=k.1t.8o(k.1p(1k,\'id\')).7U;C.18.19=C.A.7i;cZ=1b}if(1k.1i.9v){1k.1i.9v.1F(k.1x.3J[i].K(0),[k.12.F])}}}}if(cZ){k.1t.28()}},ek:u(){k.1x.3W={};1Y(i in k.1x.3J){if(k.1x.3J[i]!=U){D 
1k=k.1x.3J[i].K(0);if(k(k.12.F).is(\'.\'+1k.1i.a)){1k.1i.p=k.21(k.1a.2R(1k),k.1a.82(1k));if(1k.1i.ac){k.1x.3J[i].2Z(1k.1i.ac)}k.1x.3W[i]=k.1x.3J[i];if(k.1t&&1k.1i.s&&k.12.F.A.48){1k.1i.el=k(\'.\'+1k.1i.a,1k);C.18.19=\'1n\';k.1t.c5(1k);C.18.19=C.A.7i}}}}},a3:u(e){if(k.12.F==U){E}k.1x.5l=I;D i;D cb=I;D ec=0;1Y(i in k.1x.3W){D 1k=k.1x.3W[i].K(0);if(k.1x.5l==I&&k.1x[1k.1i.t](1k.1i.p.x,1k.1i.p.y,1k.1i.p.1D,1k.1i.p.hb)){if(1k.1i.hc&&1k.1i.h==I){k.1x.3W[i].2Z(1k.1i.hc)}if(1k.1i.h==I&&1k.1i.7T){cb=1b}1k.1i.h=1b;k.1x.5l=1k;if(k.1t&&1k.1i.s&&k.12.F.A.48){k.1t.1c.K(0).3b=1k.1i.eb;k.1t.a3(1k)}ec++}P if(1k.1i.h==1b){if(1k.1i.7Q){1k.1i.7Q.1F(1k,[e,k.12.1c.K(0).77,1k.1i.fx])}if(1k.1i.hc){k.1x.3W[i].4p(1k.1i.hc)}1k.1i.h=I}}if(k.1t&&!k.1x.5l&&k.12.F.48){k.1t.1c.K(0).18.19=\'1n\'}if(cb){k.1x.5l.1i.7T.1F(k.1x.5l,[e,k.12.1c.K(0).77])}},ed:u(e){D i;1Y(i in k.1x.3W){D 1k=k.1x.3W[i].K(0);if(1k.1i.ac){k.1x.3W[i].4p(1k.1i.ac)}if(1k.1i.hc){k.1x.3W[i].4p(1k.1i.hc)}if(1k.1i.s){k.1t.7V[k.1t.7V.1h]=i}if(1k.1i.9r&&1k.1i.h==1b){1k.1i.h=I;1k.1i.9r.1F(1k,[e,1k.1i.fx])}1k.1i.m=I;1k.1i.h=I}k.1x.3W={}},58:u(){E q.1B(u(){if(q.9u){if(q.1i.s){id=k.1p(q,\'id\');k.1t.5j[id]=U;k(\'.\'+q.1i.a,q).a4()}k.1x.3J[\'d\'+q.bn]=U;q.9u=I;q.f=U}})},2s:u(o){E q.1B(u(){if(q.9u==1b||!o.3P||!k.1a||!k.12){E}q.1i={a:o.3P,ac:o.a8||I,hc:o.a7||I,eb:o.4V||I,9r:o.kO||o.9r||I,7T:o.7T||o.dN||I,7Q:o.7Q||o.dz||I,9v:o.9v||I,t:o.6n&&(o.6n==\'ee\'||o.6n==\'by\')?o.6n:\'1s\',fx:o.fx?o.fx:I,m:I,h:I};if(o.bD==1b&&k.1t){id=k.1p(q,\'id\');k.1t.5j[id]=q.1i.a;q.1i.s=1b;if(o.2T){q.1i.2T=o.2T;q.1i.9Z=k.1t.8o(id).7U}}q.9u=1b;q.bn=T(14.6w()*cd);k.1x.3J[\'d\'+q.bn]=k(q);k.1x.8W++})}};k.fn.21({df:k.1x.58,dO:k.1x.2s});k.kH=k.1x.ek;k.R={1A:U,3Q:U,F:U,1s:U,1q:U,Y:U,7r:u(e){k.R.F=(q.a2)?q.a2:q;k.R.1s=k.1a.44(e);k.R.1q={Z:T(k(k.R.F).B(\'Z\'))||0,V:T(k(k.R.F).B(\'V\'))||0};k.R.Y={Q:T(k(k.R.F).B(\'Q\'))||0,O:T(k(k.R.F).B(\'O\'))||0};k(1j).1H(\'3H\',k.R.bj).1H(\'61\',k.R.bs);if(2h k.R.F.1g.ei===\'u\'){k.R.F.1g.ei.1F(k.R.F)}E 
I},bs:u(e){k(1j).3p(\'3H\',k.R.bj).3p(\'61\',k.R.bs);if(2h k.R.F.1g.e7===\'u\'){k.R.F.1g.e7.1F(k.R.F)}k.R.F=U},bj:u(e){if(!k.R.F){E}1s=k.1a.44(e);7u=k.R.Y.Q-k.R.1s.y+1s.y;7v=k.R.Y.O-k.R.1s.x+1s.x;7u=14.3v(14.3D(7u,k.R.F.1g.8U-k.R.1q.V),k.R.F.1g.7s);7v=14.3v(14.3D(7v,k.R.F.1g.8T-k.R.1q.Z),k.R.F.1g.7p);if(2h k.R.F.1g.4x===\'u\'){D 8J=k.R.F.1g.4x.1F(k.R.F,[7v,7u]);if(2h 8J==\'kc\'&&8J.1h==2){7v=8J[0];7u=8J[1]}}k.R.F.18.Q=7u+\'S\';k.R.F.18.O=7v+\'S\';E I},28:u(e){k(1j).1H(\'3H\',k.R.8C).1H(\'61\',k.R.8v);k.R.1A=q.1A;k.R.3Q=q.3Q;k.R.1s=k.1a.44(e);if(k.R.1A.1g.4A){k.R.1A.1g.4A.1F(k.R.1A,[q])}k.R.1q={Z:T(k(q.1A).B(\'Z\'))||0,V:T(k(q.1A).B(\'V\'))||0};k.R.Y={Q:T(k(q.1A).B(\'Q\'))||0,O:T(k(q.1A).B(\'O\'))||0};E I},8v:u(){k(1j).3p(\'3H\',k.R.8C).3p(\'61\',k.R.8v);if(k.R.1A.1g.3S){k.R.1A.1g.3S.1F(k.R.1A,[k.R.3Q])}k.R.1A=U;k.R.3Q=U},6V:u(dx,9t){E 14.3D(14.3v(k.R.1q.Z+dx*9t,k.R.1A.1g.9s),k.R.1A.1g.6q)},6Q:u(dy,9t){E 14.3D(14.3v(k.R.1q.V+dy*9t,k.R.1A.1g.8L),k.R.1A.1g.8M)},dX:u(V){E 14.3D(14.3v(V,k.R.1A.1g.8L),k.R.1A.1g.8M)},8C:u(e){if(k.R.1A==U){E}1s=k.1a.44(e);dx=1s.x-k.R.1s.x;dy=1s.y-k.R.1s.y;1E={Z:k.R.1q.Z,V:k.R.1q.V};2n={Q:k.R.Y.Q,O:k.R.Y.O};3m(k.R.3Q){1e\'e\':1E.Z=k.R.6V(dx,1);1r;1e\'eO\':1E.Z=k.R.6V(dx,1);1E.V=k.R.6Q(dy,1);1r;1e\'w\':1E.Z=k.R.6V(dx,-1);2n.O=k.R.Y.O-1E.Z+k.R.1q.Z;1r;1e\'5O\':1E.Z=k.R.6V(dx,-1);2n.O=k.R.Y.O-1E.Z+k.R.1q.Z;1E.V=k.R.6Q(dy,1);1r;1e\'7q\':1E.V=k.R.6Q(dy,-1);2n.Q=k.R.Y.Q-1E.V+k.R.1q.V;1E.Z=k.R.6V(dx,-1);2n.O=k.R.Y.O-1E.Z+k.R.1q.Z;1r;1e\'n\':1E.V=k.R.6Q(dy,-1);2n.Q=k.R.Y.Q-1E.V+k.R.1q.V;1r;1e\'9J\':1E.V=k.R.6Q(dy,-1);2n.Q=k.R.Y.Q-1E.V+k.R.1q.V;1E.Z=k.R.6V(dx,1);1r;1e\'s\':1E.V=k.R.6Q(dy,1);1r}if(k.R.1A.1g.4D){if(k.R.3Q==\'n\'||k.R.3Q==\'s\')4B=1E.V*k.R.1A.1g.4D;P 
4B=1E.Z;5c=k.R.dX(4B*k.R.1A.1g.4D);4B=5c/k.R.1A.1g.4D;3m(k.R.3Q){1e\'n\':1e\'7q\':1e\'9J\':2n.Q+=1E.V-5c;1r}3m(k.R.3Q){1e\'7q\':1e\'w\':1e\'5O\':2n.O+=1E.Z-4B;1r}1E.V=5c;1E.Z=4B}if(2n.Qk.R.1A.1g.8U){1E.V=k.R.1A.1g.8U-2n.Q;if(k.R.1A.1g.4D){1E.Z=1E.V/k.R.1A.1g.4D}}if(2n.O+1E.Z>k.R.1A.1g.8T){1E.Z=k.R.1A.1g.8T-2n.O;if(k.R.1A.1g.4D){1E.V=1E.Z*k.R.1A.1g.4D}}D 6O=I;5L=k.R.1A.18;5L.O=2n.O+\'S\';5L.Q=2n.Q+\'S\';5L.Z=1E.Z+\'S\';5L.V=1E.V+\'S\';if(k.R.1A.1g.dY){6O=k.R.1A.1g.dY.1F(k.R.1A,[1E,2n]);if(6O){if(6O.1q){k.21(1E,6O.1q)}if(6O.Y){k.21(2n,6O.Y)}}}5L.O=2n.O+\'S\';5L.Q=2n.Q+\'S\';5L.Z=1E.Z+\'S\';5L.V=1E.V+\'S\';E I},2s:u(M){if(!M||!M.3U||M.3U.1K!=7n){E}E q.1B(u(){D el=q;el.1g=M;el.1g.9s=M.9s||10;el.1g.8L=M.8L||10;el.1g.6q=M.6q||6x;el.1g.8M=M.8M||6x;el.1g.7s=M.7s||-aF;el.1g.7p=M.7p||-aF;el.1g.8T=M.8T||6x;el.1g.8U=M.8U||6x;b3=k(el).B(\'Y\');if(!(b3==\'2y\'||b3==\'1O\')){el.18.Y=\'2y\'}eM=/n|9J|e|eO|s|5O|w|7q/g;1Y(i in el.1g.3U){if(i.5Z().bU(eM)!=U){if(el.1g.3U[i].1K==b5){3y=k(el.1g.3U[i]);if(3y.1P()>0){el.1g.3U[i]=3y.K(0)}}if(el.1g.3U[i].4S){el.1g.3U[i].1A=el;el.1g.3U[i].3Q=i;k(el.1g.3U[i]).1H(\'5b\',k.R.28)}}}if(el.1g.4N){if(2h el.1g.4N===\'5g\'){9K=k(el.1g.4N);if(9K.1P()>0){9K.1B(u(){q.a2=el});9K.1H(\'5b\',k.R.7r)}}P if(el.1g.4N.4S){el.1g.4N.a2=el;k(el.1g.4N).1H(\'5b\',k.R.7r)}P if(el.1g.4N==1b){k(q).1H(\'5b\',k.R.7r)}}})},58:u(){E q.1B(u(){D el=q;1Y(i in el.1g.3U){el.1g.3U[i].1A=U;el.1g.3U[i].3Q=U;k(el.1g.3U[i]).3p(\'5b\',k.R.28)}if(el.1g.4N){if(2h el.1g.4N===\'5g\'){3y=k(el.1g.4N);if(3y.1P()>0){3y.3p(\'5b\',k.R.7r)}}P if(el.1g.4N==1b){k(q).3p(\'5b\',k.R.7r)}}el.1g=U})}};k.fn.21({j5:k.R.2s,j4:k.R.58});k.2u=U;k.7Z=I;k.3n=U;k.81=[];k.a0=u(e){D 
3O=e.7F||e.7A||-1;if(3O==17||3O==16){k.7Z=1b}};k.9Y=u(e){k.7Z=I};k.eW=u(e){q.f.1s=k.1a.44(e);q.f.1M=k.21(k.1a.2R(q),k.1a.2p(q));q.f.3a=k.1a.6W(q);q.f.1s.x-=q.f.1M.x;q.f.1s.y-=q.f.1M.y;if(q.f.hc)k.2u.2Z(q.f.hc);k.2u.B({19:\'2E\',Z:\'83\',V:\'83\'});if(q.f.o){k.2u.B(\'1J\',q.f.o)}k.3n=q;k.8K=I;k.81=[];q.f.el.1B(u(){q.1M={x:q.8n+(q.4Y&&!k.3h.7N?T(q.4Y.5a)||0:0)+(k.3n.3g||0),y:q.8t+(q.4Y&&!k.3h.7N?T(q.4Y.4Z)||0:0)+(k.3n.2V||0),1D:q.4b,hb:q.63};if(q.s==1b){if(k.7Z==I){q.s=I;k(q).4p(k.3n.f.7X)}P{k.8K=1b;k.81[k.81.1h]=k.1p(q,\'id\')}}});k(q).1R(k.2u.K(0));q.f.93=k.1a.6h(k.2u[0],1b);k.a1.1F(q,[e]);k(1j).1H(\'3H\',k.a1).1H(\'61\',k.bT);E I};k.a1=u(e){if(!k.3n)E;k.eU.1F(k.3n,[e])};k.eU=u(e){if(!k.3n)E;D 1s=k.1a.44(e);D 3a=k.1a.6W(k.3n);1s.x+=3a.l-q.f.3a.l-q.f.1M.x;1s.y+=3a.t-q.f.3a.t-q.f.1M.y;D 8D=14.3D(1s.x,q.f.1s.x);D 5O=14.3D(14.3R(1s.x-q.f.1s.x),14.3R(q.f.3a.w-8D));D 9f=14.3D(1s.y,q.f.1s.y);D 8R=14.3D(14.3R(1s.y-q.f.1s.y),14.3R(q.f.3a.h-9f));if(q.2V>0&&1s.y-20q.2V+q.f.1M.h){D 3T=14.3D(q.f.3a.h-q.2V,10);q.2V+=3T;if(q.2V!=3a.t)8R+=3T}if(q.3g>0&&1s.x-20q.3g+q.f.1M.w){D 3T=14.3D(q.f.3a.w-q.3g,10);q.3g+=3T;if(q.3g!=3a.l)5O+=3T}k.2u.B({O:8D+\'S\',Q:9f+\'S\',Z:5O-(q.f.93.l+q.f.93.r)+\'S\',V:8R-(q.f.93.t+q.f.93.b)+\'S\'});k.2u.l=8D+q.f.3a.l;k.2u.t=9f+q.f.3a.t;k.2u.r=k.2u.l+5O;k.2u.b=k.2u.t+8R;k.8K=I;q.f.el.1B(u(){9k=k.81.3F(k.1p(q,\'id\'));if(!(q.1M.x>k.2u.r||(q.1M.x+q.1M.1D)k.2u.b||(q.1M.y+q.1M.hb)0){h+=\'&\'}h+=s+\'[]=\'+k.1p(q,\'id\');o[o.1h]=k.1p(q,\'id\')}})}E{7U:h,o:o}};k.fn.jZ=u(o){if(!k.2u){k(\'2e\',1j).1R(\'<26 id="2u">\').1H(\'7E\',k.a0).1H(\'6S\',k.9Y);k.2u=k(\'#2u\');k.2u.B({Y:\'1O\',19:\'1n\'});if(1V.2l){k(\'2e\',1j).1H(\'7E\',k.a0).1H(\'6S\',k.9Y)}P{k(1j).1H(\'7E\',k.a0).1H(\'6S\',k.9Y)}}if(!o){o={}}E q.1B(u(){if(q.eX)E;q.eX=1b;q.f={a:o.3P,o:o.1J?2m(o.1J):I,7X:o.eE?o.eE:I,hc:o.4V?o.4V:I,8Y:o.8Y?o.8Y:I,8X:o.8X?o.8X:I};q.f.el=k(\'.\'+o.3P);k(q).1H(\'5b\',k.eW)})};k.1t={7V:[],5j:{},1c:I,7Y:U,28:u(){if(k.12.F==U){E}D 
4M,3A,c,cs;k.1t.1c.K(0).3b=k.12.F.A.6p;4M=k.1t.1c.K(0).18;4M.19=\'2E\';k.1t.1c.1C=k.21(k.1a.2R(k.1t.1c.K(0)),k.1a.2p(k.1t.1c.K(0)));4M.Z=k.12.F.A.1C.1D+\'S\';4M.V=k.12.F.A.1C.hb+\'S\';3A=k.1a.c8(k.12.F);4M.5o=3A.t;4M.5p=3A.r;4M.5m=3A.b;4M.5k=3A.l;if(k.12.F.A.4j==1b){c=k.12.F.dn(1b);cs=c.18;cs.5o=\'3c\';cs.5p=\'3c\';cs.5m=\'3c\';cs.5k=\'3c\';cs.19=\'2E\';k.1t.1c.5t().1R(c)}k(k.12.F).dj(k.1t.1c.K(0));k.12.F.18.19=\'1n\'},dp:u(e){if(!e.A.48&&k.1x.5l.bD){if(e.A.3S)e.A.3S.1F(F);k(e).B(\'Y\',e.A.c4||e.A.4m);k(e).a4();k(k.1x.5l).dd(e)}k.1t.1c.4p(e.A.6p).3w(\'&7J;\');k.1t.7Y=U;D 4M=k.1t.1c.K(0).18;4M.19=\'1n\';k.1t.1c.dj(e);if(e.A.fx>0){k(e).7m(e.A.fx)}k(\'2e\').1R(k.1t.1c.K(0));D 86=[];D 8d=I;1Y(D i=0;i0){8d(86)}},a3:u(e,o){if(!k.12.F)E;D 6i=I;D i=0;if(e.1i.el.1P()>0){1Y(i=e.1i.el.1P();i>0;i--){if(e.1i.el.K(i-1)!=k.12.F){if(!e.5V.bM){if((e.1i.el.K(i-1).1M.y+e.1i.el.K(i-1).1M.hb/2)>k.12.F.A.2r){6i=e.1i.el.K(i-1)}P{1r}}P{if((e.1i.el.K(i-1).1M.x+e.1i.el.K(i-1).1M.1D/2)>k.12.F.A.2x&&(e.1i.el.K(i-1).1M.y+e.1i.el.K(i-1).1M.hb/2)>k.12.F.A.2r){6i=e.1i.el.K(i-1)}}}}}if(6i&&k.1t.7Y!=6i){k.1t.7Y=6i;k(6i).k6(k.1t.1c.K(0))}P if(!6i&&(k.1t.7Y!=U||k.1t.1c.K(0).3e!=e)){k.1t.7Y=U;k(e).1R(k.1t.1c.K(0))}k.1t.1c.K(0).18.19=\'2E\'},c5:u(e){if(k.12.F==U){E}e.1i.el.1B(u(){q.1M=k.21(k.1a.82(q),k.1a.2R(q))})},8o:u(s){D i;D h=\'\';D o={};if(s){if(k.1t.5j[s]){o[s]=[];k(\'#\'+s+\' .\'+k.1t.5j[s]).1B(u(){if(h.1h>0){h+=\'&\'}h+=s+\'[]=\'+k.1p(q,\'id\');o[s][o[s].1h]=k.1p(q,\'id\')})}P{1Y(a in s){if(k.1t.5j[s[a]]){o[s[a]]=[];k(\'#\'+s[a]+\' .\'+k.1t.5j[s[a]]).1B(u(){if(h.1h>0){h+=\'&\'}h+=s[a]+\'[]=\'+k.1p(q,\'id\');o[s[a]][o[s[a]].1h]=k.1p(q,\'id\')})}}}}P{1Y(i in k.1t.5j){o[i]=[];k(\'#\'+i+\' .\'+k.1t.5j[i]).1B(u(){if(h.1h>0){h+=\'&\'}h+=i+\'[]=\'+k.1p(q,\'id\');o[i][o[i].1h]=k.1p(q,\'id\')})}}E{7U:h,o:o}},dc:u(e){if(!e.jJ){E}E q.1B(u(){if(!q.5V||!k(e).is(\'.\'+q.5V.3P))k(e).2Z(q.5V.3P);k(e).6Y(q.5V.A)})},58:u(){E 
q.1B(u(){k(\'.\'+q.5V.3P).a4();k(q).df();q.5V=U;q.dD=U})},2s:u(o){if(o.3P&&k.1a&&k.12&&k.1x){if(!k.1t.1c){k(\'2e\',1j).1R(\'<26 id="dt">&7J;\');k.1t.1c=k(\'#dt\');k.1t.1c.K(0).18.19=\'1n\'}q.dO({3P:o.3P,a8:o.a8?o.a8:I,a7:o.a7?o.a7:I,4V:o.4V?o.4V:I,7T:o.7T||o.dN,7Q:o.7Q||o.dz,bD:1b,2T:o.2T||o.jL,fx:o.fx?o.fx:I,4j:o.4j?1b:I,6n:o.6n?o.6n:\'by\'});E q.1B(u(){D A={6o:o.6o?1b:I,dF:6x,1J:o.1J?2m(o.1J):I,6p:o.4V?o.4V:I,fx:o.fx?o.fx:I,48:1b,4j:o.4j?1b:I,3y:o.3y?o.3y:U,2o:o.2o?o.2o:U,4A:o.4A&&o.4A.1K==2C?o.4A:I,4x:o.4x&&o.4x.1K==2C?o.4x:I,3S:o.3S&&o.3S.1K==2C?o.3S:I,1N:/4i|4a/.43(o.1N)?o.1N:I,6m:o.6m?T(o.6m)||0:I,2S:o.2S?o.2S:I};k(\'.\'+o.3P,q).6Y(A);q.dD=1b;q.5V={3P:o.3P,6o:o.6o?1b:I,dF:6x,1J:o.1J?2m(o.1J):I,6p:o.4V?o.4V:I,fx:o.fx?o.fx:I,48:1b,4j:o.4j?1b:I,3y:o.3y?o.3y:U,2o:o.2o?o.2o:U,bM:o.bM?1b:I,A:A}})}}};k.fn.21({jR:k.1t.2s,dd:k.1t.dc,jQ:k.1t.58});k.jN=k.1t.8o;k.3d={bG:1,f0:u(3u){D 3u=3u;E q.1B(u(){q.4r.6T.1B(u(a6){k.3d.59(q,3u[a6])})})},K:u(){D 3u=[];q.1B(u(bJ){if(q.bF){3u[bJ]=[];D C=q;D 1q=k.1a.2p(q);q.4r.6T.1B(u(a6){D x=q.8n;D y=q.8t;99=T(x*2b/(1q.w-q.4b));8a=T(y*2b/(1q.h-q.63));3u[bJ][a6]=[99||0,8a||0,x||0,y||0]})}});E 3u},bO:u(C){C.A.fK=C.A.24.w-C.A.1C.1D;C.A.fN=C.A.24.h-C.A.1C.hb;if(C.9P.4r.bE){a5=C.9P.4r.6T.K(C.bR+1);if(a5){C.A.24.w=(T(k(a5).B(\'O\'))||0)+C.A.1C.1D;C.A.24.h=(T(k(a5).B(\'Q\'))||0)+C.A.1C.hb}9X=C.9P.4r.6T.K(C.bR-1);if(9X){D bL=T(k(9X).B(\'O\'))||0;D bK=T(k(9X).B(\'O\'))||0;C.A.24.x+=bL;C.A.24.y+=bK;C.A.24.w-=bL;C.A.24.h-=bK}}C.A.fW=C.A.24.w-C.A.1C.1D;C.A.fV=C.A.24.h-C.A.1C.hb;if(C.A.2K){C.A.gx=((C.A.24.w-C.A.1C.1D)/C.A.2K)||1;C.A.gy=((C.A.24.h-C.A.1C.hb)/C.A.2K)||1;C.A.fY=C.A.fW/C.A.2K;C.A.fS=C.A.fV/C.A.2K}C.A.24.dx=C.A.24.x-C.A.2c.x;C.A.24.dy=C.A.24.y-C.A.2c.y;k.12.1c.B(\'94\',\'aG\')},3z:u(C,x,y){if(C.A.2K){fZ=T(x/C.A.fY);99=fZ*2b/C.A.2K;fL=T(y/C.A.fS);8a=fL*2b/C.A.2K}P{99=T(x*2b/C.A.fK);8a=T(y*2b/C.A.fN)}C.A.bQ=[99||0,8a||0,x||0,y||0];if(C.A.3z)C.A.3z.1F(C,C.A.bQ)},g4:u(2l){3O=2l.7F||2l.7A||-1;3m(3O){1e 35:k.3d.59(q.3Z,[9W,9W]);1r;1e 
36:k.3d.59(q.3Z,[-9W,-9W]);1r;1e 37:k.3d.59(q.3Z,[-q.3Z.A.gx||-1,0]);1r;1e 38:k.3d.59(q.3Z,[0,-q.3Z.A.gy||-1]);1r;1e 39:k.3d.59(q.3Z,[q.3Z.A.gx||1,0]);1r;1e 40:k.12.59(q.3Z,[0,q.3Z.A.gy||1]);1r}},59:u(C,Y){if(!C.A){E}C.A.1C=k.21(k.1a.2R(C),k.1a.2p(C));C.A.2c={x:T(k.B(C,\'O\'))||0,y:T(k.B(C,\'Q\'))||0};C.A.4m=k.B(C,\'Y\');if(C.A.4m!=\'2y\'&&C.A.4m!=\'1O\'){C.18.Y=\'2y\'}k.12.bP(C);k.3d.bO(C);dx=T(Y[0])||0;dy=T(Y[1])||0;2x=C.A.2c.x+dx;2r=C.A.2c.y+dy;if(C.A.2K){3q=k.12.bI.1F(C,[2x,2r,dx,dy]);if(3q.1K==7n){dx=3q.dx;dy=3q.dy}2x=C.A.2c.x+dx;2r=C.A.2c.y+dy}3q=k.12.bH.1F(C,[2x,2r,dx,dy]);if(3q&&3q.1K==7n){dx=3q.dx;dy=3q.dy}2x=C.A.2c.x+dx;2r=C.A.2c.y+dy;if(C.A.5i&&(C.A.3z||C.A.2T)){k.3d.3z(C,2x,2r)}2x=!C.A.1N||C.A.1N==\'4a\'?2x:C.A.2c.x||0;2r=!C.A.1N||C.A.1N==\'4i\'?2r:C.A.2c.y||0;C.18.O=2x+\'S\';C.18.Q=2r+\'S\'},2s:u(o){E q.1B(u(){if(q.bF==1b||!o.3P||!k.1a||!k.12||!k.1x){E}5N=k(o.3P,q);if(5N.1P()==0){E}D 4K={2o:\'96\',5i:1b,3z:o.3z&&o.3z.1K==2C?o.3z:U,2T:o.2T&&o.2T.1K==2C?o.2T:U,3y:q,1J:o.1J||I};if(o.2K&&T(o.2K)){4K.2K=T(o.2K)||1;4K.2K=4K.2K>0?4K.2K:1}if(5N.1P()==1)5N.6Y(4K);P{k(5N.K(0)).6Y(4K);4K.3y=U;5N.6Y(4K)}5N.7E(k.3d.g4);5N.1p(\'bG\',k.3d.bG++);q.bF=1b;q.4r={};q.4r.g6=4K.g6;q.4r.2K=4K.2K;q.4r.6T=5N;q.4r.bE=o.bE?1b:I;bS=q;bS.4r.6T.1B(u(2I){q.bR=2I;q.9P=bS});if(o.3u&&o.3u.1K==7b){1Y(i=o.3u.1h-1;i>=0;i--){if(o.3u[i].1K==7b&&o.3u[i].1h==2){el=q.4r.6T.K(i);if(el.4S){k.3d.59(el,o.3u[i])}}}}})}};k.fn.21({jV:k.3d.2s,k9:k.3d.f0,kb:k.3d.K});k.2t={6J:U,7c:I,9O:U,6D:u(e){k.2t.7c=1b;k.2t.22(e,q,1b)},bx:u(e){if(k.2t.6J!=q)E;k.2t.7c=I;k.2t.2G(e,q)},22:u(e,el,7c){if(k.2t.6J!=U)E;if(!el){el=q}k.2t.6J=el;1M=k.21(k.1a.2R(el),k.1a.2p(el));8G=k(el);45=8G.1p(\'45\');3f=8G.1p(\'3f\');if(45){k.2t.9O=45;8G.1p(\'45\',\'\');k(\'#fF\').3w(45);if(3f)k(\'#c9\').3w(3f.4v(\'k4://\',\'\'));P 
k(\'#c9\').3w(\'\');1c=k(\'#8V\');if(el.4T.3b){1c.K(0).3b=el.4T.3b}P{1c.K(0).3b=\'\'}c7=k.1a.2p(1c.K(0));fj=7c&&el.4T.Y==\'c3\'?\'4l\':el.4T.Y;3m(fj){1e\'Q\':2r=1M.y-c7.hb;2x=1M.x;1r;1e\'O\':2r=1M.y;2x=1M.x-c7.1D;1r;1e\'2N\':2r=1M.y;2x=1M.x+1M.1D;1r;1e\'c3\':k(\'2e\').1H(\'3H\',k.2t.3H);1s=k.1a.44(e);2r=1s.y+15;2x=1s.x+15;1r;aG:2r=1M.y+1M.hb;2x=1M.x;1r}1c.B({Q:2r+\'S\',O:2x+\'S\'});if(el.4T.53==I){1c.22()}P{1c.7m(el.4T.53)}if(el.4T.2U)el.4T.2U.1F(el);8G.1H(\'8q\',k.2t.2G).1H(\'5I\',k.2t.bx)}},3H:u(e){if(k.2t.6J==U){k(\'2e\').3p(\'3H\',k.2t.3H);E}1s=k.1a.44(e);k(\'#8V\').B({Q:1s.y+15+\'S\',O:1s.x+15+\'S\'})},2G:u(e,el){if(!el){el=q}if(k.2t.7c!=1b&&k.2t.6J==el){k.2t.6J=U;k(\'#8V\').7k(1);k(el).1p(\'45\',k.2t.9O).3p(\'8q\',k.2t.2G).3p(\'5I\',k.2t.bx);if(el.4T.3i)el.4T.3i.1F(el);k.2t.9O=U}},2s:u(M){if(!k.2t.1c){k(\'2e\').1R(\'<26 id="8V"><26 id="fF"><26 id="c9">\');k(\'#8V\').B({Y:\'1O\',3B:6x,19:\'1n\'});k.2t.1c=1b}E q.1B(u(){if(k.1p(q,\'45\')){q.4T={Y:/Q|4l|O|2N|c3/.43(M.Y)?M.Y:\'4l\',3b:M.3b?M.3b:I,53:M.53?M.53:I,2U:M.2U&&M.2U.1K==2C?M.2U:I,3i:M.3i&&M.3i.1K==2C?M.3i:I};D el=k(q);el.1H(\'aV\',k.2t.22);el.1H(\'6D\',k.2t.6D)}})}};k.fn.k0=k.2t.2s;k.21({G:{bV:u(p,n,1W,1I,1m){E((-14.5v(p*14.2Q)/2)+0.5)*1I+1W},k2:u(p,n,1W,1I,1m){E 1I*(n/=1m)*n*n+1W},fG:u(p,n,1W,1I,1m){E-1I*((n=n/1m-1)*n*n*n-1)+1W},k1:u(p,n,1W,1I,1m){if((n/=1m/2)<1)E 1I/2*n*n*n*n+1W;E-1I/2*((n-=2)*n*n*n-2)+1W},9c:u(p,n,1W,1I,1m){if((n/=1m)<(1/2.75)){E 1I*(7.9N*n*n)+1W}P if(n<(2/2.75)){E 1I*(7.9N*(n-=(1.5/2.75))*n+.75)+1W}P if(n<(2.5/2.75)){E 1I*(7.9N*(n-=(2.25/2.75))*n+.jC)+1W}P{E 1I*(7.9N*(n-=(2.jB/2.75))*n+.jd)+1W}},bY:u(p,n,1W,1I,1m){if(k.G.9c)E 1I-k.G.9c(p,1m-n,0,1I,1m)+1W;E 1W+1I},jc:u(p,n,1W,1I,1m){if(k.G.bY&&k.G.9c)if(n<1m/2)E k.G.bY(p,n*2,0,1I,1m)*.5+1W;E k.G.9c(p,n*2-1m,0,1I,1m)*.5+1I*.5+1W;E 1W+1I},jb:u(p,n,1W,1I,1m){D a,s;if(n==0)E 1W;if((n/=1m)==1)E 
1W+1I;a=1I*0.3;p=1m*.3;if(a<14.3R(1I)){a=1I;s=p/4}P{s=p/(2*14.2Q)*14.c0(1I/a)}E-(a*14.5Y(2,10*(n-=1))*14.98((n*1m-s)*(2*14.2Q)/p))+1W},je:u(p,n,1W,1I,1m){D a,s;if(n==0)E 1W;if((n/=1m/2)==2)E 1W+1I;a=1I*0.3;p=1m*.3;if(a<14.3R(1I)){a=1I;s=p/4}P{s=p/(2*14.2Q)*14.c0(1I/a)}E a*14.5Y(2,-10*n)*14.98((n*1m-s)*(2*14.2Q)/p)+1I+1W},jf:u(p,n,1W,1I,1m){D a,s;if(n==0)E 1W;if((n/=1m/2)==2)E 1W+1I;a=1I*0.3;p=1m*.3;if(a<14.3R(1I)){a=1I;s=p/4}P{s=p/(2*14.2Q)*14.c0(1I/a)}if(n<1){E-.5*(a*14.5Y(2,10*(n-=1))*14.98((n*1m-s)*(2*14.2Q)/p))+1W}E a*14.5Y(2,-10*(n-=1))*14.98((n*1m-s)*(2*14.2Q)/p)*.5+1I+1W}}});k.fn.21({fz:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.5W(q,H,J,\'4U\',G)})},fP:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.5W(q,H,J,\'4y\',G)})},j9:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.5W(q,H,J,\'f8\',G)})},j3:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.5W(q,H,J,\'O\',G)})},j2:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.5W(q,H,J,\'2N\',G)})},j1:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.5W(q,H,J,\'fh\',G)})}});k.fx.5W=u(e,H,J,2P,G){if(!k.4O(e)){k.2L(e,\'1o\');E I}D z=q;z.el=k(e);z.1P=k.1a.2p(e);z.G=2h J==\'5g\'?J:G||U;if(!e.4s)e.4s=z.el.B(\'19\');if(2P==\'f8\'){2P=z.el.B(\'19\')==\'1n\'?\'4y\':\'4U\'}P if(2P==\'fh\'){2P=z.el.B(\'19\')==\'1n\'?\'2N\':\'O\'}z.el.22();z.H=H;z.J=2h J==\'u\'?J:U;z.fx=k.fx.9h(e);z.2P=2P;z.23=u(){if(z.J&&z.J.1K==2C){z.J.1F(z.el.K(0))}if(z.2P==\'4y\'||z.2P==\'2N\'){z.el.B(\'19\',z.el.K(0).4s==\'1n\'?\'2E\':z.el.K(0).4s)}P{z.el.2G()}k.fx.9g(z.fx.3o.K(0),z.fx.W);k.2L(z.el.K(0),\'1o\')};3m(z.2P){1e\'4U\':6d=11 k.fx(z.fx.3o.K(0),k.H(z.H,z.G,z.23),\'V\');6d.1L(z.fx.W.1q.hb,0);1r;1e\'4y\':z.fx.3o.B(\'V\',\'83\');z.el.22();6d=11 k.fx(z.fx.3o.K(0),k.H(z.H,z.G,z.23),\'V\');6d.1L(0,z.fx.W.1q.hb);1r;1e\'O\':6d=11 k.fx(z.fx.3o.K(0),k.H(z.H,z.G,z.23),\'Z\');6d.1L(z.fx.W.1q.1D,0);1r;1e\'2N\':z.fx.3o.B(\'Z\',\'83\');z.el.22();6d=11 k.fx(z.fx.3o.K(0),k.H(z.H,z.G,z.23),\'Z\');6d.1L(0,z.fx.W.1q.1D);1r}};k.fn.kd=u(5w,J){E q.1w(\'1o\',u(){if(!k.4O(q)){k.2L(q,\'1o\');E I}D e=11 k.fx.fa(q,5w,J);e.bc()})};k.fx.fa=u(e,5w,J){D 
z=q;z.el=k(e);z.el.22();z.J=J;z.5w=T(5w)||40;z.W={};z.W.Y=z.el.B(\'Y\');z.W.Q=T(z.el.B(\'Q\'))||0;z.W.O=T(z.el.B(\'O\'))||0;if(z.W.Y!=\'2y\'&&z.W.Y!=\'1O\'){z.el.B(\'Y\',\'2y\')}z.41=5;z.5D=1;z.bc=u(){z.5D++;z.e=11 k.fx(z.el.K(0),{1m:j6,23:u(){z.e=11 k.fx(z.el.K(0),{1m:80,23:u(){z.5w=T(z.5w/2);if(z.5D<=z.41)z.bc();P{z.el.B(\'Y\',z.W.Y).B(\'Q\',z.W.Q+\'S\').B(\'O\',z.W.O+\'S\');k.2L(z.el.K(0),\'1o\');if(z.J&&z.J.1K==2C){z.J.1F(z.el.K(0))}}}},\'Q\');z.e.1L(z.W.Q-z.5w,z.W.Q)}},\'Q\');z.e.1L(z.W.Q,z.W.Q-z.5w)}};k.fn.21({ji:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'4y\',\'4d\',G)})},jj:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'4y\',\'in\',G)})},jw:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'4y\',\'3Y\',G)})},jv:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'4U\',\'4d\',G)})},ju:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'4U\',\'in\',G)})},jx:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'4U\',\'3Y\',G)})},jy:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'O\',\'4d\',G)})},jz:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'O\',\'in\',G)})},jt:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'O\',\'3Y\',G)})},js:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'2N\',\'4d\',G)})},jm:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'2N\',\'in\',G)})},jl:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.4k(q,H,J,\'2N\',\'3Y\',G)})}});k.fx.4k=u(e,H,J,2P,1u,G){if(!k.4O(e)){k.2L(e,\'1o\');E I}D z=q;z.el=k(e);z.G=2h J==\'5g\'?J:G||U;z.W={};z.W.Y=z.el.B(\'Y\');z.W.Q=z.el.B(\'Q\');z.W.O=z.el.B(\'O\');if(!e.4s)e.4s=z.el.B(\'19\');if(1u==\'3Y\'){1u=z.el.B(\'19\')==\'1n\'?\'in\':\'4d\'}z.el.22();if(z.W.Y!=\'2y\'&&z.W.Y!=\'1O\'){z.el.B(\'Y\',\'2y\')}z.1u=1u;J=2h J==\'u\'?J:U;8y=1;3m(2P){1e\'4U\':z.e=11 k.fx(z.el.K(0),k.H(H-15,z.G,J),\'Q\');z.68=2m(z.W.Q)||0;z.9L=z.fM;8y=-1;1r;1e\'4y\':z.e=11 k.fx(z.el.K(0),k.H(H-15,z.G,J),\'Q\');z.68=2m(z.W.Q)||0;z.9L=z.fM;1r;1e\'2N\':z.e=11 k.fx(z.el.K(0),k.H(H-15,z.G,J),\'O\');z.68=2m(z.W.O)||0;z.9L=z.f4;1r;1e\'O\':z.e=11 
k.fx(z.el.K(0),k.H(H-15,z.G,J),\'O\');z.68=2m(z.W.O)||0;z.9L=z.f4;8y=-1;1r}z.e2=11 k.fx(z.el.K(0),k.H(H,z.G,u(){z.el.B(z.W);if(z.1u==\'4d\'){z.el.B(\'19\',\'1n\')}P z.el.B(\'19\',z.el.K(0).4s==\'1n\'?\'2E\':z.el.K(0).4s);k.2L(z.el.K(0),\'1o\')}),\'1J\');if(1u==\'in\'){z.e.1L(z.68+2b*8y,z.68);z.e2.1L(0,1)}P{z.e.1L(z.68,z.68+2b*8y);z.e2.1L(1,0)}};k.fn.21({jn:u(H,V,J,G){E q.1w(\'1o\',u(){11 k.fx.9M(q,H,V,J,\'g7\',G)})},jo:u(H,V,J,G){E q.1w(\'1o\',u(){11 k.fx.9M(q,H,V,J,\'9Q\',G)})},jr:u(H,V,J,G){E q.1w(\'1o\',u(){11 k.fx.9M(q,H,V,J,\'3Y\',G)})}});k.fx.9M=u(e,H,V,J,1u,G){if(!k.4O(e)){k.2L(e,\'1o\');E I}D z=q;z.el=k(e);z.G=2h J==\'5g\'?J:G||U;z.J=2h J==\'u\'?J:U;if(1u==\'3Y\'){1u=z.el.B(\'19\')==\'1n\'?\'9Q\':\'g7\'}z.H=H;z.V=V&&V.1K==cR?V:20;z.fx=k.fx.9h(e);z.1u=1u;z.23=u(){if(z.J&&z.J.1K==2C){z.J.1F(z.el.K(0))}if(z.1u==\'9Q\'){z.el.22()}P{z.el.2G()}k.fx.9g(z.fx.3o.K(0),z.fx.W);k.2L(z.el.K(0),\'1o\')};if(z.1u==\'9Q\'){z.el.22();z.fx.3o.B(\'V\',z.V+\'S\').B(\'Z\',\'83\');z.ef=11 k.fx(z.fx.3o.K(0),k.H(z.H,z.G,u(){z.ef=11 k.fx(z.fx.3o.K(0),k.H(z.H,z.G,z.23),\'V\');z.ef.1L(z.V,z.fx.W.1q.hb)}),\'Z\');z.ef.1L(0,z.fx.W.1q.1D)}P{z.ef=11 k.fx(z.fx.3o.K(0),k.H(z.H,z.G,u(){z.ef=11 k.fx(z.fx.3o.K(0),k.H(z.H,z.G,z.23),\'Z\');z.ef.1L(z.fx.W.1q.1D,0)}),\'V\');z.ef.1L(z.fx.W.1q.hb,z.V)}};k.fn.21({jq:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.6z(q,H,1,2b,1b,J,\'f1\',G)})},jp:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.6z(q,H,2b,1,1b,J,\'d2\',G)})},kt:u(H,J,G){E q.1w(\'1o\',u(){D G=G||\'fG\';11 k.fx.6z(q,H,2b,fd,1b,J,\'6l\',G)})},6z:u(H,5d,4L,6E,J,G){E q.1w(\'1o\',u(){11 k.fx.6z(q,H,5d,4L,6E,J,\'6z\',G)})}});k.fx.6z=u(e,H,5d,4L,6E,J,1u,G){if(!k.4O(e)){k.2L(e,\'1o\');E I}D z=q;z.el=k(e);z.5d=T(5d)||2b;z.4L=T(4L)||2b;z.G=2h J==\'5g\'?J:G||U;z.J=2h 
J==\'u\'?J:U;z.1m=k.H(H).1m;z.6E=6E||U;z.2f=k.1a.2p(e);z.W={Z:z.el.B(\'Z\'),V:z.el.B(\'V\'),4w:z.el.B(\'4w\')||\'2b%\',Y:z.el.B(\'Y\'),19:z.el.B(\'19\'),Q:z.el.B(\'Q\'),O:z.el.B(\'O\'),2Y:z.el.B(\'2Y\'),4Z:z.el.B(\'4Z\'),6k:z.el.B(\'6k\'),6g:z.el.B(\'6g\'),5a:z.el.B(\'5a\'),66:z.el.B(\'66\'),6j:z.el.B(\'6j\'),5M:z.el.B(\'5M\'),4X:z.el.B(\'4X\')};z.Z=T(z.W.Z)||e.4b||0;z.V=T(z.W.V)||e.63||0;z.Q=T(z.W.Q)||0;z.O=T(z.W.O)||0;1q=[\'em\',\'S\',\'kJ\',\'%\'];1Y(i in 1q){if(z.W.4w.3F(1q[i])>0){z.fi=1q[i];z.4w=2m(z.W.4w)}if(z.W.4Z.3F(1q[i])>0){z.fw=1q[i];z.bt=2m(z.W.4Z)||0}if(z.W.6k.3F(1q[i])>0){z.fB=1q[i];z.bg=2m(z.W.6k)||0}if(z.W.6g.3F(1q[i])>0){z.fE=1q[i];z.bf=2m(z.W.6g)||0}if(z.W.5a.3F(1q[i])>0){z.fv=1q[i];z.be=2m(z.W.5a)||0}if(z.W.66.3F(1q[i])>0){z.fk=1q[i];z.bb=2m(z.W.66)||0}if(z.W.6j.3F(1q[i])>0){z.fs=1q[i];z.ba=2m(z.W.6j)||0}if(z.W.5M.3F(1q[i])>0){z.fb=1q[i];z.cJ=2m(z.W.5M)||0}if(z.W.4X.3F(1q[i])>0){z.fq=1q[i];z.cX=2m(z.W.4X)||0}}if(z.W.Y!=\'2y\'&&z.W.Y!=\'1O\'){z.el.B(\'Y\',\'2y\')}z.el.B(\'2Y\',\'2O\');z.1u=1u;3m(z.1u){1e\'f1\':z.4f=z.Q+z.2f.h/2;z.57=z.Q;z.4c=z.O+z.2f.w/2;z.4W=z.O;1r;1e\'d2\':z.57=z.Q+z.2f.h/2;z.4f=z.Q;z.4W=z.O+z.2f.w/2;z.4c=z.O;1r;1e\'6l\':z.57=z.Q-z.2f.h/4;z.4f=z.Q;z.4W=z.O-z.2f.w/4;z.4c=z.O;1r}z.bo=I;z.t=(11 72).71();z.4u=u(){6c(z.2H);z.2H=U};z.2D=u(){if(z.bo==I){z.el.22();z.bo=1b}D t=(11 72).71();D n=t-z.t;D 
p=n/z.1m;if(t>=z.1m+z.t){b1(u(){o=1;if(z.1u){t=z.57;l=z.4W;if(z.1u==\'6l\')o=0}z.bv(z.4L,l,t,1b,o)},13);z.4u()}P{o=1;if(!k.G||!k.G[z.G]){s=((-14.5v(p*14.2Q)/2)+0.5)*(z.4L-z.5d)+z.5d}P{s=k.G[z.G](p,n,z.5d,(z.4L-z.5d),z.1m)}if(z.1u){if(!k.G||!k.G[z.G]){t=((-14.5v(p*14.2Q)/2)+0.5)*(z.57-z.4f)+z.4f;l=((-14.5v(p*14.2Q)/2)+0.5)*(z.4W-z.4c)+z.4c;if(z.1u==\'6l\')o=((-14.5v(p*14.2Q)/2)+0.5)*(-0.9R)+0.9R}P{t=k.G[z.G](p,n,z.4f,(z.57-z.4f),z.1m);l=k.G[z.G](p,n,z.4c,(z.4W-z.4c),z.1m);if(z.1u==\'6l\')o=k.G[z.G](p,n,0.9R,-0.9R,z.1m)}}z.bv(s,l,t,I,o)}};z.2H=6I(u(){z.2D()},13);z.bv=u(4z,O,Q,fp,1J){z.el.B(\'V\',z.V*4z/2b+\'S\').B(\'Z\',z.Z*4z/2b+\'S\').B(\'O\',O+\'S\').B(\'Q\',Q+\'S\').B(\'4w\',z.4w*4z/2b+z.fi);if(z.bt)z.el.B(\'4Z\',z.bt*4z/2b+z.fw);if(z.bg)z.el.B(\'6k\',z.bg*4z/2b+z.fB);if(z.bf)z.el.B(\'6g\',z.bf*4z/2b+z.fE);if(z.be)z.el.B(\'5a\',z.be*4z/2b+z.fv);if(z.bb)z.el.B(\'66\',z.bb*4z/2b+z.fk);if(z.ba)z.el.B(\'6j\',z.ba*4z/2b+z.fs);if(z.cJ)z.el.B(\'5M\',z.cJ*4z/2b+z.fb);if(z.cX)z.el.B(\'4X\',z.cX*4z/2b+z.fq);if(z.1u==\'6l\'){if(1V.7a)z.el.K(0).18.69="9V(1J="+1J*2b+")";z.el.K(0).18.1J=1J}if(fp){if(z.6E){z.el.B(z.W)}if(z.1u==\'d2\'||z.1u==\'6l\'){z.el.B(\'19\',\'1n\');if(z.1u==\'6l\'){if(1V.7a)z.el.K(0).18.69="9V(1J="+2b+")";z.el.K(0).18.1J=1}}P z.el.B(\'19\',\'2E\');if(z.J)z.J.1F(z.el.K(0));k.2L(z.el.K(0),\'1o\')}}};k.fn.kL=u(H,4C,J,G){E q.1w(\'f6\',u(){q.73=k(q).1p("18")||\'\';G=2h J==\'5g\'?J:G||U;J=2h J==\'u\'?J:U;D 9U=k(q).B(\'7f\');D 87=q.3e;7o(9U==\'b7\'&&87){9U=k(87).B(\'7f\');87=87.3e}k(q).B(\'7f\',4C);if(2h q.73==\'8i\')q.73=q.73["9T"];k(q).5K({\'7f\':9U},H,G,u(){k.2L(q,\'f6\');if(2h k(q).1p("18")==\'8i\'){k(q).1p("18")["9T"]="";k(q).1p("18")["9T"]=q.73}P{k(q).1p("18",q.73)}if(J)J.1F(q)})})};k.fn.21({kg:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.5A(q,H,J,\'4i\',\'5P\',G)})},kq:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.5A(q,H,J,\'4a\',\'5P\',G)})},kr:u(H,J,G){E q.1w(\'1o\',u(){if(k.B(q,\'19\')==\'1n\'){11 k.fx.5A(q,H,J,\'4a\',\'7e\',G)}P{11 
k.fx.5A(q,H,J,\'4a\',\'5P\',G)}})},kz:u(H,J,G){E q.1w(\'1o\',u(){if(k.B(q,\'19\')==\'1n\'){11 k.fx.5A(q,H,J,\'4i\',\'7e\',G)}P{11 k.fx.5A(q,H,J,\'4i\',\'5P\',G)}})},ky:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.5A(q,H,J,\'4i\',\'7e\',G)})},kx:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.5A(q,H,J,\'4a\',\'7e\',G)})}});k.fx.5A=u(e,H,J,2P,1u,G){if(!k.4O(e)){k.2L(e,\'1o\');E I}D z=q;D 5H=I;z.el=k(e);z.G=2h J==\'5g\'?J:G||U;z.J=2h J==\'u\'?J:U;z.1u=1u;z.H=H;z.2f=k.1a.2p(e);z.W={};z.W.Y=z.el.B(\'Y\');z.W.19=z.el.B(\'19\');if(z.W.19==\'1n\'){62=z.el.B(\'3j\');z.el.22();5H=1b}z.W.Q=z.el.B(\'Q\');z.W.O=z.el.B(\'O\');if(5H){z.el.2G();z.el.B(\'3j\',62)}z.W.Z=z.2f.w+\'S\';z.W.V=z.2f.h+\'S\';z.W.2Y=z.el.B(\'2Y\');z.2f.Q=T(z.W.Q)||0;z.2f.O=T(z.W.O)||0;if(z.W.Y!=\'2y\'&&z.W.Y!=\'1O\'){z.el.B(\'Y\',\'2y\')}z.el.B(\'2Y\',\'2O\').B(\'V\',1u==\'7e\'&&2P==\'4i\'?1:z.2f.h+\'S\').B(\'Z\',1u==\'7e\'&&2P==\'4a\'?1:z.2f.w+\'S\');z.23=u(){z.el.B(z.W);if(z.1u==\'5P\')z.el.2G();P z.el.22();k.2L(z.el.K(0),\'1o\')};3m(2P){1e\'4i\':z.eh=11 k.fx(z.el.K(0),k.H(H-15,z.G,J),\'V\');z.et=11 k.fx(z.el.K(0),k.H(z.H,z.G,z.23),\'Q\');if(z.1u==\'5P\'){z.eh.1L(z.2f.h,0);z.et.1L(z.2f.Q,z.2f.Q+z.2f.h/2)}P{z.eh.1L(0,z.2f.h);z.et.1L(z.2f.Q+z.2f.h/2,z.2f.Q)}1r;1e\'4a\':z.eh=11 k.fx(z.el.K(0),k.H(H-15,z.G,J),\'Z\');z.et=11 k.fx(z.el.K(0),k.H(z.H,z.G,z.23),\'O\');if(z.1u==\'5P\'){z.eh.1L(z.2f.w,0);z.et.1L(z.2f.O,z.2f.O+z.2f.w/2)}P{z.eh.1L(0,z.2f.w);z.et.1L(z.2f.O+z.2f.w/2,z.2f.O)}1r}};k.fn.cr=u(H,41,J){E q.1w(\'1o\',u(){if(!k.4O(q)){k.2L(q,\'1o\');E I}D fx=11 k.fx.cr(q,H,41,J);fx.cm()})};k.fx.cr=u(el,H,41,J){D z=q;z.41=41;z.5D=1;z.el=el;z.H=H;z.J=J;k(z.el).22();z.cm=u(){z.5D++;z.e=11 k.fx(z.el,k.H(z.H,u(){z.ef=11 k.fx(z.el,k.H(z.H,u(){if(z.5D<=z.41)z.cm();P{k.2L(z.el,\'1o\');if(z.J&&z.J.1K==2C){z.J.1F(z.el)}}}),\'1J\');z.ef.1L(0,1)}),\'1J\');z.e.1L(1,0)}};k.fn.21({9S:u(H,1N,G){o=k.H(H);E q.1w(\'1o\',u(){11 k.fx.9S(q,o,1N,G)})},ks:u(H,1N,G){E q.1B(u(){k(\'a[@3f*="#"]\',q).5G(u(e){g8=q.3f.7h(\'#\');k(\'#\'+g8[1]).9S(H,1N,G);E 
I})})}});k.fx.9S=u(e,o,1N,G){D z=q;z.o=o;z.e=e;z.1N=/g3|g0/.43(1N)?1N:I;z.G=G;p=k.1a.2R(e);s=k.1a.6W();z.4u=u(){6c(z.2H);z.2H=U;k.2L(z.e,\'1o\')};z.t=(11 72).71();s.h=s.h>s.ih?(s.h-s.ih):s.h;s.w=s.w>s.iw?(s.w-s.iw):s.w;z.57=p.y>s.h?s.h:p.y;z.4W=p.x>s.w?s.w:p.x;z.4f=s.t;z.4c=s.l;z.2D=u(){D t=(11 72).71();D n=t-z.t;D p=n/z.o.1m;if(t>=z.o.1m+z.t){z.4u();b1(u(){z.cE(z.57,z.4W)},13)}P{if(!z.1N||z.1N==\'g3\'){if(!k.G||!k.G[z.G]){aa=((-14.5v(p*14.2Q)/2)+0.5)*(z.57-z.4f)+z.4f}P{aa=k.G[z.G](p,n,z.4f,(z.57-z.4f),z.o.1m)}}P{aa=z.4f}if(!z.1N||z.1N==\'g0\'){if(!k.G||!k.G[z.G]){a9=((-14.5v(p*14.2Q)/2)+0.5)*(z.4W-z.4c)+z.4c}P{a9=k.G[z.G](p,n,z.4c,(z.4W-z.4c),z.o.1m)}}P{a9=z.4c}z.cE(aa,a9)}};z.cE=u(t,l){1V.gN(l,t)};z.2H=6I(u(){z.2D()},13)};k.fn.cy=u(41,J){E q.1w(\'1o\',u(){if(!k.4O(q)){k.2L(q,\'1o\');E I}D e=11 k.fx.cy(q,41,J);e.cx()})};k.fx.cy=u(e,41,J){D z=q;z.el=k(e);z.el.22();z.41=T(41)||3;z.J=J;z.5D=1;z.W={};z.W.Y=z.el.B(\'Y\');z.W.Q=T(z.el.B(\'Q\'))||0;z.W.O=T(z.el.B(\'O\'))||0;if(z.W.Y!=\'2y\'&&z.W.Y!=\'1O\'){z.el.B(\'Y\',\'2y\')}z.cx=u(){z.5D++;z.e=11 k.fx(z.el.K(0),{1m:60,23:u(){z.e=11 k.fx(z.el.K(0),{1m:60,23:u(){z.e=11 k.fx(e,{1m:60,23:u(){if(z.5D<=z.41)z.cx();P{z.el.B(\'Y\',z.W.Y).B(\'Q\',z.W.Q+\'S\').B(\'O\',z.W.O+\'S\');k.2L(z.el.K(0),\'1o\');if(z.J&&z.J.1K==2C){z.J.1F(z.el.K(0))}}}},\'O\');z.e.1L(z.W.O-20,z.W.O)}},\'O\');z.e.1L(z.W.O+20,z.W.O-20)}},\'O\');z.e.1L(z.W.O,z.W.O+20)}};k.fn.21({g9:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'4U\',\'in\',G)})},f3:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'4U\',\'4d\',G)})},gM:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'4U\',\'3Y\',G)})},gL:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'4y\',\'in\',G)})},gK:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'4y\',\'4d\',G)})},gS:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'4y\',\'3Y\',G)})},gR:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'O\',\'in\',G)})},gJ:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'O\',\'4d\',G)})},gI:u(H,J,G){E q.1w(\'1o\',u(){11 
k.fx.1z(q,H,J,\'O\',\'3Y\',G)})},gC:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'2N\',\'in\',G)})},gB:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'2N\',\'4d\',G)})},gU:u(H,J,G){E q.1w(\'1o\',u(){11 k.fx.1z(q,H,J,\'2N\',\'3Y\',G)})}});k.fx.1z=u(e,H,J,2P,1u,G){if(!k.4O(e)){k.2L(e,\'1o\');E I}D z=q;z.el=k(e);z.G=2h J==\'5g\'?J:G||U;z.J=2h J==\'u\'?J:U;if(1u==\'3Y\'){1u=z.el.B(\'19\')==\'1n\'?\'in\':\'4d\'}if(!e.4s)e.4s=z.el.B(\'19\');z.el.22();z.H=H;z.fx=k.fx.9h(e);z.1u=1u;z.2P=2P;z.23=u(){if(z.1u==\'4d\')z.el.B(\'3j\',\'2O\');k.fx.9g(z.fx.3o.K(0),z.fx.W);if(z.1u==\'in\'){z.el.B(\'19\',z.el.K(0).4s==\'1n\'?\'2E\':z.el.K(0).4s)}P{z.el.B(\'19\',\'1n\');z.el.B(\'3j\',\'dR\')}if(z.J&&z.J.1K==2C){z.J.1F(z.el.K(0))}k.2L(z.el.K(0),\'1o\')};3m(z.2P){1e\'4U\':z.ef=11 k.fx(z.el.K(0),k.H(z.H,z.G,z.23),\'Q\');z.7S=11 k.fx(z.fx.3o.K(0),k.H(z.H,z.G),\'V\');if(z.1u==\'in\'){z.ef.1L(-z.fx.W.1q.hb,0);z.7S.1L(0,z.fx.W.1q.hb)}P{z.ef.1L(0,-z.fx.W.1q.hb);z.7S.1L(z.fx.W.1q.hb,0)}1r;1e\'4y\':z.ef=11 k.fx(z.el.K(0),k.H(z.H,z.G,z.23),\'Q\');if(z.1u==\'in\'){z.ef.1L(z.fx.W.1q.hb,0)}P{z.ef.1L(0,z.fx.W.1q.hb)}1r;1e\'O\':z.ef=11 k.fx(z.el.K(0),k.H(z.H,z.G,z.23),\'O\');z.7S=11 k.fx(z.fx.3o.K(0),k.H(z.H,z.G),\'Z\');if(z.1u==\'in\'){z.ef.1L(-z.fx.W.1q.1D,0);z.7S.1L(0,z.fx.W.1q.1D)}P{z.ef.1L(0,-z.fx.W.1q.1D);z.7S.1L(z.fx.W.1q.1D,0)}1r;1e\'2N\':z.ef=11 k.fx(z.el.K(0),k.H(z.H,z.G,z.23),\'O\');if(z.1u==\'in\'){z.ef.1L(z.fx.W.1q.1D,0)}P{z.ef.1L(0,z.fx.W.1q.1D)}1r}};k.h2=U;k.fn.h1=u(o){E q.1B(u(){if(!o||!o.4L){E}D el=q;k(o.4L).1B(u(){11 k.fx.fu(el,q,o)})})};k.fx.fu=u(e,8s,o){D 
z=q;z.el=k(e);z.8s=8s;z.4e=1j.3t(\'26\');k(z.4e).B({Y:\'1O\'}).2Z(o.3b);if(!o.1m){o.1m=er}z.1m=o.1m;z.23=o.23;z.9i=0;z.9j=0;if(k.f5){z.9i=(T(k.3M(z.4e,\'5a\'))||0)+(T(k.3M(z.4e,\'6k\'))||0)+(T(k.3M(z.4e,\'4X\'))||0)+(T(k.3M(z.4e,\'6j\'))||0);z.9j=(T(k.3M(z.4e,\'4Z\'))||0)+(T(k.3M(z.4e,\'6g\'))||0)+(T(k.3M(z.4e,\'66\'))||0)+(T(k.3M(z.4e,\'5M\'))||0)}z.28=k.21(k.1a.2R(z.el.K(0)),k.1a.2p(z.el.K(0)));z.2X=k.21(k.1a.2R(z.8s),k.1a.2p(z.8s));z.28.1D-=z.9i;z.28.hb-=z.9j;z.2X.1D-=z.9i;z.2X.hb-=z.9j;z.J=o.23;k(\'2e\').1R(z.4e);k(z.4e).B(\'Z\',z.28.1D+\'S\').B(\'V\',z.28.hb+\'S\').B(\'Q\',z.28.y+\'S\').B(\'O\',z.28.x+\'S\').5K({Q:z.2X.y,O:z.2X.x,Z:z.2X.1D,V:z.2X.hb},z.1m,u(){k(z.4e).aB();if(z.23&&z.23.1K==2C){z.23.1F(z.el.K(0),[z.4L])}})};k.ak={2s:u(M){E q.1B(u(){D el=q;D 7x=2*14.2Q/eY;D aZ=2*14.2Q;if(k(el).B(\'Y\')!=\'2y\'&&k(el).B(\'Y\')!=\'1O\'){k(el).B(\'Y\',\'2y\')}el.1l={1S:k(M.1S,q),2F:M.2F,6M:M.6M,an:M.an,aZ:aZ,1P:k.1a.2p(q),Y:k.1a.2R(q),28:14.2Q/2,ct:M.ct,91:M.6R,6R:[],aY:I,7x:2*14.2Q/eY};el.1l.eZ=(el.1l.1P.w-el.1l.2F)/2;el.1l.7O=(el.1l.1P.h-el.1l.6M-el.1l.6M*el.1l.91)/2;el.1l.2D=2*14.2Q/el.1l.1S.1P();el.1l.cI=el.1l.1P.w/2;el.1l.cF=el.1l.1P.h/2-el.1l.6M*el.1l.91;D aS=1j.3t(\'26\');k(aS).B({Y:\'1O\',3B:1,Q:0,O:0});k(el).1R(aS);el.1l.1S.1B(u(2I){ab=k(\'1U\',q).K(0);V=T(el.1l.6M*el.1l.91);if(k.3h.4I){3N=1j.3t(\'1U\');k(3N).B(\'Y\',\'1O\');3N.2M=ab.2M;3N.18.69=\'iW aw:ax.ay.c1(1J=60, 18=1, iJ=0, i6=0, hz=0, hx=0)\'}P{3N=1j.3t(\'3N\');if(3N.ga){4H=3N.ga("2d");3N.18.Y=\'1O\';3N.18.V=V+\'S\';3N.18.Z=el.1l.2F+\'S\';3N.V=V;3N.Z=el.1l.2F;4H.i4();4H.i2(0,V);4H.hT(1,-1);4H.hJ(ab,0,0,el.1l.2F,V);4H.6E();4H.hN="hO-4d";D b0=4H.hQ(0,0,0,V);b0.g1(1,"fU(1X, 1X, 1X, 1)");b0.g1(0,"fU(1X, 1X, 1X, 0.6)");4H.hR=b0;if(iR.iv.3F(\'ix\')!=-1){4H.it()}P{4H.ir(0,0,el.1l.2F,V)}}}el.1l.6R[2I]=3N;k(aS).1R(3N)}).1H(\'aV\',u(e){el.1l.aY=1b;el.1l.H=el.1l.7x*0.1*el.1l.H/14.3R(el.1l.H);E I}).1H(\'8q\',u(e){el.1l.aY=I;E 
I});k.ak.7P(el);el.1l.H=el.1l.7x*0.2;el.1l.gm=1V.6I(u(){el.1l.28+=el.1l.H;if(el.1l.28>aZ)el.1l.28=0;k.ak.7P(el)},20);k(el).1H(\'8q\',u(){el.1l.H=el.1l.7x*0.2*el.1l.H/14.3R(el.1l.H)}).1H(\'3H\',u(e){if(el.1l.aY==I){1s=k.1a.44(e);fe=el.1l.1P.w-1s.x+el.1l.Y.x;el.1l.H=el.1l.ct*el.1l.7x*(el.1l.1P.w/2-fe)/(el.1l.1P.w/2)}})})},7P:u(el){el.1l.1S.1B(u(2I){ch=el.1l.28+2I*el.1l.2D;x=el.1l.eZ*14.5v(ch);y=el.1l.7O*14.98(ch);fm=T(2b*(el.1l.7O+y)/(2*el.1l.7O));fl=(el.1l.7O+y)/(2*el.1l.7O);Z=T((el.1l.2F-el.1l.an)*fl+el.1l.an);V=T(Z*el.1l.6M/el.1l.2F);q.18.Q=el.1l.cF+y-V/2+"S";q.18.O=el.1l.cI+x-Z/2+"S";q.18.Z=Z+"S";q.18.V=V+"S";q.18.3B=fm;el.1l.6R[2I].18.Q=T(el.1l.cF+y+V-1-V/2)+"S";el.1l.6R[2I].18.O=T(el.1l.cI+x-Z/2)+"S";el.1l.6R[2I].18.Z=Z+"S";el.1l.6R[2I].18.V=T(V*el.1l.91)+"S"})}};k.fn.h9=k.ak.2s;k.ff={2s:u(M){E q.1B(u(){if(!M.ae||!M.ad)E;D el=q;el.2j={ag:M.ag||bw,ae:M.ae,ad:M.ad,8r:M.8r||\'f7\',af:M.af||\'f7\',2U:M.2U&&2h M.2U==\'u\'?M.2U:I,3i:M.2U&&2h M.3i==\'u\'?M.3i:I,74:M.74&&2h M.74==\'u\'?M.74:I,ai:k(M.ae,q),8f:k(M.ad,q),H:M.H||8w,6e:M.6e||0};el.2j.8f.2G().B(\'V\',\'83\').eq(0).B({V:el.2j.ag+\'S\',19:\'2E\'}).2X();el.2j.ai.1B(u(2I){q.7d=2I}).h6(u(){k(q).2Z(el.2j.af)},u(){k(q).4p(el.2j.af)}).1H(\'5G\',u(e){if(el.2j.6e==q.7d)E;el.2j.ai.eq(el.2j.6e).4p(el.2j.8r).2X().eq(q.7d).2Z(el.2j.8r).2X();el.2j.8f.eq(el.2j.6e).5K({V:0},el.2j.H,u(){q.18.19=\'1n\';if(el.2j.3i){el.2j.3i.1F(el,[q])}}).2X().eq(q.7d).22().5K({V:el.2j.ag},el.2j.H,u(){q.18.19=\'2E\';if(el.2j.2U){el.2j.2U.1F(el,[q])}}).2X();if(el.2j.74){el.2j.74.1F(el,[q,el.2j.8f.K(q.7d),el.2j.ai.K(el.2j.6e),el.2j.8f.K(el.2j.6e)])}el.2j.6e=q.7d}).eq(0).2Z(el.2j.8r).2X();k(q).B(\'V\',k(q).B(\'V\')).B(\'2Y\',\'2O\')})}};k.fn.h7=k.ff.2s;k.3L={1c:U,8u:u(){31=q.2v;if(!31)E;18={fg:k(q).B(\'fg\')||\'\',4w:k(q).B(\'4w\')||\'\',8h:k(q).B(\'8h\')||\'\',fI:k(q).B(\'fI\')||\'\',fJ:k(q).B(\'fJ\')||\'\',fT:k(q).B(\'fT\')||\'\',cH:k(q).B(\'cH\')||\'\',fc:k(q).B(\'fc\')||\'\'};k.3L.1c.B(18);3w=k.3L.g2(31);3w=3w.4v(11 cp("\\\\n","g"),"
    ");k.3L.1c.3w(\'km\');ck=k.3L.1c.K(0).4b;k.3L.1c.3w(3w);Z=k.3L.1c.K(0).4b+ck;if(q.6t.2J&&Z>q.6t.2J[0]){Z=q.6t.2J[0]}q.18.Z=Z+\'S\';if(q.4S==\'cQ\'){V=k.3L.1c.K(0).63+ck;if(q.6t.2J&&V>q.6t.2J[1]){V=q.6t.2J[1]}q.18.V=V+\'S\'}},g2:u(31){co={\'&\':\'&j0;\',\'<\':\'&kB;\',\'>\':\'>\',\'"\':\'&kw;\'};1Y(i in co){31=31.4v(11 cp(i,\'g\'),co[i])}E 31},2s:u(2J){if(k.3L.1c==U){k(\'2e\',1j).1R(\'<26 id="fH" 18="Y: 1O; Q: 0; O: 0; 3j: 2O;">\');k.3L.1c=k(\'#fH\')}E q.1B(u(){if(/cQ|bz/.43(q.4S)){if(q.4S==\'bz\'){f9=q.5n(\'1u\');if(!/31|kv/.43(f9)){E}}if(2J&&(2J.1K==cR||(2J.1K==7b&&2J.1h==2))){if(2J.1K==cR)2J=[2J,2J];P{2J[0]=T(2J[0])||8w;2J[1]=T(2J[1])||8w}q.6t={2J:2J}}k(q).5I(k.3L.8u).6S(k.3L.8u).fX(k.3L.8u);k.3L.8u.1F(q)}})}};k.fn.ke=k.3L.2s;k.N={1c:U,8S:U,3E:U,2H:U,4o:U,bp:U,1d:U,2g:U,1S:U,5t:u(){k.N.8S.5t();if(k.N.3E){k.N.3E.2G()}},4u:u(){k.N.1S=U;k.N.2g=U;k.N.4o=k.N.1d.2v;if(k.N.1c.B(\'19\')==\'2E\'){if(k.N.1d.1f.fx){3m(k.N.1d.1f.fx.1u){1e\'bB\':k.N.1c.7k(k.N.1d.1f.fx.1m,k.N.5t);1r;1e\'1z\':k.N.1c.f3(k.N.1d.1f.fx.1m,k.N.5t);1r;1e\'aT\':k.N.1c.fz(k.N.1d.1f.fx.1m,k.N.5t);1r}}P{k.N.1c.2G()}if(k.N.1d.1f.3i)k.N.1d.1f.3i.1F(k.N.1d,[k.N.1c,k.N.3E])}P{k.N.5t()}1V.c6(k.N.2H)},fy:u(){D 1d=k.N.1d;D 4g=k.N.ap(1d);if(1d&&4g.3k!=k.N.4o&&4g.3k.1h>=1d.1f.aL){k.N.4o=4g.3k;k.N.bp=4g.3k;79={2q:k(1d).1p(\'kP\')||\'2q\',2v:4g.3k};k.kN({1u:\'kG\',79:k.kI(79),kF:u(ft){1d.1f.4h=k(\'3k\',ft);1P=1d.1f.4h.1P();if(1P>0){D 5x=\'\';1d.1f.4h.1B(u(2I){5x+=\'<90 4G="\'+k(\'2v\',q).31()+\'" 8O="\'+2I+\'" 18="94: aG;">\'+k(\'31\',q).31()+\'\'});if(1d.1f.aR){D 3G=k(\'2v\',1d.1f.4h.K(0)).31();1d.2v=4g.3l+3G+1d.1f.3K+4g.5Q;k.N.6G(1d,4g.3k.1h!=3G.1h?(4g.3l.1h+4g.3k.1h):3G.1h,4g.3k.1h!=3G.1h?(4g.3l.1h+3G.1h):3G.1h)}if(1P>0){k.N.b4(1d,5x)}P{k.N.4u()}}P{k.N.4u()}},6b:1d.1f.aM})}},b4:u(1d,5x){k.N.8S.3w(5x);k.N.1S=k(\'90\',k.N.8S.K(0));k.N.1S.aV(k.N.f2).1H(\'5G\',k.N.fO);D Y=k.1a.2R(1d);D 
1P=k.1a.2p(1d);k.N.1c.B(\'Q\',Y.y+1P.hb+\'S\').B(\'O\',Y.x+\'S\').2Z(1d.1f.aK);if(k.N.3E){k.N.3E.B(\'19\',\'2E\').B(\'Q\',Y.y+1P.hb+\'S\').B(\'O\',Y.x+\'S\').B(\'Z\',k.N.1c.B(\'Z\')).B(\'V\',k.N.1c.B(\'V\'))}k.N.2g=0;k.N.1S.K(0).3b=1d.1f.70;k.N.8P(1d,1d.1f.4h.K(0),\'6Z\');if(k.N.1c.B(\'19\')==\'1n\'){if(1d.1f.bA){D bm=k.1a.aj(1d,1b);D bl=k.1a.6h(1d,1b);k.N.1c.B(\'Z\',1d.4b-(k.f5?(bm.l+bm.r+bl.l+bl.r):0)+\'S\')}if(1d.1f.fx){3m(1d.1f.fx.1u){1e\'bB\':k.N.1c.7m(1d.1f.fx.1m);1r;1e\'1z\':k.N.1c.g9(1d.1f.fx.1m);1r;1e\'aT\':k.N.1c.fP(1d.1f.fx.1m);1r}}P{k.N.1c.22()}if(k.N.1d.1f.2U)k.N.1d.1f.2U.1F(k.N.1d,[k.N.1c,k.N.3E])}},fC:u(){D 1d=q;if(1d.1f.4h){k.N.4o=1d.2v;k.N.bp=1d.2v;D 5x=\'\';1d.1f.4h.1B(u(2I){2v=k(\'2v\',q).31().5Z();fR=1d.2v.5Z();if(2v.3F(fR)==0){5x+=\'<90 4G="\'+k(\'2v\',q).31()+\'" 8O="\'+2I+\'" 18="94: aG;">\'+k(\'31\',q).31()+\'\'}});if(5x!=\'\'){k.N.b4(1d,5x);q.1f.aW=1b;E}}1d.1f.4h=U;q.1f.aW=I},6G:u(2q,28,2X){if(2q.aI){D 6K=2q.aI();6K.j8(1b);6K.fr("bW",28);6K.ja("bW",-2X+28);6K.8Z()}P if(2q.aU){2q.aU(28,2X)}P{if(2q.5B){2q.5B=28;2q.dq=2X}}2q.6D()},fD:u(2q){if(2q.5B)E 2q.5B;P if(2q.aI){D 6K=1j.6G.du();D fo=6K.jg();E 0-fo.fr(\'bW\',-jX)}},ap:u(2q){D 4F={2v:2q.2v,3l:\'\',5Q:\'\',3k:\'\'};if(2q.1f.aO){D 97=I;D 5B=k.N.fD(2q)||0;D 56=4F.2v.7h(2q.1f.3K);1Y(D i=0;i<56.1h;i++){if((4F.3l.1h+56[i].1h>=5B||5B==0)&&!97){if(4F.3l.1h<=5B)4F.3k=56[i];P 4F.5Q+=56[i]+(56[i]!=\'\'?2q.1f.3K:\'\');97=1b}P if(97){4F.5Q+=56[i]+(56[i]!=\'\'?2q.1f.3K:\'\')}if(!97){4F.3l+=56[i]+(56.1h>1?2q.1f.3K:\'\')}}}P{4F.3k=4F.2v}E 4F},bu:u(e){1V.c6(k.N.2H);D 1d=k.N.ap(q);D 3O=e.7F||e.7A||-1;if(/^13$|27$|35$|36$|38$|40$|^9$/.43(3O)&&k.N.1S){if(1V.2l){1V.2l.cj=1b;1V.2l.ci=I}P{e.al();e.am()}if(k.N.2g!=U)k.N.1S.K(k.N.2g||0).3b=\'\';P k.N.2g=-1;3m(3O){1e 9:1e 13:if(k.N.2g==-1)k.N.2g=0;D 2g=k.N.1S.K(k.N.2g||0);D 
3G=2g.5n(\'4G\');q.2v=1d.3l+3G+q.1f.3K+1d.5Q;k.N.4o=1d.3k;k.N.6G(q,1d.3l.1h+3G.1h+q.1f.3K.1h,1d.3l.1h+3G.1h+q.1f.3K.1h);k.N.4u();if(q.1f.6a){4n=T(2g.5n(\'8O\'))||0;k.N.8P(q,q.1f.4h.K(4n),\'6a\')}if(q.76)q.76(I);E 3O!=13;1r;1e 27:q.2v=1d.3l+k.N.4o+q.1f.3K+1d.5Q;q.1f.4h=U;k.N.4u();if(q.76)q.76(I);E I;1r;1e 35:k.N.2g=k.N.1S.1P()-1;1r;1e 36:k.N.2g=0;1r;1e 38:k.N.2g--;if(k.N.2g<0)k.N.2g=k.N.1S.1P()-1;1r;1e 40:k.N.2g++;if(k.N.2g==k.N.1S.1P())k.N.2g=0;1r}k.N.8P(q,q.1f.4h.K(k.N.2g||0),\'6Z\');k.N.1S.K(k.N.2g||0).3b=q.1f.70;if(k.N.1S.K(k.N.2g||0).76)k.N.1S.K(k.N.2g||0).76(I);if(q.1f.aR){D aA=k.N.1S.K(k.N.2g||0).5n(\'4G\');q.2v=1d.3l+aA+q.1f.3K+1d.5Q;if(k.N.4o.1h!=aA.1h)k.N.6G(q,1d.3l.1h+k.N.4o.1h,1d.3l.1h+aA.1h)}E I}k.N.fC.1F(q);if(q.1f.aW==I){if(1d.3k!=k.N.4o&&1d.3k.1h>=q.1f.aL)k.N.2H=1V.b1(k.N.fy,q.1f.53);if(k.N.1S){k.N.4u()}}E 1b},8P:u(2q,3k,1u){if(2q.1f[1u]){D 79={};aE=3k.dU(\'*\');1Y(i=0;i\');k.N.3E=k(\'#g5\')}k(\'2e\',1j).1R(\'<26 id="gc" 18="Y: 1O; Q: 0; O: 0; z-b2: jE; 19: 1n;">&7J;\');k.N.1c=k(\'#gc\');k.N.8S=k(\'aX\',k.N.1c)}E q.1B(u(){if(q.4S!=\'bz\'&&q.5n(\'1u\')!=\'31\')E;q.1f={};q.1f.aM=M.aM;q.1f.aL=14.3R(T(M.aL)||1);q.1f.aK=M.aK?M.aK:\'\';q.1f.70=M.70?M.70:\'\';q.1f.6a=M.6a&&M.6a.1K==2C?M.6a:U;q.1f.2U=M.2U&&M.2U.1K==2C?M.2U:U;q.1f.3i=M.3i&&M.3i.1K==2C?M.3i:U;q.1f.6Z=M.6Z&&M.6Z.1K==2C?M.6Z:U;q.1f.bA=M.bA||I;q.1f.aO=M.aO||I;q.1f.3K=q.1f.aO?(M.3K||\', \'):\'\';q.1f.aR=M.aR?1b:I;q.1f.53=14.3R(T(M.53)||aF);if(M.fx&&M.fx.1K==7n){if(!M.fx.1u||!/bB|1z|aT/.43(M.fx.1u)){M.fx.1u=\'1z\'}if(M.fx.1u==\'1z\'&&!k.fx.1z)E;if(M.fx.1u==\'aT\'&&!k.fx.5W)E;M.fx.1m=14.3R(T(M.fx.1m)||8w);if(M.fx.1m>q.1f.53){M.fx.1m=q.1f.53-2b}q.1f.fx=M.fx}q.1f.4h=U;q.1f.aW=I;k(q).1p(\'bu\',\'fQ\').6D(u(){k.N.1d=q;k.N.4o=q.2v}).fX(k.N.gb).6S(k.N.bu).5I(u(){k.N.2H=1V.b1(k.N.4u,jP)})})}};k.fn.jO=k.N.2s;k.1y={2H:U,4E:U,29:U,2D:10,28:u(el,4P,2D,di){k.1y.4E=el;k.1y.29=4P;k.1y.2D=T(2D)||10;k.1y.2H=1V.6I(k.1y.db,T(di)||40)},db:u(){1Y(i=0;i0&&k.1y.29[i].30.y+k.1y.29[i].30.t>6f.y){k.1y.29[i].2V-=k.1y.2D}P 
if(k.1y.29[i].30.t<=k.1y.29[i].30.h&&k.1y.29[i].30.t+k.1y.29[i].30.hb<6f.y+6f.hb){k.1y.29[i].2V+=k.1y.2D}if(k.1y.29[i].30.l>0&&k.1y.29[i].30.x+k.1y.29[i].30.l>6f.x){k.1y.29[i].3g-=k.1y.2D}P if(k.1y.29[i].30.l<=k.1y.29[i].30.jT&&k.1y.29[i].30.l+k.1y.29[i].30.1D<6f.x+6f.1D){k.1y.29[i].3g+=k.1y.2D}}},8v:u(){1V.6c(k.1y.2H);k.1y.4E=U;k.1y.29=U;1Y(i in k.1y.29){k.1y.29[i].30=U}}};k.6y={2s:u(M){E q.1B(u(){D el=q;el.1G={1S:k(M.1S,q),1Z:k(M.1Z,q),1M:k.1a.2R(q),2F:M.2F,aN:M.aN,7R:M.7R,dw:M.dw,51:M.51,6q:M.6q};k.6y.aJ(el,0);k(1V).1H(\'jS\',u(){el.1G.1M=k.1a.2R(el);k.6y.aJ(el,0);k.6y.7P(el)});k.6y.7P(el);el.1G.1S.1H(\'aV\',u(){k(el.1G.aN,q).K(0).18.19=\'2E\'}).1H(\'8q\',u(){k(el.1G.aN,q).K(0).18.19=\'1n\'});k(1j).1H(\'3H\',u(e){D 1s=k.1a.44(e);D 5q=0;if(el.1G.51&&el.1G.51==\'b8\')D aQ=1s.x-el.1G.1M.x-(el.4b-el.1G.2F*el.1G.1S.1P())/2-el.1G.2F/2;P if(el.1G.51&&el.1G.51==\'2N\')D aQ=1s.x-el.1G.1M.x-el.4b+el.1G.2F*el.1G.1S.1P();P D aQ=1s.x-el.1G.1M.x;D dB=14.5Y(1s.y-el.1G.1M.y-el.63/2,2);el.1G.1S.1B(u(2I){46=14.dm(14.5Y(aQ-2I*el.1G.2F,2)+dB);46-=el.1G.2F/2;46=46<0?0:46;46=46>el.1G.7R?el.1G.7R:46;46=el.1G.7R-46;bC=el.1G.6q*46/el.1G.7R;q.18.Z=el.1G.2F+bC+\'S\';q.18.O=el.1G.2F*2I+5q+\'S\';5q+=bC});k.6y.aJ(el,5q)})})},aJ:u(el,5q){if(el.1G.51)if(el.1G.51==\'b8\')el.1G.1Z.K(0).18.O=(el.4b-el.1G.2F*el.1G.1S.1P())/2-5q/2+\'S\';P if(el.1G.51==\'O\')el.1G.1Z.K(0).18.O=-5q/el.1G.1S.1P()+\'S\';P if(el.1G.51==\'2N\')el.1G.1Z.K(0).18.O=(el.4b-el.1G.2F*el.1G.1S.1P())-5q/2+\'S\';el.1G.1Z.K(0).18.Z=el.1G.2F*el.1G.1S.1P()+5q+\'S\'},7P:u(el){el.1G.1S.1B(u(2I){q.18.Z=el.1G.2F+\'S\';q.18.O=el.1G.2F*2I+\'S\'})}};k.fn.jD=k.6y.2s;k.1v={M:{2B:10,eV:\'1Q/jG.eF\',eT:\'<1U 2M="1Q/5P.eC" />\',eN:0.8,e3:\'jK ab\',e5:\'5d\',3V:8w},jI:I,jU:I,6r:U,9d:I,9e:I,ca:u(2l){if(!k.1v.9e||k.1v.9d)E;D 3O=2l.7F||2l.7A||-1;3m(3O){1e 35:if(k.1v.6r)k.1v.28(U,k(\'a[@4G=\'+k.1v.6r+\']:k7\').K(0));1r;1e 36:if(k.1v.6r)k.1v.28(U,k(\'a[@4G=\'+k.1v.6r+\']:k5\').K(0));1r;1e 37:1e 8:1e 33:1e 80:1e k8:D 
ar=k(\'#9a\');if(ar.K(0).52!=U){ar.K(0).52.1F(ar.K(0))}1r;1e 38:1r;1e 39:1e 34:1e 32:1e ka:1e 78:D aD=k(\'#9b\');if(aD.K(0).52!=U){aD.K(0).52.1F(aD.K(0))}1r;1e 40:1r;1e 27:k.1v.ah();1r}},7W:u(M){if(M)k.21(k.1v.M,M);if(1V.2l){k(\'2e\',1j).1H(\'6S\',k.1v.ca)}P{k(1j).1H(\'6S\',k.1v.ca)}k(\'a\').1B(u(){el=k(q);dQ=el.1p(\'4G\')||\'\';eA=el.1p(\'3f\')||\'\';eu=/\\.eC|\\.jY|\\.95|\\.eF|\\.jW/g;if(eA.5Z().bU(eu)!=U&&dQ.5Z().3F(\'eJ\')==0){el.1H(\'5G\',k.1v.28)}});if(k.3h.4I){3E=1j.3t(\'3E\');k(3E).1p({id:\'b6\',2M:\'ew:I;\',ez:\'bX\',ey:\'bX\'}).B({19:\'1n\',Y:\'1O\',Q:\'0\',O:\'0\',69:\'aw:ax.ay.c1(1J=0)\'});k(\'2e\').1R(3E)}8Q=1j.3t(\'26\');k(8Q).1p(\'id\',\'bk\').B({Y:\'1O\',19:\'1n\',Q:\'0\',O:\'0\',1J:0}).1R(1j.8F(\' \')).1H(\'5G\',k.1v.ah);6L=1j.3t(\'26\');k(6L).1p(\'id\',\'dZ\').B({4X:k.1v.M.2B+\'S\'}).1R(1j.8F(\' \'));bZ=1j.3t(\'26\');k(bZ).1p(\'id\',\'e1\').B({4X:k.1v.M.2B+\'S\',5M:k.1v.M.2B+\'S\'}).1R(1j.8F(\' \'));cc=1j.3t(\'a\');k(cc).1p({id:\'jh\',3f:\'#\'}).B({Y:\'1O\',2N:k.1v.M.2B+\'S\',Q:\'0\'}).1R(k.1v.M.eT).1H(\'5G\',k.1v.ah);7t=1j.3t(\'26\');k(7t).1p(\'id\',\'bh\').B({Y:\'2y\',b9:\'O\',6X:\'0 ao\',3B:1}).1R(6L).1R(bZ).1R(cc);2a=1j.3t(\'1U\');2a.2M=k.1v.M.eV;k(2a).1p(\'id\',\'ep\').B({Y:\'1O\'});4R=1j.3t(\'a\');k(4R).1p({id:\'9a\',3f:\'#\'}).B({Y:\'1O\',19:\'1n\',2Y:\'2O\',eQ:\'1n\'}).1R(1j.8F(\' \'));4Q=1j.3t(\'a\');k(4Q).1p({id:\'9b\',3f:\'#\'}).B({Y:\'1O\',2Y:\'2O\',eQ:\'1n\'}).1R(1j.8F(\' \'));1Z=1j.3t(\'26\');k(1Z).1p(\'id\',\'e0\').B({19:\'1n\',Y:\'2y\',2Y:\'2O\',b9:\'O\',6X:\'0 ao\',Q:\'0\',O:\'0\',3B:2}).1R([2a,4R,4Q]);6N=1j.3t(\'26\');k(6N).1p(\'id\',\'aq\').B({19:\'1n\',Y:\'1O\',2Y:\'2O\',Q:\'0\',O:\'0\',b9:\'b8\',7f:\'b7\',j7:\'0\'}).1R([1Z,7t]);k(\'2e\').1R(8Q).1R(6N)},28:u(e,C){el=C?k(C):k(q);at=el.1p(\'4G\');D 
6P,4n,4R,4Q;if(at!=\'eJ\'){k.1v.6r=at;8N=k(\'a[@4G=\'+at+\']\');6P=8N.1P();4n=8N.b2(C?C:q);4R=8N.K(4n-1);4Q=8N.K(4n+1)}8H=el.1p(\'3f\');6L=el.1p(\'45\');3I=k.1a.6W();8Q=k(\'#bk\');if(!k.1v.9e){k.1v.9e=1b;if(k.3h.4I){k(\'#b6\').B(\'V\',14.3v(3I.ih,3I.h)+\'S\').B(\'Z\',14.3v(3I.iw,3I.w)+\'S\').22()}8Q.B(\'V\',14.3v(3I.ih,3I.h)+\'S\').B(\'Z\',14.3v(3I.iw,3I.w)+\'S\').22().eo(bw,k.1v.M.eN,u(){k.1v.bd(8H,6L,3I,6P,4n,4R,4Q)});k(\'#aq\').B(\'Z\',14.3v(3I.iw,3I.w)+\'S\')}P{k(\'#9a\').K(0).52=U;k(\'#9b\').K(0).52=U;k.1v.bd(8H,6L,3I,6P,4n,4R,4Q)}E I},bd:u(8H,jA,3I,6P,4n,4R,4Q){k(\'#bi\').aB();aC=k(\'#9a\');aC.2G();as=k(\'#9b\');as.2G();2a=k(\'#ep\');1Z=k(\'#e0\');6N=k(\'#aq\');7t=k(\'#bh\').B(\'3j\',\'2O\');k(\'#dZ\').3w(6L);k.1v.9d=1b;if(6P)k(\'#e1\').3w(k.1v.M.e3+\' \'+(4n+1)+\' \'+k.1v.M.e5+\' \'+6P);if(4R){aC.K(0).52=u(){q.5I();k.1v.28(U,4R);E I}}if(4Q){as.K(0).52=u(){q.5I();k.1v.28(U,4Q);E I}}2a.22();8E=k.1a.2p(1Z.K(0));5f=14.3v(8E.1D,2a.K(0).Z+k.1v.M.2B*2);5T=14.3v(8E.hb,2a.K(0).V+k.1v.M.2B*2);2a.B({O:(5f-2a.K(0).Z)/2+\'S\',Q:(5T-2a.K(0).V)/2+\'S\'});1Z.B({Z:5f+\'S\',V:5T+\'S\'}).22();e4=k.1a.bq();6N.B(\'Q\',3I.t+(e4.h/15)+\'S\');if(6N.B(\'19\')==\'1n\'){6N.22().7m(k.1v.M.3V)}6U=11 aH;k(6U).1p(\'id\',\'bi\').1H(\'jk\',u(){5f=6U.Z+k.1v.M.2B*2;5T=6U.V+k.1v.M.2B*2;2a.2G();1Z.5K({V:5T},8E.hb!=5T?k.1v.M.3V:1,u(){1Z.5K({Z:5f},8E.1D!=5f?k.1v.M.3V:1,u(){1Z.cA(6U);k(6U).B({Y:\'1O\',O:k.1v.M.2B+\'S\',Q:k.1v.M.2B+\'S\'}).7m(k.1v.M.3V,u(){dS=k.1a.2p(7t.K(0));if(4R){aC.B({O:k.1v.M.2B+\'S\',Q:k.1v.M.2B+\'S\',Z:5f/2-k.1v.M.2B*3+\'S\',V:5T-k.1v.M.2B*2+\'S\'}).22()}if(4Q){as.B({O:5f/2+k.1v.M.2B*2+\'S\',Q:k.1v.M.2B+\'S\',Z:5f/2-k.1v.M.2B*3+\'S\',V:5T-k.1v.M.2B*2+\'S\'}).22()}7t.B({Z:5f+\'S\',Q:-dS.hb+\'S\',3j:\'dR\'}).5K({Q:-1},k.1v.M.3V,u(){k.1v.9d=I})})})})});6U.2M=8H},ah:u(){k(\'#bi\').aB();k(\'#aq\').2G();k(\'#bh\').B(\'3j\',\'2O\');k(\'#bk\').eo(bw,0,u(){k(q).2G();if(k.3h.4I){k(\'#b6\').2G()}});k(\'#9a\').K(0).52=U;k(\'#9b\').K(0).52=U;k.1v.6r=U;k.1v.9e=I;k.1v.9d=I;E 
I}};k.2A={5E:[],eS:u(){q.5I();X=q.3e;id=k.1p(X,\'id\');if(k.2A.5E[id]!=U){1V.6c(k.2A.5E[id])}1z=X.L.3x+1;if(X.L.1Q.1h<1z){1z=1}1Q=k(\'1U\',X.L.5F);X.L.3x=1z;if(1Q.1P()>0){1Q.7k(X.L.3V,k.2A.8B)}},eG:u(){q.5I();X=q.3e;id=k.1p(X,\'id\');if(k.2A.5E[id]!=U){1V.6c(k.2A.5E[id])}1z=X.L.3x-1;1Q=k(\'1U\',X.L.5F);if(1z<1){1z=X.L.1Q.1h}X.L.3x=1z;if(1Q.1P()>0){1Q.7k(X.L.3V,k.2A.8B)}},2H:u(c){X=1j.cP(c);if(X.L.6w){1z=X.L.3x;7o(1z==X.L.3x){1z=1+T(14.6w()*X.L.1Q.1h)}}P{1z=X.L.3x+1;if(X.L.1Q.1h<1z){1z=1}}1Q=k(\'1U\',X.L.5F);X.L.3x=1z;if(1Q.1P()>0){1Q.7k(X.L.3V,k.2A.8B)}},go:u(o){D X;if(o&&o.1K==7n){if(o.2a){X=1j.cP(o.2a.X);6b=1V.kK.3f.7h("#");o.2a.6B=U;if(6b.1h==2){1z=T(6b[1]);22=6b[1].4v(1z,\'\');if(k.1p(X,\'id\')!=22){1z=1}}P{1z=1}}if(o.8A){o.8A.5I();X=o.8A.3e.3e;id=k.1p(X,\'id\');if(k.2A.5E[id]!=U){1V.6c(k.2A.5E[id])}6b=o.8A.3f.7h("#");1z=T(6b[1]);22=6b[1].4v(1z,\'\');if(k.1p(X,\'id\')!=22){1z=1}}if(X.L.1Q.1h<1z||1z<1){1z=1}X.L.3x=1z;5h=k.1a.2p(X);e8=k.1a.aj(X);e9=k.1a.6h(X);if(X.L.3s){X.L.3s.o.B(\'19\',\'1n\')}if(X.L.3r){X.L.3r.o.B(\'19\',\'1n\')}if(X.L.2a){y=T(e8.t)+T(e9.t);if(X.L.1T){if(X.L.1T.5z==\'Q\'){y+=X.L.1T.4q.hb}P{5h.h-=X.L.1T.4q.hb}}if(X.L.2w){if(X.L.2w&&X.L.2w.6s==\'Q\'){y+=X.L.2w.4q.hb}P{5h.h-=X.L.2w.4q.hb}}if(!X.L.cV){X.L.eg=o.2a?o.2a.V:(T(X.L.2a.B(\'V\'))||0);X.L.cV=o.2a?o.2a.Z:(T(X.L.2a.B(\'Z\'))||0)}X.L.2a.B(\'Q\',y+(5h.h-X.L.eg)/2+\'S\');X.L.2a.B(\'O\',(5h.1D-X.L.cV)/2+\'S\');X.L.2a.B(\'19\',\'2E\')}1Q=k(\'1U\',X.L.5F);if(1Q.1P()>0){1Q.7k(X.L.3V,k.2A.8B)}P{aP=k(\'a\',X.L.1T.o).K(1z-1);k(aP).2Z(X.L.1T.64);D 1U=11 aH();1U.X=k.1p(X,\'id\');1U.1z=1z-1;1U.2M=X.L.1Q[X.L.3x-1].2M;if(1U.23){1U.6B=U;k.2A.19.1F(1U)}P{1U.6B=k.2A.19}if(X.L.2w){X.L.2w.o.3w(X.L.1Q[1z-1].6v)}}}},8B:u(){X=q.3e.3e;X.L.5F.B(\'19\',\'1n\');if(X.L.1T.64){aP=k(\'a\',X.L.1T.o).4p(X.L.1T.64).K(X.L.3x-1);k(aP).2Z(X.L.1T.64)}D 1U=11 
aH();1U.X=k.1p(X,\'id\');1U.1z=X.L.3x-1;1U.2M=X.L.1Q[X.L.3x-1].2M;if(1U.23){1U.6B=U;k.2A.19.1F(1U)}P{1U.6B=k.2A.19}if(X.L.2w){X.L.2w.o.3w(X.L.1Q[X.L.3x-1].6v)}},19:u(){X=1j.cP(q.X);if(X.L.3s){X.L.3s.o.B(\'19\',\'1n\')}if(X.L.3r){X.L.3r.o.B(\'19\',\'1n\')}5h=k.1a.2p(X);y=0;if(X.L.1T){if(X.L.1T.5z==\'Q\'){y+=X.L.1T.4q.hb}P{5h.h-=X.L.1T.4q.hb}}if(X.L.2w){if(X.L.2w&&X.L.2w.6s==\'Q\'){y+=X.L.2w.4q.hb}P{5h.h-=X.L.2w.4q.hb}}kD=k(\'.cz\',X);y=y+(5h.h-q.V)/2;x=(5h.1D-q.Z)/2;X.L.5F.B(\'Q\',y+\'S\').B(\'O\',x+\'S\').3w(\'<1U 2M="\'+q.2M+\'" />\');X.L.5F.7m(X.L.3V);3r=X.L.3x+1;if(3r>X.L.1Q.1h){3r=1}3s=X.L.3x-1;if(3s<1){3s=X.L.1Q.1h}X.L.3r.o.B(\'19\',\'2E\').B(\'Q\',y+\'S\').B(\'O\',x+2*q.Z/3+\'S\').B(\'Z\',q.Z/3+\'S\').B(\'V\',q.V+\'S\').1p(\'45\',X.L.1Q[3r-1].6v);X.L.3r.o.K(0).3f=\'#\'+3r+k.1p(X,\'id\');X.L.3s.o.B(\'19\',\'2E\').B(\'Q\',y+\'S\').B(\'O\',x+\'S\').B(\'Z\',q.Z/3+\'S\').B(\'V\',q.V+\'S\').1p(\'45\',X.L.1Q[3s-1].6v);X.L.3s.o.K(0).3f=\'#\'+3s+k.1p(X,\'id\')},2s:u(o){if(!o||!o.1Z||k.2A.5E[o.1Z])E;D 1Z=k(\'#\'+o.1Z);D el=1Z.K(0);if(el.18.Y!=\'1O\'&&el.18.Y!=\'2y\'){el.18.Y=\'2y\'}el.18.2Y=\'2O\';if(1Z.1P()==0)E;el.L={};el.L.1Q=o.1Q?o.1Q:[];el.L.6w=o.6w&&o.6w==1b||I;8b=el.dU(\'kA\');1Y(i=0;i<8b.1h;i++){7I=el.L.1Q.1h;el.L.1Q[7I]={2M:8b[i].2M,6v:8b[i].45||8b[i].kC||\'\'}}if(el.L.1Q.1h==0){E}el.L.4m=k.21(k.1a.2R(el),k.1a.2p(el));el.L.d5=k.1a.aj(el);el.L.cL=k.1a.6h(el);t=T(el.L.d5.t)+T(el.L.cL.t);b=T(el.L.d5.b)+T(el.L.cL.b);k(\'1U\',el).aB();el.L.3V=o.3V?o.3V:er;if(o.5z||o.88||o.64){el.L.1T={};1Z.1R(\'<26 6A="eL">\');el.L.1T.o=k(\'.eL\',el);if(o.88){el.L.1T.88=o.88;el.L.1T.o.2Z(o.88)}if(o.64){el.L.1T.64=o.64}el.L.1T.o.B(\'Y\',\'1O\').B(\'Z\',el.L.4m.w+\'S\');if(o.5z&&o.5z==\'Q\'){el.L.1T.5z=\'Q\';el.L.1T.o.B(\'Q\',t+\'S\')}P{el.L.1T.5z=\'4l\';el.L.1T.o.B(\'4l\',b+\'S\')}el.L.1T.au=o.au?o.au:\' \';1Y(D i=0;i\'+7I+\'\'+(7I!=el.L.1Q.1h?el.L.1T.au:\'\'))}k(\'a\',el.L.1T.o).1H(\'5G\',u(){k.2A.go({8A:q})});el.L.1T.4q=k.1a.2p(el.L.1T.o.K(0))}if(o.6s||o.8l){el.L.2w={};1Z.1R(\'<26 
6A="eK">&7J;\');el.L.2w.o=k(\'.eK\',el);if(o.8l){el.L.2w.8l=o.8l;el.L.2w.o.2Z(o.8l)}el.L.2w.o.B(\'Y\',\'1O\').B(\'Z\',el.L.4m.w+\'S\');if(o.6s&&o.6s==\'Q\'){el.L.2w.6s=\'Q\';el.L.2w.o.B(\'Q\',(el.L.1T&&el.L.1T.5z==\'Q\'?el.L.1T.4q.hb+t:t)+\'S\')}P{el.L.2w.6s=\'4l\';el.L.2w.o.B(\'4l\',(el.L.1T&&el.L.1T.5z==\'4l\'?el.L.1T.4q.hb+b:b)+\'S\')}el.L.2w.4q=k.1a.2p(el.L.2w.o.K(0))}if(o.az){el.L.3r={az:o.az};1Z.1R(\'&7J;\');el.L.3r.o=k(\'.eR\',el);el.L.3r.o.B(\'Y\',\'1O\').B(\'19\',\'1n\').B(\'2Y\',\'2O\').B(\'4w\',\'eB\').2Z(el.L.3r.az);el.L.3r.o.1H(\'5G\',k.2A.eS)}if(o.av){el.L.3s={av:o.av};1Z.1R(\'&7J;\');el.L.3s.o=k(\'.ev\',el);el.L.3s.o.B(\'Y\',\'1O\').B(\'19\',\'1n\').B(\'2Y\',\'2O\').B(\'4w\',\'eB\').2Z(el.L.3s.av);el.L.3s.o.1H(\'5G\',k.2A.eG)}1Z.cA(\'<26 6A="cz">\');el.L.5F=k(\'.cz\',el);el.L.5F.B(\'Y\',\'1O\').B(\'Q\',\'3c\').B(\'O\',\'3c\').B(\'19\',\'1n\');if(o.2a){1Z.cA(\'<26 6A="eD" 18="19: 1n;"><1U 2M="\'+o.2a+\'" />\');el.L.2a=k(\'.eD\',el);el.L.2a.B(\'Y\',\'1O\');D 1U=11 aH();1U.X=o.1Z;1U.2M=o.2a;if(1U.23){1U.6B=U;k.2A.go({2a:1U})}P{1U.6B=u(){k.2A.go({2a:q})}}}P{k.2A.go({1Z:el})}if(o.cB){do=T(o.cB)*aF}k.2A.5E[o.1Z]=o.cB?1V.6I(\'k.2A.2H(\\\'\'+o.1Z+\'\\\')\',do):U}};k.X=k.2A.2s;k.8e={cN:u(e){3O=e.7F||e.7A||-1;if(3O==9){if(1V.2l){1V.2l.cj=1b;1V.2l.ci=I}P{e.al();e.am()}if(q.aI){1j.6G.du().31="\\t";q.dv=u(){q.6D();q.dv=U}}P if(q.aU){28=q.5B;2X=q.dq;q.2v=q.2v.iL(0,28)+"\\t"+q.2v.hm(2X);q.aU(28+1,28+1);q.6D()}E I}},58:u(){E q.1B(u(){if(q.7D&&q.7D==1b){k(q).3p(\'7E\',k.8e.cN);q.7D=I}})},2s:u(){E 
q.1B(u(){if(q.4S==\'cQ\'&&(!q.7D||q.7D==I)){k(q).1H(\'7E\',k.8e.cN);q.7D=1b}})}};k.fn.21({hS:k.8e.2s,hP:k.8e.58});',62,1292,'||||||||||||||||||||jQuery||||||this||||function||||||dragCfg|css|elm|var|return|dragged|easing|speed|false|callback|get|ss|options|iAuto|left|else|top|iResize|px|parseInt|null|height|oldStyle|slideshow|position|width||new|iDrag||Math||||style|display|iUtil|true|helper|subject|case|autoCFG|resizeOptions|length|dropCfg|document|iEL|carouselCfg|duration|none|interfaceFX|attr|sizes|break|pointer|iSort|type|ImageBox|queue|iDrop|iAutoscroller|slide|resizeElement|each|oC|wb|newSizes|apply|fisheyeCfg|bind|delta|opacity|constructor|custom|pos|axis|absolute|size|images|append|items|slideslinks|img|window|firstNum|255|for|container||extend|show|complete|cont||div||start|elsToScroll|loader|100|oR||body|oldP|selectedItem|typeof|elem|accordionCfg|props|event|parseFloat|newPosition|containment|getSize|field|ny|build|iTooltip|selectHelper|value|slideCaption|nx|relative|tp|islideshow|border|Function|step|block|itemWidth|hide|timer|nr|limit|fractions|dequeue|src|right|hidden|direction|PI|getPosition|cursorAt|onChange|onShow|scrollTop|result|end|overflow|addClass|parentData|text|||||||||scr|className|0px|iSlider|parentNode|href|scrollLeft|browser|onHide|visibility|item|pre|switch|selectdrug|wrapper|unbind|newCoords|nextslide|prevslide|createElement|values|max|html|currentslide|handle|onSlide|margins|zIndex|wrs|min|iframe|indexOf|valueToAdd|mousemove|pageSize|zones|multipleSeparator|iExpander|curCSS|canvas|pressedKey|accept|resizeDirection|abs|onStop|diff|handlers|fadeDuration|highlighted|dhs|toggle|dragElem||times||test|getPointer|title|distance||so|vp|horizontally|offsetWidth|startLeft|out|transferEl|startTop|subjectValue|lastSuggestion|vertically|ghosting|DropOutDirectiont|bottom|oP|iteration|lastValue|removeClass|dimm|slideCfg|ifxFirstDisplay|currentPointer|clear|replace|fontSize|onDrag|down|percent|onStart|nWidth|color|ratio|elToScroll|fieldData|rel|context
|msie|documentElement|params|to|shs|dragHandle|fxCheckTag|els|nextImage|prevImage|tagName|tooltipCFG|up|helperclass|endLeft|paddingLeft|currentStyle|borderTopWidth||halign|onclick|delay|nodeEl||chunks|endTop|destroy|dragmoveBy|borderLeftWidth|mousedown|nHeight|from|dhe|containerW|string|slidePos|si|collected|marginLeft|overzone|marginBottom|getAttribute|marginTop|marginRight|toAdd|zonex|clonedEl|empty|newStyles|cos|hight|toWrite|zoney|linksPosition|OpenClose|selectionStart|clientScroll|cnt|slideshows|holder|click|restoreStyle|blur|onDragModifier|animate|elS|paddingBottom|toDrag|sw|close|post|animationHandler|styles|containerH|prop|sortCfg|BlindDirection|nmp|pow|toLowerCase||mouseup|oldVisibility|offsetHeight|activeLinkClass|old|paddingTop|grid|point|filter|onSelect|url|clearInterval|fxh|currentPanel|elementData|borderBottomWidth|getBorder|cur|paddingRight|borderRightWidth|puff|snapDistance|tolerance|revert|hpc|maxWidth|currentRel|captionPosition|Expander|orig|caption|random|3000|iFisheye|Scale|class|onload|wr|focus|restore|128|selection|parseColor|setInterval|current|selRange|captionText|itemHeight|outerContainer|newDimensions|totalImages|getHeight|reflections|keyup|sliders|imageEl|getWidth|getScroll|margin|Draggable|onHighlight|selectClass|getTime|Date|oldStyleAttr|onClick||scrollIntoView|firstChild||data|ActiveXObject|Array|focused|accordionPos|open|backgroundColor|zoneh|split|oD|zonew|fadeOut|user|fadeIn|Object|while|minLeft|nw|startDrag|minTop|captionEl|newTop|newLeft|frameClass|increment|F0|0x|keyCode|139|toInteger|hasTabsEnabled|keydown|charCode|cssRules|rule|indic|nbsp|rgb|np|oldDisplay|opera|radiusY|positionItems|onOut|proximity|efx|onHover|hash|changed|init|sc|inFrontOf|selectKeyHelper||selectCurrent|getSizeLite|1px|contBorders||ts|parentEl|linksClass|parentBorders|yproc|imgs|nRx|fnc|iTTabs|panels|insideParent|fontWeight|object|nRy|clientWidth|captionClass|namedColors|offsetLeft|serialize|cssSides|mouseout|activeClass|targetEl|offsetTop|expand|stop|400|pr|d
irectionIncrement|clientHeight|link|showImage|move|sx|containerSize|createTextNode|jEl|imageSrc|ser|newPos|selectedone|minHeight|maxHeight|gallery|dir|applyOn|overlay|sh|content|maxRight|maxBottom|tooltipHelper|count|onselectstop|onselect|select|li|reflectionSize|padding|selectBorders|cursor|png|parent|finishedPre|sin|xproc|ImageBoxPrevImage|ImageBoxNextImage|bounceout|animationInProgress|opened|sy|destroyWrapper|buildWrapper|diffWidth|diffHeight|iIndex|diffX|diffY|prot|hidehelper|dEs|isDraggable|onDrop|minWidth|side|isDroppable|onActivate|dragstop|startTime|211|192|nodeName|self|oldPosition|exec|opt|getValues|styleSheets|sideEnd|borderColor|ne|handleEl|unit|DoFold|5625|oldTitle|SliderContainer|unfold|9999|ScrollTo|cssText|oldColor|alpha|2000|prev|selectKeyUp|os|selectKeyDown|selectcheck|dragEl|checkhover|DraggableDestroy|next|key|hoverclass|activeclass|sl|st|image||panelSelector|headerSelector|hoverClass|panelHeight|hideImage|headers|getPadding|iCarousel|preventDefault|stopPropagation|itemMinWidth|auto|getFieldValues|ImageBoxOuterContainer|prevEl|nextImageEl|linkRel|linksSeparator|prevslideClass|progid|DXImageTransform|Microsoft|nextslideClass|valToAdd|remove|prevImageEl|nextEl|childs|1000|default|Image|createTextRange|positionContainer|helperClass|minchars|source|itemsText|multiple|lnk|posx|autofill|reflexions|blind|setSelectionRange|mouseover|inCache|ul|protectRotation|maxRotation|gradient|setTimeout|index|elPosition|writeItems|String|ImageBoxIframe|transparent|center|textAlign|paddingRightSize|paddingTopSize|bounce|loadImage|borderLeftSize|borderBottomSize|borderRightSize|ImageBoxCaption|ImageBoxCurrentImage|moveDrag|ImageBoxOverlay|paddings|borders|idsa|firstStep|currentValue|getClient||stopDrag|borderTopSize|autocomplete|zoom|300|hidefocused|intersect|INPUT|inputWidth|fade|extraWidth|sortable|restricted|isSlider|tabindex|fitToContainer|snapToGrid|slider|prevTop|prevLeft|floats|getPositionLite|modifyContainer|getContainment|lastSi|SliderIteration|sliderEl|selec
tstop|match|linear|character|no|bouncein|captionImages|asin|Alpha|Selectserialize|mouse|initialPosition|measure|clearTimeout|helperSize|getMargins|tooltipURL|keyPressed|applyOnHover|closeEl|10000|parentPos|sliderSize|sliderPos|angle|returnValue|cancelBubble|spacer|oldBorder|pulse|169|entities|RegExp|Color|Pulsate||rotationSpeed|parseStyle|stopAnim|cssSidesEnd|shake|Shake|slideshowHolder|prepend|autoplay|floatVal|borderWidth|scroll|paddingY|pValue|letterSpacing|paddingX|paddingBottomSize|pause|oBor|clnt|doTab|autoSize|getElementById|TEXTAREA|Number|traverseDOM|func|draginit|loaderWidth|scrollHeight|paddingLeftSize|scrollWidth|oneIsSortable|innerWidth|innerHeight|shrink|windowSize|unselectable|oPad|dragmove|oldFloat|cssProps|colorCssProps|107|doScroll|addItem|SortableAddItem||DroppableDestroy|fxe||interval|after|insertBefore||sqrt|cloneNode|time|check|selectionEnd|offsetParent|Width|sortHelper|createRange|onblur|valign|||onout|224|posy|wid|isSortable|165|zindex|245|notColor|140|240|230|144|styleFloat|onhover|Droppable|emptyGIF|relAttr|visible|captionSize|dragstart|getElementsByTagName|listStyle|dragHelper|getHeightMinMax|onResize|ImageBoxCaptionText|ImageBoxContainer|ImageBoxCaptionImages||textImage|clientSize|textImageFrom|userSelect|onDragStop|slidePad|slideBor|highlight|shc|hlt|checkdrop|fit||loaderHeight||onDragStart|KhtmlUserSelect|remeasure|||on|fadeTo|ImageBoxLoader||500|||imageTypes|slideshowPrevslide|javascript|selectstopApply|scrolling|frameborder|hrefAttr|30px|jpg|slideshowLoader|selectedclass|gif|goprev|oldOverflow|isFunction|imagebox|slideshowCaption|slideshowLinks|directions|overlayOpacity|se|trim|textDecoration|slideshowNextSlide|gonext|closeHTML|selectcheckApply|loaderSRC|selectstart|isSelectable|360|radiusX|set|grow|hoverItem|SlideOutUp|leftUnit|boxModel|interfaceColorFX|fakeAccordionClass|togglever|elType|iBounce|paddingBottomUnit|wordSpacing|150|mousex|iAccordion|fontFamily|togglehor|fontUnit|filteredPosition|paddingTopUnit|parte|itemZIndex||selRang
e2|finish|paddingLeftUnit|moveStart|paddingRightUnit|xml|itransferTo|borderLeftUnit|borderTopUnit||update|BlindUp||borderRightUnit|checkCache|getSelectionStart|borderBottomUnit|tooltipTitle|easeout|expanderHelper|fontStyle|fontStretch|containerMaxx|yfrac|topUnit|containerMaxy|clickItem|BlindDown|off|inputValue|fracH|fontVariant|rgba|maxy|maxx|keypress|fracW|xfrac|horizontal|addColorStop|htmlEntities|vertical|dragmoveByKey|autocompleteIframe|onslide|fold|parts|SlideInUp|getContext|protect|autocompleteHelper|olive|orange|pink|white|maroon|navy|magenta|203|193|rotationTimer|lightpink||red|lightyellow|182|lime||purple|silver|Top|||inset|outset|SlideOutRight|SlideInRight|ridge|groove|dashed|solid|double|SlideToggleLeft|SlideOutLeft|SlideOutDown|SlideInDown|SlideToggleUp|scrollTo|selectorText|rules|borderStyle|SlideInLeft|SlideToggleDown|dotted|SlideToggleRight|textIndent|borderBottomColor|borderLeftColor|borderRightColor|outlineWidth|outlineOffset|TransferTo|transferHelper|lineHeight|borderTopColor|outlineColor|hover|Accordion|isNaN|Carousel|stopAll|||Right|Bottom|Left|yellow|215|option|frameset|optgroup|meta|substr|frame|script|col|colgroup||th|header|removeChild|float|ol|finishx|fxWrapper|starty|table|form|w_|input|textarea|button|tfoot|thead|pageX|drawImage|clientX|pageY|clientY|globalCompositeOperation|destination|DisableTabs|createLinearGradient|fillStyle|EnableTabs|scale|nextSibling|prototype|tr|td|tbody|AlphaImageLoader|fixPNG|purgeEvents|translate|centerEl|save|cssFloat|startx|fuchsia|148|gold|green|indigo|darkviolet||122||204||darkred|darksalmon|233|130|khaki||lightcyan|lightgreen|238|fillRect||fill|216|appVersion||WebKit|lightblue|173|153|darkorchid|black|220|blue|brown|cyan|beige|azure|finishOpacity|appendChild|substring|aqua|darkblue|darkcyan|darkmagenta|darkolivegreen|navigator|darkorange|183|189|darkgrey|flipv|darkgreen|darkkhaki|lightgrey|amp|BlindToggleHorizontally|BlindRight|BlindLeft|ResizableDestroy|Resizable|120|lineHeigt|collapse|BlindToggleVerticall
y|moveEnd|elasticin|bounceboth|984375|elasticout|elasticboth|duplicate|ImageBoxClose|DropOutDown|DropInDown|load|DropToggleRight|DropInRight|Fold|UnFold|Shrink|Grow|FoldToggle|DropOutRight|DropToggleLeft|DropInUp|DropOutUp|DropToggleDown|DropToggleUp|DropOutLeft|DropInLeft|captiontext|625|9375|Fisheye|30001|list|loading|fix|imageLoaded|childNodes|Showing|onchange|30002|SortSerialize|Autocomplete|200|SortableDestroy|Sortable|resize|wh|firstResize|Slider|bmp|100000|jpeg|Selectable|ToolTip|easeboth|easein|nodeValue|http|first|before|last|112|SliderSetValues|110|SliderGetValues|array|Bounce|Autoexpand|onselectstart|CloseVertically|mozUserSelect|fromHandler|ondragstart|MozUserSelect|number|pW|toUpperCase|khtml|find|CloseHorizontally|SwitchHorizontally|ScrollToAnchors|Puff|slideshowLink|password|quot|OpenHorizontally|OpenVertically|SwitchVertically|IMG|lt|alt|par|moz|success|POST|recallDroppables|param|pt|location|Highlight|100000000|ajax|ondrop|name'.split('|'),0,{})) diff --git a/sphinx/style/jquery.js b/sphinx/style/jquery.js new file mode 100644 index 000000000..428d42da7 --- /dev/null +++ b/sphinx/style/jquery.js @@ -0,0 +1,2344 @@ +// prevent execution of jQuery if included more than once +if(typeof window.jQuery == "undefined") { +/* + * jQuery 1.1.3.1 - New Wave Javascript + * + * Copyright (c) 2007 John Resig (jquery.com) + * Dual licensed under the MIT (MIT-LICENSE.txt) + * and GPL (GPL-LICENSE.txt) licenses. 
+ * + * $Date: 2007-07-05 00:43:24 -0400 (Thu, 05 Jul 2007) $ + * $Rev: 2243 $ + */ + +// Global undefined variable +window.undefined = window.undefined; +var jQuery = function(a,c) { + // If the context is global, return a new object + if ( window == this || !this.init ) + return new jQuery(a,c); + + return this.init(a,c); +}; + +// Map over the $ in case of overwrite +if ( typeof $ != "undefined" ) + jQuery._$ = $; + +// Map the jQuery namespace to the '$' one +var $ = jQuery; + +jQuery.fn = jQuery.prototype = { + init: function(a,c) { + // Make sure that a selection was provided + a = a || document; + + // HANDLE: $(function) + // Shortcut for document ready + if ( jQuery.isFunction(a) ) + return new jQuery(document)[ jQuery.fn.ready ? "ready" : "load" ]( a ); + + // Handle HTML strings + if ( typeof a == "string" ) { + // HANDLE: $(html) -> $(array) + var m = /^[^<]*(<(.|\s)+>)[^>]*$/.exec(a); + if ( m ) + a = jQuery.clean( [ m[1] ] ); + + // HANDLE: $(expr) + else + return new jQuery( c ).find( a ); + } + + return this.setArray( + // HANDLE: $(array) + a.constructor == Array && a || + + // HANDLE: $(arraylike) + // Watch for when an array-like object is passed as the selector + (a.jquery || a.length && a != window && !a.nodeType && a[0] != undefined && a[0].nodeType) && jQuery.makeArray( a ) || + + // HANDLE: $(*) + [ a ] ); + }, + jquery: "1.1.3.1", + + size: function() { + return this.length; + }, + + length: 0, + + get: function( num ) { + return num == undefined ? 
+ + // Return a 'clean' array + jQuery.makeArray( this ) : + + // Return just the object + this[num]; + }, + pushStack: function( a ) { + var ret = jQuery(a); + ret.prevObject = this; + return ret; + }, + setArray: function( a ) { + this.length = 0; + [].push.apply( this, a ); + return this; + }, + each: function( fn, args ) { + return jQuery.each( this, fn, args ); + }, + index: function( obj ) { + var pos = -1; + this.each(function(i){ + if ( this == obj ) pos = i; + }); + return pos; + }, + + attr: function( key, value, type ) { + var obj = key; + + // Look for the case where we're accessing a style value + if ( key.constructor == String ) + if ( value == undefined ) + return this.length && jQuery[ type || "attr" ]( this[0], key ) || undefined; + else { + obj = {}; + obj[ key ] = value; + } + + // Check to see if we're setting style values + return this.each(function(index){ + // Set all the styles + for ( var prop in obj ) + jQuery.attr( + type ? this.style : this, + prop, jQuery.prop(this, obj[prop], type, index, prop) + ); + }); + }, + + css: function( key, value ) { + return this.attr( key, value, "curCSS" ); + }, + + text: function(e) { + if ( typeof e == "string" ) + return this.empty().append( document.createTextNode( e ) ); + + var t = ""; + jQuery.each( e || this, function(){ + jQuery.each( this.childNodes, function(){ + if ( this.nodeType != 8 ) + t += this.nodeType != 1 ? 
+ this.nodeValue : jQuery.fn.text([ this ]); + }); + }); + return t; + }, + + wrap: function() { + // The elements to wrap the target around + var a, args = arguments; + + // Wrap each of the matched elements individually + return this.each(function(){ + if ( !a ) + a = jQuery.clean(args, this.ownerDocument); + + // Clone the structure that we're using to wrap + var b = a[0].cloneNode(true); + + // Insert it before the element to be wrapped + this.parentNode.insertBefore( b, this ); + + // Find the deepest point in the wrap structure + while ( b.firstChild ) + b = b.firstChild; + + // Move the matched element to within the wrap structure + b.appendChild( this ); + }); + }, + append: function() { + return this.domManip(arguments, true, 1, function(a){ + this.appendChild( a ); + }); + }, + prepend: function() { + return this.domManip(arguments, true, -1, function(a){ + this.insertBefore( a, this.firstChild ); + }); + }, + before: function() { + return this.domManip(arguments, false, 1, function(a){ + this.parentNode.insertBefore( a, this ); + }); + }, + after: function() { + return this.domManip(arguments, false, -1, function(a){ + this.parentNode.insertBefore( a, this.nextSibling ); + }); + }, + end: function() { + return this.prevObject || jQuery([]); + }, + find: function(t) { + var data = jQuery.map(this, function(a){ return jQuery.find(t,a); }); + return this.pushStack( /[^+>] [^+>]/.test( t ) || t.indexOf("..") > -1 ? + jQuery.unique( data ) : data ); + }, + clone: function(deep) { + // Need to remove events on the element and its descendants + var $this = this.add(this.find("*")); + $this.each(function() { + this._$events = {}; + for (var type in this.$events) + this._$events[type] = jQuery.extend({},this.$events[type]); + }).unbind(); + + // Do the clone + var r = this.pushStack( jQuery.map( this, function(a){ + return a.cloneNode( deep != undefined ? 
deep : true ); + }) ); + + // Add the events back to the original and its descendants + $this.each(function() { + var events = this._$events; + for (var type in events) + for (var handler in events[type]) + jQuery.event.add(this, type, events[type][handler], events[type][handler].data); + this._$events = null; + }); + + // Return the cloned set + return r; + }, + + filter: function(t) { + return this.pushStack( + jQuery.isFunction( t ) && + jQuery.grep(this, function(el, index){ + return t.apply(el, [index]) + }) || + + jQuery.multiFilter(t,this) ); + }, + + not: function(t) { + return this.pushStack( + t.constructor == String && + jQuery.multiFilter(t, this, true) || + + jQuery.grep(this, function(a) { + return ( t.constructor == Array || t.jquery ) + ? jQuery.inArray( a, t ) < 0 + : a != t; + }) + ); + }, + + add: function(t) { + return this.pushStack( jQuery.merge( + this.get(), + t.constructor == String ? + jQuery(t).get() : + t.length != undefined && (!t.nodeName || t.nodeName == "FORM") ? + t : [t] ) + ); + }, + is: function(expr) { + return expr ? jQuery.multiFilter(expr,this).length > 0 : false; + }, + + val: function( val ) { + return val == undefined ? + ( this.length ? this[0].value : null ) : + this.attr( "value", val ); + }, + + html: function( val ) { + return val == undefined ? + ( this.length ? this[0].innerHTML : null ) : + this.empty().append( val ); + }, + domManip: function(args, table, dir, fn){ + var clone = this.length > 1, a; + + return this.each(function(){ + if ( !a ) { + a = jQuery.clean(args, this.ownerDocument); + if ( dir < 0 ) + a.reverse(); + } + + var obj = this; + + if ( table && jQuery.nodeName(this, "table") && jQuery.nodeName(a[0], "tr") ) + obj = this.getElementsByTagName("tbody")[0] || this.appendChild(document.createElement("tbody")); + + jQuery.each( a, function(){ + fn.apply( obj, [ clone ? 
this.cloneNode(true) : this ] ); + }); + + }); + } +}; + +jQuery.extend = jQuery.fn.extend = function() { + // copy reference to target object + var target = arguments[0], a = 1; + + // extend jQuery itself if only one argument is passed + if ( arguments.length == 1 ) { + target = this; + a = 0; + } + var prop; + while ( (prop = arguments[a++]) != null ) + // Extend the base object + for ( var i in prop ) target[i] = prop[i]; + + // Return the modified object + return target; +}; + +jQuery.extend({ + noConflict: function() { + if ( jQuery._$ ) + $ = jQuery._$; + return jQuery; + }, + + // This may seem like some crazy code, but trust me when I say that this + // is the only cross-browser way to do this. --John + isFunction: function( fn ) { + return !!fn && typeof fn != "string" && !fn.nodeName && + fn.constructor != Array && /function/i.test( fn + "" ); + }, + + // check if an element is in a XML document + isXMLDoc: function(elem) { + return elem.tagName && elem.ownerDocument && !elem.ownerDocument.body; + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toUpperCase() == name.toUpperCase(); + }, + // args is for internal usage only + each: function( obj, fn, args ) { + if ( obj.length == undefined ) + for ( var i in obj ) + fn.apply( obj[i], args || [i, obj[i]] ); + else + for ( var i = 0, ol = obj.length; i < ol; i++ ) + if ( fn.apply( obj[i], args || [i, obj[i]] ) === false ) break; + return obj; + }, + + prop: function(elem, value, type, index, prop){ + // Handle executable functions + if ( jQuery.isFunction( value ) ) + value = value.call( elem, [index] ); + + // exclude the following css properties to add px + var exclude = /z-?index|font-?weight|opacity|zoom|line-?height/i; + + // Handle passing in a number to a CSS property + return value && value.constructor == Number && type == "curCSS" && !exclude.test(prop) ? 
+ value + "px" : + value; + }, + + className: { + // internal only, use addClass("class") + add: function( elem, c ){ + jQuery.each( c.split(/\s+/), function(i, cur){ + if ( !jQuery.className.has( elem.className, cur ) ) + elem.className += ( elem.className ? " " : "" ) + cur; + }); + }, + + // internal only, use removeClass("class") + remove: function( elem, c ){ + elem.className = c != undefined ? + jQuery.grep( elem.className.split(/\s+/), function(cur){ + return !jQuery.className.has( c, cur ); + }).join(" ") : ""; + }, + + // internal only, use is(".class") + has: function( t, c ) { + return jQuery.inArray( c, (t.className || t).toString().split(/\s+/) ) > -1; + } + }, + swap: function(e,o,f) { + for ( var i in o ) { + e.style["old"+i] = e.style[i]; + e.style[i] = o[i]; + } + f.apply( e, [] ); + for ( var i in o ) + e.style[i] = e.style["old"+i]; + }, + + css: function(e,p) { + if ( p == "height" || p == "width" ) { + var old = {}, oHeight, oWidth, d = ["Top","Bottom","Right","Left"]; + + jQuery.each( d, function(){ + old["padding" + this] = 0; + old["border" + this + "Width"] = 0; + }); + + jQuery.swap( e, old, function() { + if ( jQuery(e).is(':visible') ) { + oHeight = e.offsetHeight; + oWidth = e.offsetWidth; + } else { + e = jQuery(e.cloneNode(true)) + .find(":radio").removeAttr("checked").end() + .css({ + visibility: "hidden", position: "absolute", display: "block", right: "0", left: "0" + }).appendTo(e.parentNode)[0]; + + var parPos = jQuery.css(e.parentNode,"position") || "static"; + if ( parPos == "static" ) + e.parentNode.style.position = "relative"; + + oHeight = e.clientHeight; + oWidth = e.clientWidth; + + if ( parPos == "static" ) + e.parentNode.style.position = "static"; + + e.parentNode.removeChild(e); + } + }); + + return p == "height" ? 
oHeight : oWidth; + } + + return jQuery.curCSS( e, p ); + }, + + curCSS: function(elem, prop, force) { + var ret; + + if (prop == "opacity" && jQuery.browser.msie) { + ret = jQuery.attr(elem.style, "opacity"); + return ret == "" ? "1" : ret; + } + + if (prop.match(/float/i)) + prop = jQuery.styleFloat; + + if (!force && elem.style[prop]) + ret = elem.style[prop]; + + else if (document.defaultView && document.defaultView.getComputedStyle) { + + if (prop.match(/float/i)) + prop = "float"; + + prop = prop.replace(/([A-Z])/g,"-$1").toLowerCase(); + var cur = document.defaultView.getComputedStyle(elem, null); + + if ( cur ) + ret = cur.getPropertyValue(prop); + else if ( prop == "display" ) + ret = "none"; + else + jQuery.swap(elem, { display: "block" }, function() { + var c = document.defaultView.getComputedStyle(this, ""); + ret = c && c.getPropertyValue(prop) || ""; + }); + + } else if (elem.currentStyle) { + var newProp = prop.replace(/\-(\w)/g,function(m,c){return c.toUpperCase();}); + ret = elem.currentStyle[prop] || elem.currentStyle[newProp]; + } + + return ret; + }, + + clean: function(a, doc) { + var r = []; + doc = doc || document; + + jQuery.each( a, function(i,arg){ + if ( !arg ) return; + + if ( arg.constructor == Number ) + arg = arg.toString(); + + // Convert html string into DOM nodes + if ( typeof arg == "string" ) { + // Trim whitespace, otherwise indexOf won't work as expected + var s = jQuery.trim(arg).toLowerCase(), div = doc.createElement("div"), tb = []; + + var wrap = + // option or optgroup + !s.indexOf("", ""] || + + !s.indexOf("", ""] || + + (!s.indexOf("", ""] || + + !s.indexOf("", ""] || + + // matched above + (!s.indexOf("", ""] || + + !s.indexOf("", ""] || + + [0,"",""]; + + // Go to html and back, then peel off extra wrappers + div.innerHTML = wrap[1] + arg + wrap[2]; + + // Move to the right depth + while ( wrap[0]-- ) + div = div.firstChild; + + // Remove IE's autoinserted from table fragments + if ( jQuery.browser.msie ) { + + // 
String was a , *may* have spurious + if ( !s.indexOf(" or + else if ( wrap[1] == "
    " && s.indexOf("= 0 ; --n ) + if ( jQuery.nodeName(tb[n], "tbody") && !tb[n].childNodes.length ) + tb[n].parentNode.removeChild(tb[n]); + + } + + arg = jQuery.makeArray( div.childNodes ); + } + + if ( 0 === arg.length && (!jQuery.nodeName(arg, "form") && !jQuery.nodeName(arg, "select")) ) + return; + + if ( arg[0] == undefined || jQuery.nodeName(arg, "form") || arg.options ) + r.push( arg ); + else + r = jQuery.merge( r, arg ); + + }); + + return r; + }, + + attr: function(elem, name, value){ + var fix = jQuery.isXMLDoc(elem) ? {} : jQuery.props; + + // Certain attributes only work when accessed via the old DOM 0 way + if ( fix[name] ) { + if ( value != undefined ) elem[fix[name]] = value; + return elem[fix[name]]; + + } else if ( value == undefined && jQuery.browser.msie && jQuery.nodeName(elem, "form") && (name == "action" || name == "method") ) + return elem.getAttributeNode(name).nodeValue; + + // IE elem.getAttribute passes even for style + else if ( elem.tagName ) { + + + if ( value != undefined ) elem.setAttribute( name, value ); + if ( jQuery.browser.msie && /href|src/.test(name) && !jQuery.isXMLDoc(elem) ) + return elem.getAttribute( name, 2 ); + return elem.getAttribute( name ); + + // elem is actually elem.style ... set the style + } else { + // IE actually uses filters for opacity + if ( name == "opacity" && jQuery.browser.msie ) { + if ( value != undefined ) { + // IE has trouble with opacity if it does not have layout + // Force it by setting the zoom level + elem.zoom = 1; + + // Set the alpha filter to set the opacity + elem.filter = (elem.filter || "").replace(/alpha\([^)]*\)/,"") + + (parseFloat(value).toString() == "NaN" ? "" : "alpha(opacity=" + value * 100 + ")"); + } + + return elem.filter ? 
+ (parseFloat( elem.filter.match(/opacity=([^)]*)/)[1] ) / 100).toString() : ""; + } + name = name.replace(/-([a-z])/ig,function(z,b){return b.toUpperCase();}); + if ( value != undefined ) elem[name] = value; + return elem[name]; + } + }, + trim: function(t){ + return t.replace(/^\s+|\s+$/g, ""); + }, + + makeArray: function( a ) { + var r = []; + + // Need to use typeof to fight Safari childNodes crashes + if ( typeof a != "array" ) + for ( var i = 0, al = a.length; i < al; i++ ) + r.push( a[i] ); + else + r = a.slice( 0 ); + + return r; + }, + + inArray: function( b, a ) { + for ( var i = 0, al = a.length; i < al; i++ ) + if ( a[i] == b ) + return i; + return -1; + }, + merge: function(first, second) { + // We have to loop this way because IE & Opera overwrite the length + // expando of getElementsByTagName + for ( var i = 0; second[i]; i++ ) + first.push(second[i]); + return first; + }, + unique: function(first) { + var r = [], num = jQuery.mergeNum++; + + for ( var i = 0, fl = first.length; i < fl; i++ ) + if ( num != first[i].mergeNum ) { + first[i].mergeNum = num; + r.push(first[i]); + } + + return r; + }, + + mergeNum: 0, + grep: function(elems, fn, inv) { + // If a string is passed in for the function, make a function + // for it (a handy shortcut) + if ( typeof fn == "string" ) + fn = new Function("a","i","return " + fn); + + var result = []; + + // Go through the array, only saving the items + // that pass the validator function + for ( var i = 0, el = elems.length; i < el; i++ ) + if ( !inv && fn(elems[i],i) || inv && !fn(elems[i],i) ) + result.push( elems[i] ); + + return result; + }, + map: function(elems, fn) { + // If a string is passed in for the function, make a function + // for it (a handy shortcut) + if ( typeof fn == "string" ) + fn = new Function("a","return " + fn); + + var result = []; + + // Go through the array, translating each of the items to their + // new value (or values). 
+ for ( var i = 0, el = elems.length; i < el; i++ ) { + var val = fn(elems[i],i); + + if ( val !== null && val != undefined ) { + if ( val.constructor != Array ) val = [val]; + result = result.concat( val ); + } + } + + return result; + } +}); + +/* + * Whether the W3C compliant box model is being used. + * + * @property + * @name $.boxModel + * @type Boolean + * @cat JavaScript + */ +new function() { + var b = navigator.userAgent.toLowerCase(); + + // Figure out what browser is being used + jQuery.browser = { + version: (b.match(/.+(?:rv|it|ra|ie)[\/: ]([\d.]+)/) || [])[1], + safari: /webkit/.test(b), + opera: /opera/.test(b), + msie: /msie/.test(b) && !/opera/.test(b), + mozilla: /mozilla/.test(b) && !/(compatible|webkit)/.test(b) + }; + + // Check to see if the W3C box model is being used + jQuery.boxModel = !jQuery.browser.msie || document.compatMode == "CSS1Compat"; + + jQuery.styleFloat = jQuery.browser.msie ? "styleFloat" : "cssFloat", + + jQuery.props = { + "for": "htmlFor", + "class": "className", + "float": jQuery.styleFloat, + cssFloat: jQuery.styleFloat, + styleFloat: jQuery.styleFloat, + innerHTML: "innerHTML", + className: "className", + value: "value", + disabled: "disabled", + checked: "checked", + readonly: "readOnly", + selected: "selected", + maxlength: "maxLength" + }; +}; + +jQuery.each({ + parent: "a.parentNode", + parents: "jQuery.parents(a)", + next: "jQuery.nth(a,2,'nextSibling')", + prev: "jQuery.nth(a,2,'previousSibling')", + siblings: "jQuery.sibling(a.parentNode.firstChild,a)", + children: "jQuery.sibling(a.firstChild)" +}, function(i,n){ + jQuery.fn[ i ] = function(a) { + var ret = jQuery.map(this,n); + if ( a && typeof a == "string" ) + ret = jQuery.multiFilter(a,ret); + return this.pushStack( ret ); + }; +}); + +jQuery.each({ + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after" +}, function(i,n){ + jQuery.fn[ i ] = function(){ + var a = arguments; + return this.each(function(){ + for ( var j 
= 0, al = a.length; j < al; j++ ) + jQuery(a[j])[n]( this ); + }); + }; +}); + +jQuery.each( { + removeAttr: function( key ) { + jQuery.attr( this, key, "" ); + this.removeAttribute( key ); + }, + addClass: function(c){ + jQuery.className.add(this,c); + }, + removeClass: function(c){ + jQuery.className.remove(this,c); + }, + toggleClass: function( c ){ + jQuery.className[ jQuery.className.has(this,c) ? "remove" : "add" ](this, c); + }, + remove: function(a){ + if ( !a || jQuery.filter( a, [this] ).r.length ) + this.parentNode.removeChild( this ); + }, + empty: function() { + while ( this.firstChild ) + this.removeChild( this.firstChild ); + } +}, function(i,n){ + jQuery.fn[ i ] = function() { + return this.each( n, arguments ); + }; +}); + +jQuery.each( [ "eq", "lt", "gt", "contains" ], function(i,n){ + jQuery.fn[ n ] = function(num,fn) { + return this.filter( ":" + n + "(" + num + ")", fn ); + }; +}); + +jQuery.each( [ "height", "width" ], function(i,n){ + jQuery.fn[ n ] = function(h) { + return h == undefined ? + ( this.length ? jQuery.css( this[0], n ) : null ) : + this.css( n, h.constructor == String ? 
h : h + "px" ); + }; +}); +jQuery.extend({ + expr: { + "": "m[2]=='*'||jQuery.nodeName(a,m[2])", + "#": "a.getAttribute('id')==m[2]", + ":": { + // Position Checks + lt: "im[3]-0", + nth: "m[3]-0==i", + eq: "m[3]-0==i", + first: "i==0", + last: "i==r.length-1", + even: "i%2==0", + odd: "i%2", + + // Child Checks + "first-child": "a.parentNode.getElementsByTagName('*')[0]==a", + "last-child": "jQuery.nth(a.parentNode.lastChild,1,'previousSibling')==a", + "only-child": "!jQuery.nth(a.parentNode.lastChild,2,'previousSibling')", + + // Parent Checks + parent: "a.firstChild", + empty: "!a.firstChild", + + // Text Check + contains: "(a.textContent||a.innerText||'').indexOf(m[3])>=0", + + // Visibility + visible: '"hidden"!=a.type&&jQuery.css(a,"display")!="none"&&jQuery.css(a,"visibility")!="hidden"', + hidden: '"hidden"==a.type||jQuery.css(a,"display")=="none"||jQuery.css(a,"visibility")=="hidden"', + + // Form attributes + enabled: "!a.disabled", + disabled: "a.disabled", + checked: "a.checked", + selected: "a.selected||jQuery.attr(a,'selected')", + + // Form elements + text: "'text'==a.type", + radio: "'radio'==a.type", + checkbox: "'checkbox'==a.type", + file: "'file'==a.type", + password: "'password'==a.type", + submit: "'submit'==a.type", + image: "'image'==a.type", + reset: "'reset'==a.type", + button: '"button"==a.type||jQuery.nodeName(a,"button")', + input: "/input|select|textarea|button/i.test(a.nodeName)" + }, + "[": "jQuery.find(m[2],a).length" + }, + + // The regular expressions that power the parsing engine + parse: [ + // Match: [@value='test'], [@foo] + /^\[ *(@)([\w-]+) *([!*$^~=]*) *('?"?)(.*?)\4 *\]/, + + // Match: [div], [div p] + /^(\[)\s*(.*?(\[.*?\])?[^[]*?)\s*\]/, + + // Match: :contains('foo') + /^(:)([\w-]+)\("?'?(.*?(\(.*?\))?[^(]*?)"?'?\)/, + + // Match: :even, :last-chlid, #id, .class + new RegExp("^([:.#]*)(" + + ( jQuery.chars = jQuery.browser.safari && jQuery.browser.version < "3.0.0" ? 
"\\w" : "(?:[\\w\u0128-\uFFFF*_-]|\\\\.)" ) + "+)") + ], + + multiFilter: function( expr, elems, not ) { + var old, cur = []; + + while ( expr && expr != old ) { + old = expr; + var f = jQuery.filter( expr, elems, not ); + expr = f.t.replace(/^\s*,\s*/, "" ); + cur = not ? elems = f.r : jQuery.merge( cur, f.r ); + } + + return cur; + }, + find: function( t, context ) { + // Quickly handle non-string expressions + if ( typeof t != "string" ) + return [ t ]; + + // Make sure that the context is a DOM Element + if ( context && !context.nodeType ) + context = null; + + // Set the correct context (if none is provided) + context = context || document; + + // Handle the common XPath // expression + if ( !t.indexOf("//") ) { + context = context.documentElement; + t = t.substr(2,t.length); + + // And the / root expression + } else if ( !t.indexOf("/") && !context.ownerDocument ) { + context = context.documentElement; + t = t.substr(1,t.length); + if ( t.indexOf("/") >= 1 ) + t = t.substr(t.indexOf("/"),t.length); + } + + // Initialize the search + var ret = [context], done = [], last; + + // Continue while a selector expression exists, and while + // we're no longer looping upon ourselves + while ( t && last != t ) { + var r = []; + last = t; + + t = jQuery.trim(t).replace( /^\/\//, "" ); + + var foundToken = false; + + // An attempt at speeding up child selectors that + // point to a specific element tag + var re = new RegExp("^[/>]\\s*(" + jQuery.chars + "+)"); + var m = re.exec(t); + + if ( m ) { + var nodeName = m[1].toUpperCase(); + + // Perform our own iteration and filter + for ( var i = 0; ret[i]; i++ ) + for ( var c = ret[i].firstChild; c; c = c.nextSibling ) + if ( c.nodeType == 1 && (nodeName == "*" || c.nodeName.toUpperCase() == nodeName.toUpperCase()) ) + r.push( c ); + + ret = r; + t = t.replace( re, "" ); + if ( t.indexOf(" ") == 0 ) continue; + foundToken = true; + } else { + re = /^((\/?\.\.)|([>\/+~]))\s*([a-z]*)/i; + + if ( (m = re.exec(t)) != null ) { + 
r = []; + + var nodeName = m[4], mergeNum = jQuery.mergeNum++; + m = m[1]; + + for ( var j = 0, rl = ret.length; j < rl; j++ ) + if ( m.indexOf("..") < 0 ) { + var n = m == "~" || m == "+" ? ret[j].nextSibling : ret[j].firstChild; + for ( ; n; n = n.nextSibling ) + if ( n.nodeType == 1 ) { + if ( m == "~" && n.mergeNum == mergeNum ) break; + + if (!nodeName || n.nodeName.toUpperCase() == nodeName.toUpperCase() ) { + if ( m == "~" ) n.mergeNum = mergeNum; + r.push( n ); + } + + if ( m == "+" ) break; + } + } else + r.push( ret[j].parentNode ); + + ret = r; + + // And remove the token + t = jQuery.trim( t.replace( re, "" ) ); + foundToken = true; + } + } + + // See if there's still an expression, and that we haven't already + // matched a token + if ( t && !foundToken ) { + // Handle multiple expressions + if ( !t.indexOf(",") ) { + // Clean the result set + if ( context == ret[0] ) ret.shift(); + + // Merge the result sets + done = jQuery.merge( done, ret ); + + // Reset the context + r = ret = [context]; + + // Touch up the selector string + t = " " + t.substr(1,t.length); + + } else { + // Optomize for the case nodeName#idName + var re2 = new RegExp("^(" + jQuery.chars + "+)(#)(" + jQuery.chars + "+)"); + var m = re2.exec(t); + + // Re-organize the results, so that they're consistent + if ( m ) { + m = [ 0, m[2], m[3], m[1] ]; + + } else { + // Otherwise, do a traditional filter check for + // ID, class, and element selectors + re2 = new RegExp("^([#.]?)(" + jQuery.chars + "*)"); + m = re2.exec(t); + } + + m[2] = m[2].replace(/\\/g, ""); + + var elem = ret[ret.length-1]; + + // Try to do a global search by ID, where we can + if ( m[1] == "#" && elem && elem.getElementById ) { + // Optimization for HTML document case + var oid = elem.getElementById(m[2]); + + // Do a quick check for the existence of the actual ID attribute + // to avoid selecting by the name attribute in IE + // also check to insure id is a string to avoid selecting an element with the name of 'id' 
inside a form + if ( (jQuery.browser.msie||jQuery.browser.opera) && oid && typeof oid.id == "string" && oid.id != m[2] ) + oid = jQuery('[@id="'+m[2]+'"]', elem)[0]; + + // Do a quick check for node name (where applicable) so + // that div#foo searches will be really fast + ret = r = oid && (!m[3] || jQuery.nodeName(oid, m[3])) ? [oid] : []; + } else { + // We need to find all descendant elements + for ( var i = 0; ret[i]; i++ ) { + // Grab the tag name being searched for + var tag = m[1] != "" || m[0] == "" ? "*" : m[2]; + + // Handle IE7 being really dumb about s + if ( tag == "*" && ret[i].nodeName.toLowerCase() == "object" ) + tag = "param"; + + r = jQuery.merge( r, ret[i].getElementsByTagName( tag )); + } + + // It's faster to filter by class and be done with it + if ( m[1] == "." ) + r = jQuery.classFilter( r, m[2] ); + + // Same with ID filtering + if ( m[1] == "#" ) { + var tmp = []; + + // Try to find the element with the ID + for ( var i = 0; r[i]; i++ ) + if ( r[i].getAttribute("id") == m[2] ) { + tmp = [ r[i] ]; + break; + } + + r = tmp; + } + + ret = r; + } + + t = t.replace( re2, "" ); + } + + } + + // If a selector string still exists + if ( t ) { + // Attempt to filter it + var val = jQuery.filter(t,r); + ret = r = val.r; + t = jQuery.trim(val.t); + } + } + + // An error occurred with the selector; + // just return an empty set instead + if ( t ) + ret = []; + + // Remove the root context + if ( ret && context == ret[0] ) + ret.shift(); + + // And combine the results + done = jQuery.merge( done, ret ); + + return done; + }, + + classFilter: function(r,m,not){ + m = " " + m + " "; + var tmp = []; + for ( var i = 0; r[i]; i++ ) { + var pass = (" " + r[i].className + " ").indexOf( m ) >= 0; + if ( !not && pass || not && !pass ) + tmp.push( r[i] ); + } + return tmp; + }, + + filter: function(t,r,not) { + var last; + + // Look for common filter expressions + while ( t && t != last ) { + last = t; + + var p = jQuery.parse, m; + + for ( var i = 0; p[i]; 
i++ ) { + m = p[i].exec( t ); + + if ( m ) { + // Remove what we just matched + t = t.substring( m[0].length ); + + m[2] = m[2].replace(/\\/g, ""); + break; + } + } + + if ( !m ) + break; + + // :not() is a special case that can be optimized by + // keeping it out of the expression list + if ( m[1] == ":" && m[2] == "not" ) + r = jQuery.filter(m[3], r, true).r; + + // We can get a big speed boost by filtering by class here + else if ( m[1] == "." ) + r = jQuery.classFilter(r, m[2], not); + + else if ( m[1] == "@" ) { + var tmp = [], type = m[3]; + + for ( var i = 0, rl = r.length; i < rl; i++ ) { + var a = r[i], z = a[ jQuery.props[m[2]] || m[2] ]; + + if ( z == null || /href|src/.test(m[2]) ) + z = jQuery.attr(a,m[2]) || ''; + + if ( (type == "" && !!z || + type == "=" && z == m[5] || + type == "!=" && z != m[5] || + type == "^=" && z && !z.indexOf(m[5]) || + type == "$=" && z.substr(z.length - m[5].length) == m[5] || + (type == "*=" || type == "~=") && z.indexOf(m[5]) >= 0) ^ not ) + tmp.push( a ); + } + + r = tmp; + + // We can get a speed boost by handling nth-child here + } else if ( m[1] == ":" && m[2] == "nth-child" ) { + var num = jQuery.mergeNum++, tmp = [], + test = /(\d*)n\+?(\d*)/.exec( + m[3] == "even" && "2n" || m[3] == "odd" && "2n+1" || + !/\D/.test(m[3]) && "n+" + m[3] || m[3]), + first = (test[1] || 1) - 0, last = test[2] - 0; + + for ( var i = 0, rl = r.length; i < rl; i++ ) { + var node = r[i], parentNode = node.parentNode; + + if ( num != parentNode.mergeNum ) { + var c = 1; + + for ( var n = parentNode.firstChild; n; n = n.nextSibling ) + if ( n.nodeType == 1 ) + n.nodeIndex = c++; + + parentNode.mergeNum = num; + } + + var add = false; + + if ( first == 1 ) { + if ( last == 0 || node.nodeIndex == last ) + add = true; + } else if ( (node.nodeIndex + last) % first == 0 ) + add = true; + + if ( add ^ not ) + tmp.push( node ); + } + + r = tmp; + + // Otherwise, find the expression to execute + } else { + var f = jQuery.expr[m[1]]; + if ( typeof f 
!= "string" ) + f = jQuery.expr[m[1]][m[2]]; + + // Build a custom macro to enclose it + eval("f = function(a,i){return " + f + "}"); + + // Execute it against the current filter + r = jQuery.grep( r, f, not ); + } + } + + // Return an array of filtered elements (r) + // and the modified expression string (t) + return { r: r, t: t }; + }, + parents: function( elem ){ + var matched = []; + var cur = elem.parentNode; + while ( cur && cur != document ) { + matched.push( cur ); + cur = cur.parentNode; + } + return matched; + }, + nth: function(cur,result,dir,elem){ + result = result || 1; + var num = 0; + + for ( ; cur; cur = cur[dir] ) + if ( cur.nodeType == 1 && ++num == result ) + break; + + return cur; + }, + sibling: function( n, elem ) { + var r = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType == 1 && (!elem || n != elem) ) + r.push( n ); + } + + return r; + } +}); +/* + * A number of helper functions used for managing events. + * Many of the ideas behind this code orignated from + * Dean Edwards' addEvent library. 
+ */ +jQuery.event = { + + // Bind an event to an element + // Original by Dean Edwards + add: function(element, type, handler, data) { + // For whatever reason, IE has trouble passing the window object + // around, causing it to be cloned in the process + if ( jQuery.browser.msie && element.setInterval != undefined ) + element = window; + + // Make sure that the function being executed has a unique ID + if ( !handler.guid ) + handler.guid = this.guid++; + + // if data is passed, bind to handler + if( data != undefined ) { + // Create temporary function pointer to original handler + var fn = handler; + + // Create unique handler function, wrapped around original handler + handler = function() { + // Pass arguments and context to original handler + return fn.apply(this, arguments); + }; + + // Store data in unique handler + handler.data = data; + + // Set the guid of unique handler to the same of original handler, so it can be removed + handler.guid = fn.guid; + } + + // Init the element's event structure + if (!element.$events) + element.$events = {}; + + if (!element.$handle) + element.$handle = function() { + // returned undefined or false + var val; + + // Handle the second event of a trigger and when + // an event is called after a page has unloaded + if ( typeof jQuery == "undefined" || jQuery.event.triggered ) + return val; + + val = jQuery.event.handle.apply(element, arguments); + + return val; + }; + + // Get the current list of functions bound to this event + var handlers = element.$events[type]; + + // Init the event handler queue + if (!handlers) { + handlers = element.$events[type] = {}; + + // And bind the global event handler to the element + if (element.addEventListener) + element.addEventListener(type, element.$handle, false); + else + element.attachEvent("on" + type, element.$handle); + } + + // Add the function to the element's handler list + handlers[handler.guid] = handler; + + // Remember the function in a global list (for triggering) + if 
(!this.global[type]) + this.global[type] = []; + // Only add the element to the global list once + if (jQuery.inArray(element, this.global[type]) == -1) + this.global[type].push( element ); + }, + + guid: 1, + global: {}, + + // Detach an event or set of events from an element + remove: function(element, type, handler) { + var events = element.$events, ret, index; + + if ( events ) { + // type is actually an event object here + if ( type && type.type ) { + handler = type.handler; + type = type.type; + } + + if ( !type ) { + for ( type in events ) + this.remove( element, type ); + + } else if ( events[type] ) { + // remove the given handler for the given type + if ( handler ) + delete events[type][handler.guid]; + + // remove all handlers for the given type + else + for ( handler in element.$events[type] ) + delete events[type][handler]; + + // remove generic event handler if no more handlers exist + for ( ret in events[type] ) break; + if ( !ret ) { + if (element.removeEventListener) + element.removeEventListener(type, element.$handle, false); + else + element.detachEvent("on" + type, element.$handle); + ret = null; + delete events[type]; + + // Remove element from the global event type cache + while ( this.global[type] && ( (index = jQuery.inArray(element, this.global[type])) >= 0 ) ) + delete this.global[type][index]; + } + } + + // Remove the expando if it's no longer used + for ( ret in events ) break; + if ( !ret ) + element.$handle = element.$events = null; + } + }, + + trigger: function(type, data, element) { + // Clone the incoming data, if any + data = jQuery.makeArray(data || []); + + // Handle a global trigger + if ( !element ) + jQuery.each( this.global[type] || [], function(){ + jQuery.event.trigger( type, data, this ); + }); + + // Handle triggering a single element + else { + var val, ret, fn = jQuery.isFunction( element[ type ] || null ); + + // Pass along a fake event + data.unshift( this.fix({ type: type, target: element }) ); + + // Trigger the 
event + if ( jQuery.isFunction(element.$handle) && (val = element.$handle.apply( element, data )) !== false ) + this.triggered = true; + + if ( fn && val !== false && !jQuery.nodeName(element, 'a') ) + element[ type ](); + + this.triggered = false; + } + }, + + handle: function(event) { + // returned undefined or false + var val; + + // Empty object is for triggered events with no data + event = jQuery.event.fix( event || window.event || {} ); + + var c = this.$events && this.$events[event.type], args = [].slice.call( arguments, 1 ); + args.unshift( event ); + + for ( var j in c ) { + // Pass in a reference to the handler function itself + // So that we can later remove it + args[0].handler = c[j]; + args[0].data = c[j].data; + + if ( c[j].apply( this, args ) === false ) { + event.preventDefault(); + event.stopPropagation(); + val = false; + } + } + + // Clean up added properties in IE to prevent memory leak + if (jQuery.browser.msie) + event.target = event.preventDefault = event.stopPropagation = + event.handler = event.data = null; + + return val; + }, + + fix: function(event) { + // store a copy of the original event object + // and clone to set read-only properties + var originalEvent = event; + event = jQuery.extend({}, originalEvent); + + // add preventDefault and stopPropagation since + // they will not work on the clone + event.preventDefault = function() { + // if preventDefault exists run it on the original event + if (originalEvent.preventDefault) + return originalEvent.preventDefault(); + // otherwise set the returnValue property of the original event to false (IE) + originalEvent.returnValue = false; + }; + event.stopPropagation = function() { + // if stopPropagation exists run it on the original event + if (originalEvent.stopPropagation) + return originalEvent.stopPropagation(); + // otherwise set the cancelBubble property of the original event to true (IE) + originalEvent.cancelBubble = true; + }; + + // Fix target property, if necessary + if ( 
!event.target && event.srcElement ) + event.target = event.srcElement; + + // check if target is a textnode (safari) + if (jQuery.browser.safari && event.target.nodeType == 3) + event.target = originalEvent.target.parentNode; + + // Add relatedTarget, if necessary + if ( !event.relatedTarget && event.fromElement ) + event.relatedTarget = event.fromElement == event.target ? event.toElement : event.fromElement; + + // Calculate pageX/Y if missing and clientX/Y available + if ( event.pageX == null && event.clientX != null ) { + var e = document.documentElement, b = document.body; + event.pageX = event.clientX + (e && e.scrollLeft || b.scrollLeft); + event.pageY = event.clientY + (e && e.scrollTop || b.scrollTop); + } + + // Add which for key events + if ( !event.which && (event.charCode || event.keyCode) ) + event.which = event.charCode || event.keyCode; + + // Add metaKey to non-Mac browsers (use ctrl for PC's and Meta for Macs) + if ( !event.metaKey && event.ctrlKey ) + event.metaKey = event.ctrlKey; + + // Add which for click: 1 == left; 2 == middle; 3 == right + // Note: button is not normalized, so don't use it + if ( !event.which && event.button ) + event.which = (event.button & 1 ? 1 : ( event.button & 2 ? 3 : ( event.button & 4 ? 2 : 0 ) )); + + return event; + } +}; + +jQuery.fn.extend({ + bind: function( type, data, fn ) { + return type == "unload" ? 
this.one(type, data, fn) : this.each(function(){ + jQuery.event.add( this, type, fn || data, fn && data ); + }); + }, + one: function( type, data, fn ) { + return this.each(function(){ + jQuery.event.add( this, type, function(event) { + jQuery(this).unbind(event); + return (fn || data).apply( this, arguments); + }, fn && data); + }); + }, + unbind: function( type, fn ) { + return this.each(function(){ + jQuery.event.remove( this, type, fn ); + }); + }, + trigger: function( type, data ) { + return this.each(function(){ + jQuery.event.trigger( type, data, this ); + }); + }, + toggle: function() { + // Save reference to arguments for access in closure + var a = arguments; + + return this.click(function(e) { + // Figure out which function to execute + this.lastToggle = 0 == this.lastToggle ? 1 : 0; + + // Make sure that clicks stop + e.preventDefault(); + + // and execute the function + return a[this.lastToggle].apply( this, [e] ) || false; + }); + }, + hover: function(f,g) { + + // A private function for handling mouse 'hovering' + function handleHover(e) { + // Check if mouse(over|out) are still within the same parent element + var p = e.relatedTarget; + + // Traverse up the tree + while ( p && p != this ) try { p = p.parentNode } catch(e) { p = this; }; + + // If we actually just moused on to a sub-element, ignore it + if ( p == this ) return false; + + // Execute the right function + return (e.type == "mouseover" ? 
f : g).apply(this, [e]); + } + + // Bind the function to the two event listeners + return this.mouseover(handleHover).mouseout(handleHover); + }, + ready: function(f) { + // If the DOM is already ready + if ( jQuery.isReady ) + // Execute the function immediately + f.apply( document, [jQuery] ); + + // Otherwise, remember the function for later + else + // Add the function to the wait list + jQuery.readyList.push( function() { return f.apply(this, [jQuery]) } ); + + return this; + } +}); + +jQuery.extend({ + /* + * All the code that makes DOM Ready work nicely. + */ + isReady: false, + readyList: [], + + // Handle when the DOM is ready + ready: function() { + // Make sure that the DOM is not already loaded + if ( !jQuery.isReady ) { + // Remember that the DOM is ready + jQuery.isReady = true; + + // If there are functions bound, to execute + if ( jQuery.readyList ) { + // Execute all of them + jQuery.each( jQuery.readyList, function(){ + this.apply( document ); + }); + + // Reset the list of functions + jQuery.readyList = null; + } + // Remove event listener to avoid memory leak + if ( jQuery.browser.mozilla || jQuery.browser.opera ) + document.removeEventListener( "DOMContentLoaded", jQuery.ready, false ); + + // Remove script element used by IE hack + if( !window.frames.length ) // don't remove if frames are present (#1187) + jQuery(window).load(function(){ jQuery("#__ie_init").remove(); }); + } + } +}); + +new function(){ + + jQuery.each( ("blur,focus,load,resize,scroll,unload,click,dblclick," + + "mousedown,mouseup,mousemove,mouseover,mouseout,change,select," + + "submit,keydown,keypress,keyup,error").split(","), function(i,o){ + + // Handle event binding + jQuery.fn[o] = function(f){ + return f ? 
this.bind(o, f) : this.trigger(o); + }; + + }); + + // If Mozilla is used + if ( jQuery.browser.mozilla || jQuery.browser.opera ) + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", jQuery.ready, false ); + + // If IE is used, use the excellent hack by Matthias Miller + // http://www.outofhanwell.com/blog/index.php?title=the_window_onload_problem_revisited + else if ( jQuery.browser.msie ) { + + // Only works if you document.write() it + document.write("<\/script>"); + + // Use the defer script hack + var script = document.getElementById("__ie_init"); + + // script does not exist if jQuery is loaded dynamically + if ( script ) + script.onreadystatechange = function() { + if ( this.readyState != "complete" ) return; + jQuery.ready(); + }; + + // Clear from memory + script = null; + + // If Safari is used + } else if ( jQuery.browser.safari ) + // Continually check to see if the document.readyState is valid + jQuery.safariTimer = setInterval(function(){ + // loaded and complete are both valid states + if ( document.readyState == "loaded" || + document.readyState == "complete" ) { + + // If either one are found, remove the timer + clearInterval( jQuery.safariTimer ); + jQuery.safariTimer = null; + + // and execute any waiting functions + jQuery.ready(); + } + }, 10); + + // A fallback to window.onload, that will always work + jQuery.event.add( window, "load", jQuery.ready ); + +}; + +// Clean up after IE to avoid memory leaks +if (jQuery.browser.msie) + jQuery(window).one("unload", function() { + var global = jQuery.event.global; + for ( var type in global ) { + var els = global[type], i = els.length; + if ( i && type != 'unload' ) + do + els[i-1] && jQuery.event.remove(els[i-1], type); + while (--i); + } + }); +jQuery.fn.extend({ + loadIfModified: function( url, params, callback ) { + this.load( url, params, callback, 1 ); + }, + load: function( url, params, callback, ifModified ) { + if ( jQuery.isFunction( url ) ) + return 
this.bind("load", url); + + callback = callback || function(){}; + + // Default to a GET request + var type = "GET"; + + // If the second parameter was provided + if ( params ) + // If it's a function + if ( jQuery.isFunction( params ) ) { + // We assume that it's the callback + callback = params; + params = null; + + // Otherwise, build a param string + } else { + params = jQuery.param( params ); + type = "POST"; + } + + var self = this; + + // Request the remote document + jQuery.ajax({ + url: url, + type: type, + data: params, + ifModified: ifModified, + complete: function(res, status){ + if ( status == "success" || !ifModified && status == "notmodified" ) + // Inject the HTML into all the matched elements + self.attr("innerHTML", res.responseText) + // Execute all the scripts inside of the newly-injected HTML + .evalScripts() + // Execute callback + .each( callback, [res.responseText, status, res] ); + else + callback.apply( self, [res.responseText, status, res] ); + } + }); + return this; + }, + serialize: function() { + return jQuery.param( this ); + }, + evalScripts: function() { + return this.find("script").each(function(){ + if ( this.src ) + jQuery.getScript( this.src ); + else + jQuery.globalEval( this.text || this.textContent || this.innerHTML || "" ); + }).end(); + } + +}); + +// Attach a bunch of functions for handling common AJAX events + +jQuery.each( "ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".split(","), function(i,o){ + jQuery.fn[o] = function(f){ + return this.bind(o, f); + }; +}); + +jQuery.extend({ + get: function( url, data, callback, type, ifModified ) { + // shift arguments if data argument was ommited + if ( jQuery.isFunction( data ) ) { + callback = data; + data = null; + } + + return jQuery.ajax({ + type: "GET", + url: url, + data: data, + success: callback, + dataType: type, + ifModified: ifModified + }); + }, + getIfModified: function( url, data, callback, type ) { + return jQuery.get(url, data, callback, type, 1); 
+ }, + getScript: function( url, callback ) { + return jQuery.get(url, null, callback, "script"); + }, + getJSON: function( url, data, callback ) { + return jQuery.get(url, data, callback, "json"); + }, + post: function( url, data, callback, type ) { + if ( jQuery.isFunction( data ) ) { + callback = data; + data = {}; + } + + return jQuery.ajax({ + type: "POST", + url: url, + data: data, + success: callback, + dataType: type + }); + }, + ajaxTimeout: function( timeout ) { + jQuery.ajaxSettings.timeout = timeout; + }, + ajaxSetup: function( settings ) { + jQuery.extend( jQuery.ajaxSettings, settings ); + }, + + ajaxSettings: { + global: true, + type: "GET", + timeout: 0, + contentType: "application/x-www-form-urlencoded", + processData: true, + async: true, + data: null + }, + + // Last-Modified header cache for next request + lastModified: {}, + ajax: function( s ) { + // TODO introduce global settings, allowing the client to modify them for all requests, not only timeout + s = jQuery.extend({}, jQuery.ajaxSettings, s); + + // if data available + if ( s.data ) { + // convert data if not already a string + if (s.processData && typeof s.data != "string") + s.data = jQuery.param(s.data); + // append data to url for get requests + if( s.type.toLowerCase() == "get" ) { + // "?" + data or "&" + data (in case there are already params) + s.url += ((s.url.indexOf("?") > -1) ? "&" : "?") + s.data; + // IE likes to send both get and post data, prevent this + s.data = null; + } + } + + // Watch for a new set of requests + if ( s.global && ! jQuery.active++ ) + jQuery.event.trigger( "ajaxStart" ); + + var requestDone = false; + + // Create the request object; Microsoft failed to properly + // implement the XMLHttpRequest in IE7, so we use the ActiveXObject when it is available + var xml = window.ActiveXObject ? 
new ActiveXObject("Microsoft.XMLHTTP") : new XMLHttpRequest(); + + // Open the socket + xml.open(s.type, s.url, s.async); + + // Set the correct header, if data is being sent + if ( s.data ) + xml.setRequestHeader("Content-Type", s.contentType); + + // Set the If-Modified-Since header, if ifModified mode. + if ( s.ifModified ) + xml.setRequestHeader("If-Modified-Since", + jQuery.lastModified[s.url] || "Thu, 01 Jan 1970 00:00:00 GMT" ); + + // Set header so the called script knows that it's an XMLHttpRequest + xml.setRequestHeader("X-Requested-With", "XMLHttpRequest"); + + // Allow custom headers/mimetypes + if( s.beforeSend ) + s.beforeSend(xml); + + if ( s.global ) + jQuery.event.trigger("ajaxSend", [xml, s]); + + // Wait for a response to come back + var onreadystatechange = function(isTimeout){ + // The transfer is complete and the data is available, or the request timed out + if ( xml && (xml.readyState == 4 || isTimeout == "timeout") ) { + requestDone = true; + + // clear poll interval + if (ival) { + clearInterval(ival); + ival = null; + } + + var status; + try { + status = jQuery.httpSuccess( xml ) && isTimeout != "timeout" ? + s.ifModified && jQuery.httpNotModified( xml, s.url ) ? "notmodified" : "success" : "error"; + // Make sure that the request was successful or notmodified + if ( status != "error" ) { + // Cache Last-Modified header, if ifModified mode. 
+ var modRes; + try { + modRes = xml.getResponseHeader("Last-Modified"); + } catch(e) {} // swallow exception thrown by FF if header is not available + + if ( s.ifModified && modRes ) + jQuery.lastModified[s.url] = modRes; + + // process the data (runs the xml through httpData regardless of callback) + var data = jQuery.httpData( xml, s.dataType ); + + // If a local callback was specified, fire it and pass it the data + if ( s.success ) + s.success( data, status ); + + // Fire the global callback + if( s.global ) + jQuery.event.trigger( "ajaxSuccess", [xml, s] ); + } else + jQuery.handleError(s, xml, status); + } catch(e) { + status = "error"; + jQuery.handleError(s, xml, status, e); + } + + // The request was completed + if( s.global ) + jQuery.event.trigger( "ajaxComplete", [xml, s] ); + + // Handle the global AJAX counter + if ( s.global && ! --jQuery.active ) + jQuery.event.trigger( "ajaxStop" ); + + // Process result + if ( s.complete ) + s.complete(xml, status); + + // Stop memory leaks + if(s.async) + xml = null; + } + }; + + // don't attach the handler to the request, just poll it instead + var ival = setInterval(onreadystatechange, 13); + + // Timeout checker + if ( s.timeout > 0 ) + setTimeout(function(){ + // Check to see if the request is still happening + if ( xml ) { + // Cancel the request + xml.abort(); + + if( !requestDone ) + onreadystatechange( "timeout" ); + } + }, s.timeout); + + // Send the data + try { + xml.send(s.data); + } catch(e) { + jQuery.handleError(s, xml, null, e); + } + + // firefox 1.5 doesn't fire statechange for sync requests + if ( !s.async ) + onreadystatechange(); + + // return XMLHttpRequest to allow aborting the request etc. 
+ return xml; + }, + + handleError: function( s, xml, status, e ) { + // If a local callback was specified, fire it + if ( s.error ) s.error( xml, status, e ); + + // Fire the global callback + if ( s.global ) + jQuery.event.trigger( "ajaxError", [xml, s, e] ); + }, + + // Counter for holding the number of active queries + active: 0, + + // Determines if an XMLHttpRequest was successful or not + httpSuccess: function( r ) { + try { + return !r.status && location.protocol == "file:" || + ( r.status >= 200 && r.status < 300 ) || r.status == 304 || + jQuery.browser.safari && r.status == undefined; + } catch(e){} + return false; + }, + + // Determines if an XMLHttpRequest returns NotModified + httpNotModified: function( xml, url ) { + try { + var xmlRes = xml.getResponseHeader("Last-Modified"); + + // Firefox always returns 200. check Last-Modified date + return xml.status == 304 || xmlRes == jQuery.lastModified[url] || + jQuery.browser.safari && xml.status == undefined; + } catch(e){} + return false; + }, + + /* Get the data out of an XMLHttpRequest. + * Return parsed XML if content-type header is "xml" and type is "xml" or omitted, + * otherwise return plain text. + * (String) data - The type of data that you're expecting back, + * (e.g. "xml", "html", "script") + */ + httpData: function( r, type ) { + var ct = r.getResponseHeader("content-type"); + var data = !type && ct && ct.indexOf("xml") >= 0; + data = type == "xml" || data ? r.responseXML : r.responseText; + + // If the type is "script", eval it in global context + if ( type == "script" ) + jQuery.globalEval( data ); + + // Get the JavaScript object, if JSON is used. + if ( type == "json" ) + data = eval("(" + data + ")"); + + // evaluate scripts within html + if ( type == "html" ) + jQuery("
    ").html(data).evalScripts(); + + return data; + }, + + // Serialize an array of form elements or a set of + // key/values into a query string + param: function( a ) { + var s = []; + + // If an array was passed in, assume that it is an array + // of form elements + if ( a.constructor == Array || a.jquery ) + // Serialize the form elements + jQuery.each( a, function(){ + s.push( encodeURIComponent(this.name) + "=" + encodeURIComponent( this.value ) ); + }); + + // Otherwise, assume that it's an object of key/value pairs + else + // Serialize the key/values + for ( var j in a ) + // If the value is an array then the key names need to be repeated + if ( a[j] && a[j].constructor == Array ) + jQuery.each( a[j], function(){ + s.push( encodeURIComponent(j) + "=" + encodeURIComponent( this ) ); + }); + else + s.push( encodeURIComponent(j) + "=" + encodeURIComponent( a[j] ) ); + + // Return the resulting serialization + return s.join("&"); + }, + + // evalulates a script in global context + // not reliable for safari + globalEval: function( data ) { + if ( window.execScript ) + window.execScript( data ); + else if ( jQuery.browser.safari ) + // safari doesn't provide a synchronous global eval + window.setTimeout( data, 0 ); + else + eval.call( window, data ); + } + +}); +jQuery.fn.extend({ + + show: function(speed,callback){ + return speed ? + this.animate({ + height: "show", width: "show", opacity: "show" + }, speed, callback) : + + this.filter(":hidden").each(function(){ + this.style.display = this.oldblock ? this.oldblock : ""; + if ( jQuery.css(this,"display") == "none" ) + this.style.display = "block"; + }).end(); + }, + + hide: function(speed,callback){ + return speed ? 
+ this.animate({ + height: "hide", width: "hide", opacity: "hide" + }, speed, callback) : + + this.filter(":visible").each(function(){ + this.oldblock = this.oldblock || jQuery.css(this,"display"); + if ( this.oldblock == "none" ) + this.oldblock = "block"; + this.style.display = "none"; + }).end(); + }, + + // Save the old toggle function + _toggle: jQuery.fn.toggle, + toggle: function( fn, fn2 ){ + return jQuery.isFunction(fn) && jQuery.isFunction(fn2) ? + this._toggle( fn, fn2 ) : + fn ? + this.animate({ + height: "toggle", width: "toggle", opacity: "toggle" + }, fn, fn2) : + this.each(function(){ + jQuery(this)[ jQuery(this).is(":hidden") ? "show" : "hide" ](); + }); + }, + slideDown: function(speed,callback){ + return this.animate({height: "show"}, speed, callback); + }, + slideUp: function(speed,callback){ + return this.animate({height: "hide"}, speed, callback); + }, + slideToggle: function(speed, callback){ + return this.animate({height: "toggle"}, speed, callback); + }, + fadeIn: function(speed, callback){ + return this.animate({opacity: "show"}, speed, callback); + }, + fadeOut: function(speed, callback){ + return this.animate({opacity: "hide"}, speed, callback); + }, + fadeTo: function(speed,to,callback){ + return this.animate({opacity: to}, speed, callback); + }, + animate: function( prop, speed, easing, callback ) { + return this.queue(function(){ + var hidden = jQuery(this).is(":hidden"), + opt = jQuery.speed(speed, easing, callback), + self = this; + + for ( var p in prop ) { + if ( prop[p] == "hide" && hidden || prop[p] == "show" && !hidden ) + return jQuery.isFunction(opt.complete) && opt.complete.apply(this); + + if ( p == "height" || p == "width" ) { + // Store display property + opt.display = jQuery.css(this, "display"); + + // Make sure that nothing sneaks out + opt.overflow = this.style.overflow; + } + } + + if ( opt.overflow != null ) + this.style.overflow = "hidden"; + + this.curAnim = jQuery.extend({}, prop); + + jQuery.each( prop, 
function(name, val){ + var e = new jQuery.fx( self, opt, name ); + if ( val.constructor == Number ) + e.custom( e.cur(), val ); + else + e[ val == "toggle" ? hidden ? "show" : "hide" : val ]( prop ); + }); + }); + }, + queue: function(type,fn){ + if ( !fn ) { + fn = type; + type = "fx"; + } + + return this.each(function(){ + if ( !this.queue ) + this.queue = {}; + + if ( !this.queue[type] ) + this.queue[type] = []; + + this.queue[type].push( fn ); + + if ( this.queue[type].length == 1 ) + fn.apply(this); + }); + } + +}); + +jQuery.extend({ + + speed: function(speed, easing, fn) { + var opt = speed && speed.constructor == Object ? speed : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && easing.constructor != Function && easing || (jQuery.easing.swing ? "swing" : "linear") + }; + + opt.duration = (opt.duration && opt.duration.constructor == Number ? + opt.duration : + { slow: 600, fast: 200 }[opt.duration]) || 400; + + // Queueing + opt.old = opt.complete; + opt.complete = function(){ + jQuery.dequeue(this, "fx"); + if ( jQuery.isFunction( opt.old ) ) + opt.old.apply( this ); + }; + + return opt; + }, + + easing: { + linear: function( p, n, firstNum, diff ) { + return firstNum + diff * p; + }, + swing: function( p, n, firstNum, diff ) { + return ((-Math.cos(p*Math.PI)/2) + 0.5) * diff + firstNum; + } + }, + + queue: {}, + + dequeue: function(elem,type){ + type = type || "fx"; + + if ( elem.queue && elem.queue[type] ) { + // Remove self + elem.queue[type].shift(); + + // Get next function + var f = elem.queue[type][0]; + + if ( f ) f.apply( elem ); + } + }, + + timers: [], + + /* + * I originally wrote fx() as a clone of moo.fx and in the process + * of making it small in size the code became illegible to sane + * people. You've been warned. 
+ */ + + fx: function( elem, options, prop ){ + + var z = this; + + // The styles + var y = elem.style; + + // Simple function for setting a style value + z.a = function(){ + if ( options.step ) + options.step.apply( elem, [ z.now ] ); + + if ( prop == "opacity" ) + jQuery.attr(y, "opacity", z.now); // Let attr handle opacity + else { + y[prop] = parseInt(z.now) + "px"; + y.display = "block"; // Set display property to block for animation + } + }; + + // Figure out the maximum number to run to + z.max = function(){ + return parseFloat( jQuery.css(elem,prop) ); + }; + + // Get the current size + z.cur = function(){ + var r = parseFloat( jQuery.curCSS(elem, prop) ); + return r && r > -10000 ? r : z.max(); + }; + + // Start an animation from one number to another + z.custom = function(from,to){ + z.startTime = (new Date()).getTime(); + z.now = from; + z.a(); + + jQuery.timers.push(function(){ + return z.step(from, to); + }); + + if ( jQuery.timers.length == 1 ) { + var timer = setInterval(function(){ + var timers = jQuery.timers; + + for ( var i = 0; i < timers.length; i++ ) + if ( !timers[i]() ) + timers.splice(i--, 1); + + if ( !timers.length ) + clearInterval( timer ); + }, 13); + } + }; + + // Simple 'show' function + z.show = function(){ + if ( !elem.orig ) elem.orig = {}; + + // Remember where we started, so that we can go back to it later + elem.orig[prop] = jQuery.attr( elem.style, prop ); + + options.show = true; + + // Begin the animation + z.custom(0, this.cur()); + + // Make sure that we start at a small width/height to avoid any + // flash of content + if ( prop != "opacity" ) + y[prop] = "1px"; + + // Start by showing the element + jQuery(elem).show(); + }; + + // Simple 'hide' function + z.hide = function(){ + if ( !elem.orig ) elem.orig = {}; + + // Remember where we started, so that we can go back to it later + elem.orig[prop] = jQuery.attr( elem.style, prop ); + + options.hide = true; + + // Begin the animation + z.custom(this.cur(), 0); + }; + + // 
Each step of an animation + z.step = function(firstNum, lastNum){ + var t = (new Date()).getTime(); + + if (t > options.duration + z.startTime) { + z.now = lastNum; + z.a(); + + if (elem.curAnim) elem.curAnim[ prop ] = true; + + var done = true; + for ( var i in elem.curAnim ) + if ( elem.curAnim[i] !== true ) + done = false; + + if ( done ) { + if ( options.display != null ) { + // Reset the overflow + y.overflow = options.overflow; + + // Reset the display + y.display = options.display; + if ( jQuery.css(elem, "display") == "none" ) + y.display = "block"; + } + + // Hide the element if the "hide" operation was done + if ( options.hide ) + y.display = "none"; + + // Reset the properties, if the item has been hidden or shown + if ( options.hide || options.show ) + for ( var p in elem.curAnim ) + jQuery.attr(y, p, elem.orig[p]); + } + + // If a callback was provided, execute it + if ( done && jQuery.isFunction( options.complete ) ) + // Execute the complete function + options.complete.apply( elem ); + + return false; + } else { + var n = t - this.startTime; + // Figure out where in the animation we are and set the number + var p = n / options.duration; + + // Perform the easing function, defaults to swing + z.now = jQuery.easing[options.easing](p, n, firstNum, (lastNum-firstNum), options.duration); + + // Perform the next step of the animation + z.a(); + } + + return true; + }; + + } +}); +} diff --git a/sphinx/style/minus.png b/sphinx/style/minus.png new file mode 100644 index 000000000..da1c5620d Binary files /dev/null and b/sphinx/style/minus.png differ diff --git a/sphinx/style/nocomment.png b/sphinx/style/nocomment.png new file mode 100644 index 000000000..954a2454a Binary files /dev/null and b/sphinx/style/nocomment.png differ diff --git a/sphinx/style/plus.png b/sphinx/style/plus.png new file mode 100644 index 000000000..b3cb37425 Binary files /dev/null and b/sphinx/style/plus.png differ diff --git a/sphinx/style/preview.png b/sphinx/style/preview.png new 
file mode 100644 index 000000000..0c5df6eea Binary files /dev/null and b/sphinx/style/preview.png differ diff --git a/sphinx/style/rightsidebar.css b/sphinx/style/rightsidebar.css new file mode 100644 index 000000000..0d72b9a97 --- /dev/null +++ b/sphinx/style/rightsidebar.css @@ -0,0 +1,16 @@ +/** + * Python Doc Design -- Right Side Bar Overrides + */ + + +div.sidebar { + float: right; +} + +div.bodywrapper { + margin: 0 230px 0 0; +} + +div.inlinecomments { + right: 250px; +} diff --git a/sphinx/style/searchtools.js b/sphinx/style/searchtools.js new file mode 100644 index 000000000..ac9e97ed9 --- /dev/null +++ b/sphinx/style/searchtools.js @@ -0,0 +1,428 @@ +/** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words, hlwords is the list of normal, unstemmed + * words. the first one is used to find the occurance, the + * latter for highlighting it. + */ +jQuery.makeSearchSummary = function(text, keywords, hlwords) { + var textLower = text.toLowerCase(); + var start = 0; + $.each(keywords, function() { + var i = textLower.indexOf(this.toLowerCase()); + if (i > -1) { + start = i; + } + }); + start = Math.max(start - 120, 0); + var excerpt = ((start > 0) ? '...' : '') + + $.trim(text.substr(start, 240)) + + ((start + 240 - text.length) ? '...' : ''); + var rv = $('
    ').text(excerpt); + $.each(hlwords, function() { + rv = rv.highlightText(this, 'highlight'); + }); + return rv; +} + +/** + * Porter Stemmer + */ +var PorterStemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) { + return w; + } + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") { + w = firstch.toUpperCase() + w.substr(1); + } + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) { + w = w.replace(re,"$1$2"); + } + else if (re2.test(w)) { + w = w.replace(re2,"$1$2"); + } + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) { + w = w + "e"; + } + else if (re3.test(w)) { + re = /.$/; w = w.replace(re,""); + } + else if (re4.test(w)) { + w = w + "e"; + } + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) { w = stem + "i"; } + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) { + w = stem + step2list[suffix]; + } + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) { + w = stem + step3list[suffix]; + } + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = 
fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) { + w = stem; + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) { + w = stem; + } + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) { + w = stem; + } + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") { + w = firstch.toLowerCase() + w.substr(1); + } + return w; + } +} + + + +/** + * Search Module + */ +var Search = { + + init : function() { + var params = $.getQueryParameters(); + if (params.q) { + var query = params.q[0]; + var areas = params.area || []; + + // auto default + if (areas.length == 1 && areas[0] == 'default') { + areas = ['tutorial', 'modules', 'install', 'distutils']; + } + + // update input fields + $('input[@type="checkbox"]').each(function() { + this.checked = $.contains(areas, this.value); + }); + $('input[@name="q"]')[0].value = query; + + this.performSearch(query, areas); + } + }, + + /** + * perform a search for something + */ + performSearch : function(query, areas) { + // create the required interface elements + var out = $('#search-results'); + var title = $('

    Searching

    ').appendTo(out); + var dots = $('').appendTo(title); + var status = $('

    ').appendTo(out); + var output = $('
    + + + + + + {%- for user, privileges in users|dictsort %} + + + + + + {%- endfor %} +
    UsernamePrivilegesDelete
    {{ user|e }}
    +
    + + + +
    + +{% endblock %} diff --git a/sphinx/templates/admin/moderate_comments.html b/sphinx/templates/admin/moderate_comments.html new file mode 100644 index 000000000..919105191 --- /dev/null +++ b/sphinx/templates/admin/moderate_comments.html @@ -0,0 +1,104 @@ +{% extends "admin/layout.html" %} +{% block admin_body %} +

    Moderate Comments

    +

    + From here you can delete and edit comments. If you want to be + informed about new comments you can use the feed provided. +

    +
    + {% if ask_confirmation %} +
    +

    Confirm

    +
    + {% trans amount=to_delete|length %} + Do you really want to delete one comment? + {% pluralize %} + Do you really want to delete {{ amount }} comments? + {% endtrans %} +
    +
    + + +
    +
    + {% endif %} + {% if edit_detail %} +
    +

    Edit Comment

    +
    + +
    +
    Name
    +
    +
    E-Mail
    +
    +
    Comment Title
    +
    +
    + +
    +
    + + + + +
    +
    + {% endif %} + {%- macro render_row(comment, include_page=false) %} + + + {{ comment.title|e }} + by {{ comment.author|e }}{% if include_page + %} on {{ comment.associated_page }} + + {{ comment.pub_date|datetimeformat }} + + edit + + + + {%- endmacro %} + + {% if pages_with_comments %} + + + + {%- for comment in recent_comments %} + {{- render_row(comment, true) }} + {%- endfor %} + {%- for page in pages_with_comments %} + + + + {%- if page.has_details %} + {%- for comment in page.comments %} + {{- render_row(comment) }} + {%- endfor %} + {%- endif %} + {% endfor %} + {%- else %} + + {%- endif %} +
    + Recent Comments + (feed) +
    + {{ page.title|e }} + (view | + feed) +
    no comments submitted so far
    +
    + + +
    +
    +{% endblock %} diff --git a/sphinx/templates/commentform.html b/sphinx/templates/commentform.html new file mode 100644 index 000000000..be38ab2b7 --- /dev/null +++ b/sphinx/templates/commentform.html @@ -0,0 +1,26 @@ +{% extends "layout.html" %} +{% block body %} +
    +

    New Comment

    + {{ form }} +
    +

    + You can format a comment using the + following syntax elements provided: +

    +

    + `code` / ``code too`` / **strong** / + *emphasized* / !!!important!!! / + [[link_target Link Title]] / + [[link_target_only]] / <code>code block with + syntax highlighting</code> / <quote>some + quoted text</quote>. +

    +

    + HTML is not supported, relative link targets are treated as + quicklinks and code blocks that start with ">>>" are + highlighted as interactive python sessions. +

    +
    +
    +{% endblock %} diff --git a/sphinx/templates/comments.html b/sphinx/templates/comments.html new file mode 100644 index 000000000..81d6542ba --- /dev/null +++ b/sphinx/templates/comments.html @@ -0,0 +1,22 @@ +
    +

    Comments

    + {% for comment in comments %} +
    +

    {{ comment.title|e }} + {%- if comment.associated_name %} — on + {{- + comment.associated_name }}{% endif %}

    +
    {{ comment.parsed_comment_body }}
    +
    by {{ comment.author|e }}, written on + {{ comment.pub_date|datetimeformat }} | + #
    +
    + {% else %} +
    + There are no user contributed notes for this page. +
    + {% endfor %} + +
    diff --git a/sphinx/templates/edit.html b/sphinx/templates/edit.html new file mode 100644 index 000000000..69b8e9ce7 --- /dev/null +++ b/sphinx/templates/edit.html @@ -0,0 +1,53 @@ +{% extends "layout.html" %} +{% if rendered %}{% set title = "Suggest changes - Preview" %} +{% else %}{% set title = "Suggest changes" %}{% endif %} +{% block body %} +{% if rendered %} +

    Preview

    +
    +
    + {{ rendered }} +
    +
    + {% if warnings %} +

    Warnings

    +

    You must fix these warnings before you can submit your patch.

    +
      + {% for warning in warnings %} +
    • {{ warning }}
    • + {% endfor %} +
    + {% endif %} +{% endif %} +

    Suggest changes for this page

    +{% if not rendered %} +

    Here you can edit the source of “{{ doctitle|striptags }}” and + submit the results as a patch to the Python documentation team. If you want + to know more about reST, the markup language used, read + Documenting Python.

    +{% endif %} +
    +
    + + {# XXX: shortcuts to make the edit area larger/smaller #} + {% if form_error %} +
    {{ form_error|e }}
    + {% endif %} +
    +
    Name:
    +
    +
    E-mail Address:
    +
    +
    Summary of the change:
    +
    +
    + +
    + + + + +
    +
    +
    +{% endblock %} diff --git a/sphinx/templates/genindex.html b/sphinx/templates/genindex.html new file mode 100644 index 000000000..a97c5485d --- /dev/null +++ b/sphinx/templates/genindex.html @@ -0,0 +1,46 @@ +{% extends "layout.html" %} +{% set title = 'Index' %} +{% block body %} + +

    Index

    + + {% for key, dummy in genindexentries -%} + {{ key }} {% if not loop.last %}| {% endif %} + {%- endfor %} + +
    + + {% for key, entries in genindexentries %} +

    {{ key }}

    +
    +
    + {%- set breakat = genindexcounts[loop.index0] // 2 %} + {%- set numcols = 1 %} + {%- set numitems = 0 %} + {% for entryname, (links, subitems) in entries %} +
    {%- if links -%} + {{ entryname }} + {%- for link in links[1:] %}, [Link]{% endfor -%} + {%- else -%} + {{ entryname }} + {%- endif -%}
    + {%- if subitems %} +
    + {%- for subentryname, subentrylinks in subitems %} +
    {{ subentryname }} + {%- for link in subentrylinks[1:] %}, [Link]{% endfor -%} +
    + {%- endfor %} +
    + {%- endif -%} + {%- set numitems = numitems + 1 + len(subitems) -%} + {%- if numcols < 2 and numitems > breakat -%} + {%- set numcols = numcols+1 -%} +
    + {%- endif -%} + {% endfor %} +
    + + {% endfor %} + +{% endblock %} diff --git a/sphinx/templates/index.html b/sphinx/templates/index.html new file mode 100644 index 000000000..3217306c0 --- /dev/null +++ b/sphinx/templates/index.html @@ -0,0 +1,67 @@ +{% extends "layout.html" %} +{% set title = 'Overview' %} +{% set current_page_name = 'index' %} +{% set page_links = [ + (pathto('@rss/recent'), 'application/rss+xml', 'Recent Comments') +] %} +{% block body %} +

    Python Documentation

    +

    + Welcome! This is the documentation for Python + {{ release }}{% if last_updated %}, last updated {{ last_updated }}{% endif %}. +

    + +

    Parts of the documentation:

    + + +
    + + + + + + + + + + + +
    + +

    Indices and tables:

    + + +
    + + + + + +
    + +

    Meta information:

    + + +
    + + + + + +
    + +{% endblock %} diff --git a/sphinx/templates/inlinecomments.html b/sphinx/templates/inlinecomments.html new file mode 100644 index 000000000..efefca8ab --- /dev/null +++ b/sphinx/templates/inlinecomments.html @@ -0,0 +1,36 @@ +{# rendered for inline comments -#} +
    +{%- if mode == 'bottom' %} + {%- if comments -%} + [Read Comments] + {%- else -%} + [Write Comments] + {%- endif %} +{%- else %} +
    + {%- if comments -%} + [{{ comments|length }} Comments] + {%- else -%} + [Write Comment] + {%- endif -%} +
    + {%- if comments %} +
    +

    Comments

    + + {%- for comment in comments %} +
    +

    {{ comment.title|e }}

    +
    {{ comment.parsed_comment_body }}
    +
    by {{ comment.author|e }}, written on + {{ comment.pub_date|datetimeformat }} | + #
    +
    + {%- endfor %} +
    + {%- endif %} +{%- endif %} +
    diff --git a/sphinx/templates/keyword_not_found.html b/sphinx/templates/keyword_not_found.html new file mode 100644 index 000000000..3dc8496a2 --- /dev/null +++ b/sphinx/templates/keyword_not_found.html @@ -0,0 +1,31 @@ +{% extends "layout.html" %} +{% set title = 'Keyword Not Found' %} +{% block body %} +

    Keyword Not Found

    +

    + The keyword {{ keyword|e }} is not directly associated with + a page. {% if close_matches %}A similarity search returned {{ + close_matches|length }} items that are possible matches. + {% if good_matches_count %}{{ good_matches_count }} of them are really + good matches and are emphasized.{% endif %}{% endif %} +

    + {% if close_matches %} +
      + {% for item in close_matches %} + {{ item.title|e }} ({{ + item.type }}) {% if item.description + %} — {{ item.description|e }}{% endif %} + {% endfor %} +
    + {% endif %} +

    + If you want to search the entire Python documentation for the string + "{{ keyword|e }}", then use the search function. +

    +

    + For a quick overview of all documented modules, + click here. +

    +{% endblock %} diff --git a/sphinx/templates/layout.html b/sphinx/templates/layout.html new file mode 100644 index 000000000..515b3a222 --- /dev/null +++ b/sphinx/templates/layout.html @@ -0,0 +1,89 @@ +{% if builder != 'htmlhelp' %}{% set titlesuffix = " — Python Documentation" %}{% endif -%} + + + + + {{ title|striptags }}{{ titlesuffix }} + {%- if builder == 'web' %} + + {%- for link, type, title in page_links %} + + {%- endfor %} + {%- else %} + + + {%- endif %} + + + + + + + + + + + {%- if parents %} + + {%- endif %} + {%- if next %} + + {%- endif %} + {%- if prev %} + + {%- endif %} + {% block head %}{% endblock %} + + + +
    +
    + {%- if builder != 'htmlhelp' %} +
    + {%- endif %} +
    + {% block body %}{% endblock %} +
    + {%- if builder != 'htmlhelp' %} +
    + {%- endif %} +
    + {%- if builder != 'htmlhelp' %} + {%- include "sidebar.html" %} + {%- endif %} +
    +
    + + + diff --git a/sphinx/templates/modindex.html b/sphinx/templates/modindex.html new file mode 100644 index 000000000..e075db014 --- /dev/null +++ b/sphinx/templates/modindex.html @@ -0,0 +1,45 @@ +{% extends "layout.html" %} +{% set title = 'Global Module Index' %} +{% block body %} + +

    Global Module Index

    +{% if builder == 'web' and freqentries %} +

    Most popular modules:

    +
    + {%- for module in freqentries %} + {{ module.name|e }} + {%- endfor %} +
    +{% endif %} +
    + Show modules only available on these platforms:
    + {% for pl in platforms -%} + + + {% endfor %} + +
    + + + {%- for modname, collapse, cgroup, indent, fname, synops, pform in modindexentries %} + {%- if not modname -%} + + + {%- else -%} + + + + {%- endif -%} + {% endfor %} +
     
    {{ fname }}
    {% if collapse -%} + + {%- endif %}{% if indent %}   {% endif %} + {% if fname %}{% endif -%} + {{ modname|e }} + {%- if fname %}{% endif %} + {%- if pform[0] %} ({{ pform|join(', ') }}){% endif -%} + {{ synops|e }}
    + +{% endblock %} diff --git a/sphinx/templates/not_found.html b/sphinx/templates/not_found.html new file mode 100644 index 000000000..5c4fa109e --- /dev/null +++ b/sphinx/templates/not_found.html @@ -0,0 +1,11 @@ +{% extends "layout.html" %} +{% set title = 'Page Not Found' %} +{% block body %} +

    Page Not Found

    +

    + The page {{ req.path|e }} does not exist on this server. +

    +

    + Click here to return to the index. +

    +{% endblock %} diff --git a/sphinx/templates/page.html b/sphinx/templates/page.html new file mode 100644 index 000000000..ce4aef0d8 --- /dev/null +++ b/sphinx/templates/page.html @@ -0,0 +1,14 @@ +{% extends "layout.html" %} +{% set page_links = [ + (pathto('@rss/' + sourcename), 'application/rss+xml', 'Page Comments'), +] %} +{% block body %} + {% if oldurl %} +
    + Note: You requested an out-of-date URL from this server. + We've tried to redirect you to the new location of this page, but it may not + be the right one. +
    + {% endif %} + {{ body }} +{% endblock %} diff --git a/sphinx/templates/search.html b/sphinx/templates/search.html new file mode 100644 index 000000000..c23864b21 --- /dev/null +++ b/sphinx/templates/search.html @@ -0,0 +1,60 @@ +{% extends "layout.html" %} +{% set title = 'Search Documentation' %} +{% block header %} + +{% endblock %} +{% block body %} +

    Search Documentation

    +

    + From here you can search the Python documentation. Enter your search + words into the box below and click "search". Note that the search + function will automatically search for all of the words. Pages + containing fewer words won't appear in the result list. +

    +

    + In order to speed up the results you can limit your search by + excluding some of the sections listed below. +

    +
    + + +

    + Sections: +

    +
      + {% for id, name, checked in [ + ('tutorial', 'Python Tutorial', true), + ('modules', 'Library Reference', true), + ('macmodules', 'Macintosh Library Modules', false), + ('extending', 'Extending and Embedding', false), + ('c-api', 'Python/C API', false), + ('install', 'Installing Python Modules', true), + ('distutils', 'Distributing Python Modules', true), + ('documenting', 'Documenting Python', false), + ('whatsnew', 'What\'s new in Python?', false), + ('reference', 'Language Reference', false) + ] -%} +
    • +
    • + {% endfor %} +
    +
    + {% if search_performed %} +

    Search Results

    + {% if not search_results %} +

    Your search did not match any results.

    + {% endif %} + {% endif %} +
    + {% if search_results %} +
      + {% for href, caption, context in search_results %} +
    • {{ caption }} +
      {{ context|e }}
      +
    • + {% endfor %} +
    + {% endif %} +
    +{% endblock %} diff --git a/sphinx/templates/settings.html b/sphinx/templates/settings.html new file mode 100644 index 000000000..670526713 --- /dev/null +++ b/sphinx/templates/settings.html @@ -0,0 +1,37 @@ +{% extends "layout.html" %} +{% set title = 'Settings' %} +{% set current_page_name = 'settings' %} +{% block body %} +

    Python Documentation Settings

    +

    + Here you can customize how you want to view the Python documentation. + These settings are saved using a cookie on your computer. +

    + +
    +

    Select your stylesheet:

    +

    + {%- for design, (foo, descr) in known_designs %} + +
    + {%- endfor %} +

    + +

    Select how you want to view comments:

    +

    + {%- for meth, descr in comments_methods %} + +
    + {%- endfor %} +

    + +

    +    +    +    +

    +
    + +{% endblock %} diff --git a/sphinx/templates/show_source.html b/sphinx/templates/show_source.html new file mode 100644 index 000000000..96ef5c643 --- /dev/null +++ b/sphinx/templates/show_source.html @@ -0,0 +1,6 @@ +{% extends "layout.html" %} +{% set title = 'Page Source' %} +{% block body %} +

    Page Source

    + {{ highlighted_code }} +{% endblock %} diff --git a/sphinx/templates/sidebar.html b/sphinx/templates/sidebar.html new file mode 100644 index 000000000..a244f2298 --- /dev/null +++ b/sphinx/templates/sidebar.html @@ -0,0 +1,48 @@ +{# this file is included by layout.html #} + diff --git a/sphinx/templates/submitted.html b/sphinx/templates/submitted.html new file mode 100644 index 000000000..845f809a7 --- /dev/null +++ b/sphinx/templates/submitted.html @@ -0,0 +1,12 @@ +{% extends "layout.html" %} +{% set title = "Patch submitted" %} +{% block head %} + +{% endblock %} +{% block body %} +

    Patch submitted

    +

    Your patch has been submitted to the Python documentation team and will be + processed shortly.

    +

    You will be redirected to the + original documentation page shortly.

    +{% endblock %} \ No newline at end of file diff --git a/sphinx/util.py b/sphinx/util.py new file mode 100644 index 000000000..f927fcd20 --- /dev/null +++ b/sphinx/util.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +""" + sphinx.util + ~~~~~~~~~~~ + + Utility functions for Sphinx. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +import os +import sys +import fnmatch +from os import path + + +def relative_uri(base, to): + """Return a relative URL from ``base`` to ``to``.""" + b2 = base.split('/') + t2 = to.split('/') + # remove common segments + for x, y in zip(b2, t2): + if x != y: + break + b2.pop(0) + t2.pop(0) + return '../' * (len(b2)-1) + '/'.join(t2) + + +def ensuredir(path): + """Ensure that a path exists.""" + try: + os.makedirs(path) + except OSError, err: + if not err.errno == 17: + raise + + +def status_iterator(iterable, colorfunc=lambda x: x, stream=sys.stdout): + """Print out each item before yielding it.""" + for item in iterable: + print >>stream, colorfunc(item), + stream.flush() + yield item + print >>stream + + +def get_matching_files(dirname, pattern, exclude=()): + """Get all files matching a pattern in a directory, recursively.""" + # dirname is a normalized absolute path. 
+ dirname = path.normpath(path.abspath(dirname)) + dirlen = len(dirname) + 1 # exclude slash + for root, dirs, files in os.walk(dirname): + dirs.sort() + files.sort() + for sfile in files: + if not fnmatch.fnmatch(sfile, pattern): + continue + qualified_name = path.join(root[dirlen:], sfile) + if qualified_name in exclude: + continue + yield qualified_name + + +def get_category(filename): + """Get the "category" part of a RST filename.""" + parts = filename.split('/', 1) + if len(parts) < 2: + return + return parts[0] + + +def shorten_result(text='', keywords=[], maxlen=240, fuzz=60): + if not text: + text = '' + text_low = text.lower() + beg = -1 + for k in keywords: + i = text_low.find(k.lower()) + if (i > -1 and i < beg) or beg == -1: + beg = i + excerpt_beg = 0 + if beg > fuzz: + for sep in ('.', ':', ';', '='): + eb = text.find(sep, beg - fuzz, beg - 1) + if eb > -1: + eb += 1 + break + else: + eb = beg - fuzz + excerpt_beg = eb + if excerpt_beg < 0: + excerpt_beg = 0 + msg = text[excerpt_beg:beg+maxlen] + if beg > fuzz: + msg = '... ' + msg + if beg < len(text)-maxlen: + msg = msg + ' ...' + return msg + + +class attrdict(dict): + def __getattr__(self, key): + return self[key] + def __setattr__(self, key, val): + self[key] = val + def __delattr__(self, key): + del self[key] diff --git a/sphinx/web/__init__.py b/sphinx/web/__init__.py new file mode 100644 index 000000000..d0ebd074b --- /dev/null +++ b/sphinx/web/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web + ~~~~~~~~~~ + + A web application to serve the Python docs interactively. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" diff --git a/sphinx/web/admin.py b/sphinx/web/admin.py new file mode 100644 index 000000000..1930531b7 --- /dev/null +++ b/sphinx/web/admin.py @@ -0,0 +1,258 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.admin + ~~~~~~~~~~~~~~~~ + + Admin application parts. + + :copyright: 2007 by Georg Brandl, Armin Ronacher. + :license: Python license. 
+""" + +from .util import render_template +from .wsgiutil import Response, RedirectResponse, NotFound +from .database import Comment + + +class AdminPanel(object): + """ + Provide the admin functionallity. + """ + + def __init__(self, app): + self.app = app + self.env = app.env + self.userdb = app.userdb + + def dispatch(self, req, page): + """ + Dispatch the requests for the current user in the admin panel. + """ + is_logged_in = req.user is not None + if is_logged_in: + privileges = self.userdb.privileges[req.user] + is_master_admin = 'master' in privileges + can_change_password = 'frozenpassword' not in privileges + else: + privileges = set() + can_change_password = is_master_admin = False + + # login and logout + if page == 'login': + return self.do_login(req) + elif not is_logged_in: + return RedirectResponse('@admin/login/') + elif page == 'logout': + return self.do_logout(req) + + # account maintance + elif page == 'change_password' and can_change_password: + return self.do_change_password(req) + elif page == 'manage_users' and is_master_admin: + return self.do_manage_users(req) + + # moderate comments + elif page.split('/')[0] == 'moderate_comments': + return self.do_moderate_comments(req, page[18:]) + + # missing page + elif page != '': + raise NotFound() + return Response(render_template(req, 'admin/index.html', { + 'is_master_admin': is_master_admin, + 'can_change_password': can_change_password + })) + + def do_login(self, req): + """ + Display login form and do the login procedure. 
+ """ + if req.user is not None: + return RedirectResponse('@admin/') + login_failed = False + if req.method == 'POST': + if req.form.get('cancel'): + return RedirectResponse('') + username = req.form.get('username') + password = req.form.get('password') + if self.userdb.check_password(username, password): + req.login(username) + return RedirectResponse('@admin/') + login_failed = True + return Response(render_template(req, 'admin/login.html', { + 'login_failed': login_failed + })) + + def do_logout(self, req): + """ + Log the user out. + """ + req.logout() + return RedirectResponse('admin/login/') + + def do_change_password(self, req): + """ + Allows the user to change his password. + """ + change_failed = change_successful = False + if req.method == 'POST': + if req.form.get('cancel'): + return RedirectResponse('@admin/') + pw = req.form.get('pw1') + if pw and pw == req.form.get('pw2'): + self.userdb.set_password(req.user, pw) + self.userdb.save() + change_successful = True + else: + change_failed = True + return Response(render_template(req, 'admin/change_password.html', { + 'change_failed': change_failed, + 'change_successful': change_successful + })) + + def do_manage_users(self, req): + """ + Manage other user accounts. Requires master privileges. 
+ """ + add_user_mode = False + user_privileges = {} + users = sorted((user, []) for user in self.userdb.users) + to_delete = set() + generated_user = generated_password = None + user_exists = False + + if req.method == 'POST': + for item in req.form.getlist('delete'): + try: + to_delete.add(item) + except ValueError: + pass + for name, item in req.form.iteritems(): + if name.startswith('privileges-'): + user_privileges[name[11:]] = [x.strip() for x + in item.split(',')] + if req.form.get('cancel'): + return RedirectResponse('@admin/') + elif req.form.get('add_user'): + username = req.form.get('username') + if username: + if username in self.userdb.users: + user_exists = username + else: + generated_password = self.userdb.add_user(username) + self.userdb.save() + generated_user = username + else: + add_user_mode = True + elif req.form.get('aborted'): + return RedirectResponse('@admin/manage_users/') + + users = {} + for user in self.userdb.users: + if user not in user_privileges: + users[user] = sorted(self.userdb.privileges[user]) + else: + users[user] = user_privileges[user] + + new_users = users.copy() + for user in to_delete: + new_users.pop(user, None) + + self_destruction = req.user not in new_users or \ + 'master' not in new_users[req.user] + + if req.method == 'POST' and (not to_delete or + (to_delete and req.form.get('confirmed'))) and \ + req.form.get('update'): + old_users = self.userdb.users.copy() + for user in old_users: + if user not in new_users: + del self.userdb.users[user] + else: + self.userdb.privileges[user].clear() + self.userdb.privileges[user].update(new_users[user]) + self.userdb.save() + return RedirectResponse('@admin/manage_users/') + + return Response(render_template(req, 'admin/manage_users.html', { + 'users': users, + 'add_user_mode': add_user_mode, + 'to_delete': to_delete, + 'ask_confirmation': req.method == 'POST' and to_delete \ + and not self_destruction, + 'generated_user': generated_user, + 'generated_password': 
generated_password, + 'self_destruction': self_destruction, + 'user_exists': user_exists + })) + + def do_moderate_comments(self, req, url): + """ + Comment moderation panel. + """ + if url == 'recent_comments': + details_for = None + recent_comments = Comment.get_recent(20) + else: + details_for = url and self.env.get_real_filename(url) or None + recent_comments = None + to_delete = set() + edit_detail = None + + if 'edit' in req.args: + try: + edit_detail = Comment.get(int(req.args['edit'])) + except ValueError: + pass + + if req.method == 'POST': + for item in req.form.getlist('delete'): + try: + to_delete.add(int(item)) + except ValueError: + pass + if req.form.get('cancel'): + return RedirectResponse('@admin/') + elif req.form.get('confirmed'): + for comment_id in to_delete: + try: + Comment.get(comment_id).delete() + except ValueError: + pass + return RedirectResponse(req.path) + elif req.form.get('aborted'): + return RedirectResponse(req.path) + elif req.form.get('edit') and not to_delete: + if 'delete_this' in req.form: + try: + to_delete.add(req.form['delete_this']) + except ValueError: + pass + else: + try: + edit_detail = c = Comment.get(int(req.args['edit'])) + except ValueError: + pass + else: + if req.form.get('view'): + return RedirectResponse(c.url) + c.author = req.form.get('author', '') + c.author_mail = req.form.get('author_mail', '') + c.title = req.form.get('title', '') + c.comment_body = req.form.get('comment_body', '') + c.save() + self.app.cache.pop(edit_detail.associated_page, None) + return RedirectResponse(req.path) + + return Response(render_template(req, 'admin/moderate_comments.html', { + 'pages_with_comments': [{ + 'page_id': page_id, + 'title': page_id, #XXX: get title somehow + 'has_details': details_for == page_id, + 'comments': comments + } for page_id, comments in Comment.get_overview(details_for)], + 'recent_comments': recent_comments, + 'to_delete': to_delete, + 'ask_confirmation': req.method == 'POST' and to_delete, + 
'edit_detail': edit_detail + })) diff --git a/sphinx/web/antispam.py b/sphinx/web/antispam.py new file mode 100644 index 000000000..200171414 --- /dev/null +++ b/sphinx/web/antispam.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.antispam + ~~~~~~~~~~~~~~~~~~~ + + Small module that performs anti spam tests based on the bad content + regex list provided by moin moin. + + :copyright: 2007 by Armin Ronacher. + :license: Python license. +""" +from __future__ import with_statement +import re +import urllib +import time +from os import path + +DOWNLOAD_URL = 'http://moinmaster.wikiwikiweb.de/BadContent?action=raw' +UPDATE_INTERVAL = 60 * 60 * 24 * 7 + + +class AntiSpam(object): + """ + Class that reads a bad content database (flat file that is automatically + updated from the moin moin server) and checks strings against it. + """ + + def __init__(self, bad_content_file): + self.bad_content_file = bad_content_file + lines = None + + if not path.exists(self.bad_content_file): + last_change = 0 + else: + last_change = path.getmtime(self.bad_content_file) + + if last_change + UPDATE_INTERVAL < time.time(): + try: + f = urllib.urlopen(DOWNLOAD_URL) + data = f.read() + except: + pass + else: + lines = [l.strip() for l in data.splitlines() + if not l.startswith('#')] + f = file(bad_content_file, 'w') + f.write('\n'.join(lines)) + last_change = int(time.time()) + + if lines is None: + with file(bad_content_file) as f: + lines = [l.strip() for l in f] + self.rules = [re.compile(rule) for rule in lines if rule] + + def is_spam(self, fields): + for regex in self.rules: + for field in fields: + if regex.search(field) is not None: + return True + return False diff --git a/sphinx/web/application.py b/sphinx/web/application.py new file mode 100644 index 000000000..ecb24088c --- /dev/null +++ b/sphinx/web/application.py @@ -0,0 +1,790 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.application + ~~~~~~~~~~~~~~~~~~~~~~ + + A simple WSGI application that serves an interactive 
version + of the python documentation. + + :copyright: 2007 by Georg Brandl, Armin Ronacher. + :license: Python license. +""" +from __future__ import with_statement + +import os +import re +import copy +import time +import heapq +import math +import difflib +import tempfile +import threading +import cPickle as pickle +import cStringIO as StringIO +from os import path +from itertools import groupby +from collections import defaultdict + +from .feed import Feed +from .mail import Email +from .util import render_template, render_simple_template, get_target_uri, \ + blackhole_dict, striptags +from .admin import AdminPanel +from .userdb import UserDatabase +from .oldurls import handle_html_url +from .antispam import AntiSpam +from .database import connect, set_connection, Comment +from .wsgiutil import Request, Response, RedirectResponse, \ + JSONResponse, SharedDataMiddleware, NotFound, get_base_uri + +from ..util import relative_uri, shorten_result +from ..search import SearchFrontend +from ..writer import HTMLWriter +from ..builder import LAST_BUILD_FILENAME, ENV_PICKLE_FILENAME + +from docutils.io import StringOutput +from docutils.utils import Reporter +from docutils.frontend import OptionParser + +_mail_re = re.compile(r'^([a-zA-Z0-9_\.\-])+\@' + r'(([a-zA-Z0-9\-])+\.)+([a-zA-Z0-9]{2,})+$') + +env_lock = threading.Lock() + + +PATCH_MESSAGE = '''\ +A new documentation patch has been submitted. + Author: %(author)s <%(email)s> + Date: %(asctime)s + Page: %(page_id)s + Summary: %(summary)s + +''' + +known_designs = { + 'default': (['default.css', 'pygments.css'], + 'The default design, with the sidebar on the left side.'), + 'rightsidebar': (['default.css', 'rightsidebar.css', 'pygments.css'], + 'Display the sidebar on the right side.'), + 'stickysidebar': (['default.css', 'stickysidebar.css', 'pygments.css'], + '''\ + Display the sidebar on the left and don\'t scroll it + with the content. 
This can cause parts of the content to + become inaccessible when the table of contents is too long.'''), + 'traditional': (['traditional.css'], + '''\ + A design similar to the old documentation style.'''), +} + +comments_methods = { + 'inline': 'Show all comments inline.', + 'bottom': 'Show all comments at the page bottom.', + 'none': 'Don\'t show comments at all.', +} + + +class MockBuilder(object): + def get_relative_uri(self, from_, to): + return '' + + +NoCache = object() + +def cached(inner): + """ + Response caching system. + """ + def caching_function(self, *args, **kwds): + gen = inner(self, *args, **kwds) + cache_id = gen.next() + if cache_id is NoCache: + response = gen.next() + gen.close() + # this could also return a RedirectResponse... + if isinstance(response, Response): + return response + else: + return Response(response) + try: + text = self.cache[cache_id] + gen.close() + except KeyError: + text = gen.next() + self.cache[cache_id] = text + return Response(text) + return caching_function + + +class DocumentationApplication(object): + """ + Serves the documentation. + """ + + def __init__(self, config): + self.cache = blackhole_dict() if config['debug'] else {} + self.freqmodules = defaultdict(int) + self.last_most_frequent = [] + self.generated_stylesheets = {} + self.config = config + self.data_root = config['data_root_path'] + self.buildfile = path.join(self.data_root, LAST_BUILD_FILENAME) + self.buildmtime = -1 + self.load_env(0) + self.db_con = connect(path.join(self.data_root, 'sphinx.db')) + self.antispam = AntiSpam(path.join(self.data_root, 'bad_content')) + self.userdb = UserDatabase(path.join(self.data_root, 'docusers')) + self.admin_panel = AdminPanel(self) + + + def load_env(self, new_mtime): + env_lock.acquire() + try: + if self.buildmtime == new_mtime: + # happens if another thread already reloaded the env + return + print "* Loading the environment..." 
+ with file(path.join(self.data_root, ENV_PICKLE_FILENAME)) as f: + self.env = pickle.load(f) + with file(path.join(self.data_root, 'globalcontext.pickle')) as f: + self.globalcontext = pickle.load(f) + with file(path.join(self.data_root, 'searchindex.pickle')) as f: + self.search_frontend = SearchFrontend(pickle.load(f)) + self.buildmtime = path.getmtime(self.buildfile) + self.cache.clear() + finally: + env_lock.release() + + + def search(self, req): + """ + Search the database. Currently just a keyword based search. + """ + if not req.args.get('q'): + return RedirectResponse('') + return RedirectResponse('q/%s/' % req.args['q']) + + + def get_page_source(self, page): + """ + Get the reST source of a page. + """ + page_id = self.env.get_real_filename(page) + if page_id is None: + raise NotFound() + filename = path.join(self.data_root, 'sources', page_id)[:-3] + 'txt' + with file(filename) as f: + return page_id, f.read() + + + def show_source(self, req, page): + """ + Show the highlighted source for a given page. + """ + return Response(self.get_page_source(page)[1], mimetype='text/plain') + + + def suggest_changes(self, req, page): + """ + Show a "suggest changes" form. + """ + page_id, contents = self.get_page_source(page) + + return Response(render_template(req, 'edit.html', self.globalcontext, dict( + contents=contents, + pagename=page, + doctitle=self.globalcontext['titles'].get(page_id) or 'this page', + submiturl=relative_uri('/@edit/'+page+'/', '/@submit/'+page), + ))) + + def _generate_preview(self, page_id, contents): + """ + Generate a preview for suggested changes. 
+ """ + handle, pathname = tempfile.mkstemp() + os.write(handle, contents.encode('utf-8')) + os.close(handle) + + warning_stream = StringIO.StringIO() + env2 = copy.deepcopy(self.env) + destination = StringOutput(encoding='utf-8') + writer = HTMLWriter(env2.config) + doctree = env2.read_file(page_id, pathname, save_parsed=False) + doctree = env2.get_and_resolve_doctree(page_id, MockBuilder(), doctree) + doctree.settings = OptionParser(defaults=env2.settings, + components=(writer,)).get_default_values() + doctree.reporter = Reporter(page_id, 2, 4, stream=warning_stream) + output = writer.write(doctree, destination) + writer.assemble_parts() + return writer.parts['fragment'] + + + def submit_changes(self, req, page): + """ + Submit the suggested changes as a patch. + """ + if req.method != 'POST': + # only available via POST + raise NotFound() + if req.form.get('cancel'): + # handle cancel requests directly + return RedirectResponse(page) + # raises NotFound if page doesn't exist + page_id, orig_contents = self.get_page_source(page) + author = req.form.get('name') + email = req.form.get('email') + summary = req.form.get('summary') + contents = req.form.get('contents') + fields = (author, email, summary, contents) + + form_error = None + rendered = None + + if not all(fields): + form_error = 'You have to fill out all fields.' + elif not _mail_re.search(email): + form_error = 'You have to provide a valid e-mail address.' + elif req.form.get('homepage') or self.antispam.is_spam(fields): + form_error = 'Your text contains blocked URLs or words.' 
+ else: + if req.form.get('preview'): + rendered = self._generate_preview(page_id, contents) + + else: + asctime = time.asctime() + contents = contents.splitlines() + orig_contents = orig_contents.splitlines() + diffname = 'suggestion on %s by %s <%s>' % (asctime, author, email) + diff = difflib.unified_diff(orig_contents, contents, n=3, + fromfile=page_id, tofile=diffname, + lineterm='') + diff_text = '\n'.join(diff) + try: + mail = Email( + self.config['patch_mail_from'], 'Python Documentation Patches', + self.config['patch_mail_to'], '', + 'Patch for %s by %s' % (page_id, author), + PATCH_MESSAGE % locals(), + self.config['patch_mail_smtp'], + ) + mail.attachments.add_string('patch.diff', diff_text, 'text/x-diff') + mail.send() + except: + import traceback + traceback.print_exc() + # XXX: how to report? + pass + return Response(render_template(req, 'submitted.html', + self.globalcontext, dict( + backlink=relative_uri('/@submit/'+page+'/', page+'/') + ))) + + return Response(render_template(req, 'edit.html', self.globalcontext, dict( + contents=contents, + author=author, + email=email, + summary=summary, + pagename=page, + form_error=form_error, + rendered=rendered, + submiturl=relative_uri('/@edit/'+page+'/', '/@submit/'+page), + ))) + + + def get_settings_page(self, req): + """ + Handle the settings page. 
+ """ + referer = req.environ.get('HTTP_REFERER') or '' + if referer: + base = get_base_uri(req.environ) + if not referer.startswith(base): + referer = '' + else: + referer = referer[len(base):] + referer = referer.rpartition('?')[0] or referer + + if req.method == 'POST': + if req.form.get('cancel'): + if req.form.get('referer'): + return RedirectResponse(req.form['referer']) + return RedirectResponse('') + new_style = req.form.get('design') + if new_style and new_style in known_designs: + req.session['design'] = new_style + new_comments = req.form.get('comments') + if new_comments and new_comments in comments_methods: + req.session['comments'] = new_comments + if req.form.get('goback') and req.form.get('referer'): + return RedirectResponse(req.form['referer']) + # else display the same page again + referer = '' + + context = { + 'known_designs': sorted(known_designs.iteritems()), + 'comments_methods': comments_methods.items(), + 'curdesign': req.session.get('design') or 'default', + 'curcomments': req.session.get('comments') or 'inline', + 'referer': referer, + } + + return Response(render_template(req, 'settings.html', + self.globalcontext, context)) + + + @cached + def get_module_index(self, req): + """ + Get the module index or redirect to a module from the module index. 
+ """ + most_frequent = heapq.nlargest(30, self.freqmodules.iteritems(), + lambda x: x[1]) + most_frequent = [{ + 'name': x[0], + 'size': 100 + math.log(x[1] or 1) * 20, + 'count': x[1] + } for x in sorted(most_frequent)] + + showpf = None + newpf = req.args.get('pf') + sesspf = req.session.get('pf') + if newpf or sesspf: + yield NoCache + if newpf: + req.session['pf'] = showpf = req.args.getlist('pf') + else: + showpf = sesspf + else: + if most_frequent != self.last_most_frequent: + self.cache.pop('@modindex', None) + yield '@modindex' + + filename = path.join(self.data_root, 'modindex.fpickle') + with open(filename, 'rb') as f: + context = pickle.load(f) + if showpf: + entries = context['modindexentries'] + i = 0 + while i < len(entries): + if entries[i][6]: + for pform in entries[i][6]: + if pform in showpf: + break + else: + del entries[i] + continue + i += 1 + context['freqentries'] = most_frequent + context['showpf'] = showpf or context['platforms'] + self.last_most_frequent = most_frequent + yield render_template(req, 'modindex.html', + self.globalcontext, context) + + def show_comment_form(self, req, page): + """ + Show the "new comment" form. 
+ """ + page_id = self.env.get_real_filename(page) + ajax_mode = req.args.get('mode') == 'ajax' + target = req.args.get('target') + page_comment_mode = not target + + form_error = preview = None + title = req.form.get('title', '').strip() + if 'author' in req.form: + author = req.form['author'] + else: + author = req.session.get('author', '') + if 'author_mail' in req.form: + author_mail = req.form['author_mail'] + else: + author_mail = req.session.get('author_mail', '') + comment_body = req.form.get('comment_body', '') + fields = (title, author, author_mail, comment_body) + + if req.method == 'POST': + if req.form.get('preview'): + preview = Comment(page_id, target, title, author, author_mail, + comment_body) + # 'homepage' is a forbidden field to thwart bots + elif req.form.get('homepage') or self.antispam.is_spam(fields): + form_error = 'Your text contains blocked URLs or words.' + else: + if not all(fields): + form_error = 'You have to fill out all fields.' + elif _mail_re.search(author_mail) is None: + form_error = 'You have to provide a valid e-mail address.' + elif len(comment_body) < 20: + form_error = 'You comment is too short ' \ + '(must have at least 20 characters).' 
+ else: + # '|none' can stay since it doesn't include comments + self.cache.pop(page_id + '|inline', None) + self.cache.pop(page_id + '|bottom', None) + comment = Comment(page_id, target, + title, author, author_mail, + comment_body) + comment.save() + req.session['author'] = author + req.session['author_mail'] = author_mail + if ajax_mode: + return JSONResponse({'posted': True, 'error': False, + 'commentID': comment.comment_id}) + return RedirectResponse(comment.url) + + output = render_template(req, '_commentform.html', { + 'ajax_mode': ajax_mode, + 'preview': preview, + 'suggest_url': '@edit/%s/' % page, + 'comments_form': { + 'target': target, + 'title': title, + 'author': author, + 'author_mail': author_mail, + 'comment_body': comment_body, + 'error': form_error + } + }) + + if ajax_mode: + return JSONResponse({ + 'body': output, + 'error': bool(form_error), + 'posted': False + }) + return Response(render_template(req, 'commentform.html', { + 'form': output + })) + + def _insert_comments(self, req, url, context, mode): + """ + Insert inline comments into a page context. 
+ """ + if 'body' not in context: + return + + comment_url = '@comments/%s/' % url + page_id = self.env.get_real_filename(url) + tx = context['body'] + all_comments = Comment.get_for_page(page_id) + global_comments = [] + for name, comments in groupby(all_comments, lambda x: x.associated_name): + if not name: + global_comments.extend(comments) + continue + comments = list(comments) + if not comments: + continue + tx = re.sub('' % name, + render_template(req, 'inlinecomments.html', { + 'comments': comments, + 'id': name, + 'comment_url': comment_url, + 'mode': mode}), + tx) + if mode == 'bottom': + global_comments.extend(comments) + if mode == 'inline': + # replace all markers for items without comments + tx = re.sub('', + (lambda match: + render_template(req, 'inlinecomments.html', { + 'id': match.group(1), + 'mode': 'inline', + 'comment_url': comment_url + },)), + tx) + tx += render_template(req, 'comments.html', { + 'comments': global_comments, + 'comment_url': comment_url + }) + context['body'] = tx + + + @cached + def get_page(self, req, url): + """ + Show the requested documentation page or raise an + `NotFound` exception to display a page with close matches. + """ + page_id = self.env.get_real_filename(url) + if page_id is None: + raise NotFound(show_keyword_matches=True) + # increment view count of all modules on that page + for modname in self.env.filemodules.get(page_id, ()): + self.freqmodules[modname] += 1 + # comments enabled? + comments = self.env.metadata[page_id].get('comments_enabled', True) + + # how does the user want to view comments? + commentmode = req.session.get('comments', 'inline') if comments else '' + + # show "old URL" message? 
-> no caching possible + oldurl = req.args.get('oldurl') + if oldurl: + yield NoCache + else: + # there must be different cache entries per comment mode + yield page_id + '|' + commentmode + + # cache miss; load the page and render it + filename = path.join(self.data_root, page_id[:-3] + 'fpickle') + with open(filename, 'rb') as f: + context = pickle.load(f) + + # add comments to paqe text + if commentmode != 'none': + self._insert_comments(req, url, context, commentmode) + + yield render_template(req, 'page.html', self.globalcontext, context, + {'oldurl': oldurl}) + + + @cached + def get_special_page(self, req, name): + yield '@'+name + filename = path.join(self.data_root, name + '.fpickle') + with open(filename, 'rb') as f: + context = pickle.load(f) + yield render_template(req, name+'.html', + self.globalcontext, context) + + + def comments_feed(self, req, url): + if url == 'recent': + feed = Feed(req, 'Recent Comments', 'Recent Comments', '') + for comment in Comment.get_recent(): + feed.add_item(comment.title, comment.author, comment.url, + comment.parsed_comment_body, comment.pub_date) + else: + page_id = self.env.get_real_filename(url) + doctitle = striptags(self.globalcontext['titles'].get(page_id, url)) + feed = Feed(req, 'Comments for "%s"' % doctitle, + 'List of comments for the topic "%s"' % doctitle, url) + for comment in Comment.get_for_page(page_id): + feed.add_item(comment.title, comment.author, comment.url, + comment.parsed_comment_body, comment.pub_date) + return Response(feed.generate(), mimetype='application/rss+xml') + + + def get_error_404(self, req): + """ + Show a simple error 404 page. 
+ """ + return Response(render_template(req, 'not_found.html', self.globalcontext)) + + + pretty_type = { + 'data': 'module data', + 'cfunction': 'C function', + 'cmember': 'C member', + 'cmacro': 'C macro', + 'ctype': 'C type', + 'cvar': 'C variable', + } + + def get_keyword_matches(self, req, term=None, avoid_fuzzy=False, + is_error_page=False): + """ + Find keyword matches. If there is an exact match, just redirect: + http://docs.python.org/os.path.exists would automatically + redirect to http://docs.python.org/modules/os.path/#os.path.exists. + Else, show a page with close matches. + + Module references are processed first so that "os.path" is handled as + a module and not as member of os. + """ + if term is None: + term = req.path.strip('/') + + matches = self.env.find_keyword(term, avoid_fuzzy) + + # if avoid_fuzzy is False matches can be None + if matches is None: + return + + if isinstance(matches, tuple): + url = get_target_uri(matches[1]) + if matches[0] != 'module': + url += '#' + matches[2] + return RedirectResponse(url) + else: + # get some close matches + close_matches = [] + good_matches = 0 + for ratio, type, filename, anchorname, desc in matches: + link = get_target_uri(filename) + if type != 'module': + link += '#' + anchorname + good_match = ratio > 0.75 + good_matches += good_match + close_matches.append({ + 'href': relative_uri(req.path, link), + 'title': anchorname, + 'good_match': good_match, + 'type': self.pretty_type.get(type, type), + 'description': desc, + }) + return Response(render_template(req, 'keyword_not_found.html', { + 'close_matches': close_matches, + 'good_matches_count': good_matches, + 'keyword': term + }, self.globalcontext), status=404 if is_error_page else 404) + + + def get_user_stylesheet(self, req): + """ + Stylesheets are exchangeable. Handle them here and + cache them on the server side until server shuts down + and on the client side for 1 hour (not in debug mode). 
+ """ + style = req.session.get('design') + if style not in known_designs: + style = 'default' + + if style in self.generated_stylesheets: + stylesheet = self.generated_stylesheets[style] + else: + stylesheet = [] + for filename in known_designs[style][0]: + with file(path.join(self.data_root, 'style', filename)) as f: + stylesheet.append(f.read()) + stylesheet = '\n'.join(stylesheet) + if not self.config.get('debug'): + self.generated_stylesheets[style] = stylesheet + + if req.args.get('admin') == 'yes': + with file(path.join(self.data_root, 'style', 'admin.css')) as f: + stylesheet += '\n' + f.read() + + # XXX: add timestamp based http caching + return Response(stylesheet, mimetype='text/css') + + def __call__(self, environ, start_response): + """ + Dispatch requests. + """ + set_connection(self.db_con) + req = Request(environ) + url = req.path.strip('/') or 'index' + + # check if the environment was updated + new_mtime = path.getmtime(self.buildfile) + if self.buildmtime != new_mtime: + self.load_env(new_mtime) + + try: + if req.path == 'favicon.ico': + # TODO: change this to real favicon? + resp = self.get_error_404() + elif not req.path.endswith('/') and req.method == 'GET': + # may be an old URL + if url.endswith('.html'): + resp = handle_html_url(self, url) + else: + # else, require a trailing slash on GET requests + # this ensures nice looking urls and working relative + # links for cached resources. 
+ query = req.environ.get('QUERY_STRING', '') + resp = RedirectResponse(req.path + '/' + (query and '?'+query)) + # index page is special + elif url == 'index': + # presets for settings + if req.args.get('design') and req.args['design'] in known_designs: + req.session['design'] = req.args['design'] + if req.args.get('comments') and req.args['comments'] in comments_methods: + req.session['comments'] = req.args['comments'] + # alias for fuzzy search + if 'q' in req.args: + resp = RedirectResponse('q/%s/' % req.args['q']) + # stylesheet + elif req.args.get('do') == 'stylesheet': + resp = self.get_user_stylesheet(req) + else: + resp = self.get_special_page(req, 'index') + # go to the search page + # XXX: this is currently just a redirect to /q/ which is handled below + elif url == 'search': + resp = self.search(req) + # settings page cannot be cached + elif url == 'settings': + resp = self.get_settings_page(req) + # module index page is special + elif url == 'modindex': + resp = self.get_module_index(req) + # genindex page is special too + elif url == 'genindex': + resp = self.get_special_page(req, 'genindex') + # start the fuzzy search + elif url[:2] == 'q/': + resp = self.get_keyword_matches(req, url[2:]) + # special URLs + elif url[0] == '@': + # source view + if url[:8] == '@source/': + resp = self.show_source(req, url[8:]) + # suggest changes view + elif url[:6] == '@edit/': + resp = self.suggest_changes(req, url[6:]) + # suggest changes submit + elif url[:8] == '@submit/': + resp = self.submit_changes(req, url[8:]) + # show that comment form + elif url[:10] == '@comments/': + resp = self.show_comment_form(req, url[10:]) + # comments RSS feed + elif url[:5] == '@rss/': + resp = self.comments_feed(req, url[5:]) + # dispatch requests to the admin panel + elif url == '@admin' or url[:7] == '@admin/': + resp = self.admin_panel.dispatch(req, url[7:]) + else: + raise NotFound() + # everything else is handled as page or fuzzy search + # if a page does not exist. 
+ else: + resp = self.get_page(req, url) + # views can raise a NotFound exception to show an error page. + # Either a real not found page or a similar matches page. + except NotFound, e: + if e.show_keyword_matches: + resp = self.get_keyword_matches(req, is_error_page=True) + else: + resp = self.get_error_404(req) + return resp(environ, start_response) + + +def _check_superuser(app): + """Check if there is a superuser and create one if necessary.""" + if not app.userdb.users: + print 'Warning: you have no user database or no master "admin" account.' + create = raw_input('Do you want to create an admin account now? [y/n] ') + if not create or create.lower().startswith('y'): + import getpass + print 'Creating "admin" user.' + pw1 = getpass.getpass('Enter password: ') + pw2 = getpass.getpass('Enter password again: ') + if pw1 != pw2: + print 'Error: Passwords don\'t match.' + sys.exit(1) + app.userdb.set_password('admin', pw1) + app.userdb.privileges['admin'].add('master') + app.userdb.save() + + +def setup_app(config, check_superuser=False): + """ + Create the WSGI application based on a configuration dict. + Handled configuration values so far: + + `data_root_path` + the folder containing the documentation data as generated + by sphinx with the web builder. + """ + app = DocumentationApplication(config) + if check_superuser: + _check_superuser(app) + app = SharedDataMiddleware(app, { + '/style': path.join(config['data_root_path'], 'style') + }) + return app diff --git a/sphinx/web/database.py b/sphinx/web/database.py new file mode 100644 index 000000000..3dc5b15e7 --- /dev/null +++ b/sphinx/web/database.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.database + ~~~~~~~~~~~~~~~~~~~ + + The database connections are thread local. To set the connection + for a thread use the `set_connection` function provided. 
The + `connect` method automatically sets up new tables and returns a + usable connection which is also set as the connection for the + thread that called that function. + + :copyright: 2007 by Georg Brandl, Armin Ronacher. + :license: Python license. +""" +import time +import sqlite3 +from datetime import datetime +from threading import local + +from .markup import markup + + +_thread_local = local() + + +def connect(path): + """Connect and create tables if required. Also assigns + the connection for the current thread.""" + con = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES) + con.isolation_level = None + + # create tables that do not exist. + for table in tables: + try: + con.execute('select * from %s limit 1;' % table) + except sqlite3.OperationalError: + con.execute(tables[table]) + + set_connection(con) + return con + + +def get_cursor(): + """Return a new cursor.""" + return _thread_local.connection.cursor() + + +def set_connection(con): + """Call this after thread creation to make this connection + the connection for this thread.""" + _thread_local.connection = con + + +#: tables that we use +tables = { + 'comments': ''' + create table comments ( + comment_id integer primary key, + associated_page varchar(200), + associated_name varchar(200), + title varchar(120), + author varchar(200), + author_mail varchar(250), + comment_body text, + pub_date timestamp + );''' +} + + +class Comment(object): + """ + Represents one comment. 
+ """ + + def __init__(self, associated_page, associated_name, title, author, + author_mail, comment_body, pub_date=None): + self.comment_id = None + self.associated_page = associated_page + self.associated_name = associated_name + self.title = title + if pub_date is None: + pub_date = datetime.utcnow() + self.pub_date = pub_date + self.author = author + self.author_mail = author_mail + self.comment_body = comment_body + + @property + def url(self): + return '%s#comment-%s' % ( + self.associated_page[:-4], + self.comment_id + ) + + @property + def parsed_comment_body(self): + from .util import get_target_uri + from ..util import relative_uri + uri = get_target_uri(self.associated_page) + def make_rel_link(keyword): + return relative_uri(uri, 'q/%s/' % keyword) + return markup(self.comment_body, make_rel_link) + + def save(self): + """ + Save the comment and use the cursor provided. + """ + cur = get_cursor() + args = (self.associated_page, self.associated_name, self.title, + self.author, self.author_mail, self.comment_body, self.pub_date) + if self.comment_id is None: + cur.execute('''insert into comments (associated_page, associated_name, + title, + author, author_mail, + comment_body, pub_date) + values (?, ?, ?, ?, ?, ?, ?)''', args) + self.comment_id = cur.lastrowid + else: + args += (self.comment_id,) + cur.execute('''update comments set associated_page=?, + associated_name=?, + title=?, author=?, + author_mail=?, comment_body=?, + pub_date=? 
where comment_id = ?''', args) + cur.close() + + def delete(self): + cur = get_cursor() + cur.execute('delete from comments where comment_id = ?', + (self.comment_id,)) + cur.close() + + @staticmethod + def _make_comment(row): + rv = Comment(*row[1:]) + rv.comment_id = row[0] + return rv + + @staticmethod + def get(comment_id): + cur = get_cursor() + cur.execute('select * from comments where comment_id = ?', (comment_id,)) + row = cur.fetchone() + if row is None: + raise ValueError('comment not found') + try: + return Comment._make_comment(row) + finally: + cur.close() + + @staticmethod + def get_for_page(associated_page, reverse=False): + cur = get_cursor() + cur.execute('''select * from comments where associated_page = ? + order by associated_name, comment_id %s''' % + ('desc' if reverse else 'asc'), + (associated_page,)) + try: + return [Comment._make_comment(row) for row in cur] + finally: + cur.close() + + @staticmethod + def get_recent(n=10): + cur = get_cursor() + cur.execute('select * from comments order by comment_id desc limit ?', + (n,)) + try: + return [Comment._make_comment(row) for row in cur] + finally: + cur.close() + + @staticmethod + def get_overview(detail_for=None): + cur = get_cursor() + cur.execute('''select distinct associated_page from comments + order by associated_page asc''') + pages = [] + for row in cur: + page_id = row[0] + if page_id == detail_for: + pages.append((page_id, Comment.get_for_page(page_id, True))) + else: + pages.append((page_id, [])) + cur.close() + return pages + + def __repr__(self): + return '' % ( + self.author, + self.associated_page, + self.associated_name, + self.comment_id or 'not saved' + ) diff --git a/sphinx/web/feed.py b/sphinx/web/feed.py new file mode 100644 index 000000000..cd89ea85c --- /dev/null +++ b/sphinx/web/feed.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.feed + ~~~~~~~~~~~~~~~ + + Nifty module that generates RSS feeds. + + :copyright: 2007 by Armin Ronacher. 
+ :license: Python license. +""" +import time +from datetime import datetime +from xml.dom.minidom import Document +from email.Utils import formatdate + + +def format_rss_date(date): + """ + Pass it a datetime object to receive the string representation + for RSS date fields. + """ + return formatdate(time.mktime(date.timetuple()) + date.microsecond / 1e6) + + +class Feed(object): + """ + Abstract feed creation class. To generate feeds use one of + the subclasses `RssFeed` or `AtomFeed`. + """ + + def __init__(self, req, title, description, link): + self.req = req + self.title = title + self.description = description + self.link = req.make_external_url(link) + self.items = [] + self._last_update = None + + def add_item(self, title, author, link, description, pub_date): + if self._last_update is None or pub_date > self._last_update: + self._last_update = pub_date + date = pub_date or datetime.utcnow() + self.items.append({ + 'title': title, + 'author': author, + 'link': self.req.make_external_url(link), + 'description': description, + 'pub_date': date + }) + + def generate(self): + return self.generate_document().toxml('utf-8') + + def generate_document(self): + doc = Document() + Element = doc.createElement + Text = doc.createTextNode + + rss = doc.appendChild(Element('rss')) + rss.setAttribute('version', '2.0') + + channel = rss.appendChild(Element('channel')) + for key in ('title', 'description', 'link'): + value = getattr(self, key) + channel.appendChild(Element(key)).appendChild(Text(value)) + date = format_rss_date(self._last_update or datetime.utcnow()) + channel.appendChild(Element('pubDate')).appendChild(Text(date)) + + for item in self.items: + d = Element('item') + for key in ('title', 'author', 'link', 'description'): + d.appendChild(Element(key)).appendChild(Text(item[key])) + pub_date = format_rss_date(item['pub_date']) + d.appendChild(Element('pubDate')).appendChild(Text(pub_date)) + d.appendChild(Element('guid')).appendChild(Text(item['link'])) + 
channel.appendChild(d) + + return doc diff --git a/sphinx/web/mail.py b/sphinx/web/mail.py new file mode 100644 index 000000000..631ead672 --- /dev/null +++ b/sphinx/web/mail.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.mail + ~~~~~~~~~~~~~~~ + + A simple module for sending e-mails, based on simplemail.py. + + :copyright: 2004-2007 by Gerold Penz. + 2007 by Georg Brandl. + :license: Python license. +""" + +import os.path +import sys +import time +import smtplib +import mimetypes + +from email import Encoders +from email.Header import Header +from email.MIMEText import MIMEText +from email.MIMEMultipart import MIMEMultipart +from email.Utils import formataddr +from email.Utils import formatdate +from email.Message import Message +from email.MIMEAudio import MIMEAudio +from email.MIMEBase import MIMEBase +from email.MIMEImage import MIMEImage + + + +# Exceptions +#---------------------------------------------------------------------- +class SimpleMail_Exception(Exception): + def __str__(self): + return self.__doc__ + +class NoFromAddress_Exception(SimpleMail_Exception): + pass + +class NoToAddress_Exception(SimpleMail_Exception): + pass + +class NoSubject_Exception(SimpleMail_Exception): + pass + +class AttachmentNotFound_Exception(SimpleMail_Exception): + pass + + +class Attachments(object): + def __init__(self): + self._attachments = [] + + def add_filename(self, filename = ''): + self._attachments.append(('file', filename)) + + def add_string(self, filename, text, mimetype): + self._attachments.append(('string', (filename, text, mimetype))) + + def count(self): + return len(self._attachments) + + def get_list(self): + return self._attachments + + +class Recipients(object): + def __init__(self): + self._recipients = [] + + def add(self, address, caption = ''): + self._recipients.append(formataddr((caption, address))) + + def count(self): + return len(self._recipients) + + def __repr__(self): + return str(self._recipients) + + def get_list(self): 
+ return self._recipients + + +class CCRecipients(Recipients): + pass + + +class BCCRecipients(Recipients): + pass + + +class Email(object): + + def __init__( + self, + from_address = "", + from_caption = "", + to_address = "", + to_caption = "", + subject = "", + message = "", + smtp_server = "localhost", + smtp_user = "", + smtp_password = "", + user_agent = "", + reply_to_address = "", + reply_to_caption = "", + use_tls = False, + ): + """ + Initialize the email object + from_address = the email address of the sender + from_caption = the caption (name) of the sender + to_address = the email address of the recipient + to_caption = the caption (name) of the recipient + subject = the subject of the email message + message = the body text of the email message + smtp_server = the ip-address or the name of the SMTP-server + smtp_user = (optional) Login name for the SMTP-Server + smtp_password = (optional) Password for the SMTP-Server + user_agent = (optional) program identification + reply_to_address = (optional) Reply-to email address + reply_to_caption = (optional) Reply-to caption (name) + use_tls = (optional) True, if the connection should use TLS + to encrypt. + """ + + self.from_address = from_address + self.from_caption = from_caption + self.recipients = Recipients() + self.cc_recipients = CCRecipients() + self.bcc_recipients = BCCRecipients() + if to_address: + self.recipients.add(to_address, to_caption) + self.subject = subject + self.message = message + self.smtp_server = smtp_server + self.smtp_user = smtp_user + self.smtp_password = smtp_password + self.attachments = Attachments() + self.content_subtype = "plain" + self.content_charset = "iso-8859-1" + self.header_charset = "us-ascii" + self.statusdict = None + self.user_agent = user_agent + self.reply_to_address = reply_to_address + self.reply_to_caption = reply_to_caption + self.use_tls = use_tls + + + def send(self): + """ + Send the mail. Returns True if successfully sent to at least one + recipient. 
+ """ + + # validation + if len(self.from_address.strip()) == 0: + raise NoFromAddress_Exception + if self.recipients.count() == 0: + if ( + (self.cc_recipients.count() == 0) and + (self.bcc_recipients.count() == 0) + ): + raise NoToAddress_Exception + if len(self.subject.strip()) == 0: + raise NoSubject_Exception + + # assemble + if self.attachments.count() == 0: + msg = MIMEText( + _text = self.message, + _subtype = self.content_subtype, + _charset = self.content_charset + ) + else: + msg = MIMEMultipart() + if self.message: + att = MIMEText( + _text = self.message, + _subtype = self.content_subtype, + _charset = self.content_charset + ) + msg.attach(att) + + # add headers + from_str = formataddr((self.from_caption, self.from_address)) + msg["From"] = from_str + if self.reply_to_address: + reply_to_str = formataddr((self.reply_to_caption, self.reply_to_address)) + msg["Reply-To"] = reply_to_str + if self.recipients.count() > 0: + msg["To"] = ", ".join(self.recipients.get_list()) + if self.cc_recipients.count() > 0: + msg["Cc"] = ", ".join(self.cc_recipients.get_list()) + msg["Date"] = formatdate(time.time()) + msg["User-Agent"] = self.user_agent + try: + msg["Subject"] = Header( + self.subject, self.header_charset + ) + except(UnicodeDecodeError): + msg["Subject"] = Header( + self.subject, self.content_charset + ) + msg.preamble = "You will not see this in a MIME-aware mail reader.\n" + msg.epilogue = "" + + # assemble multipart + if self.attachments.count() > 0: + for typ, info in self.attachments.get_list(): + if typ == 'file': + filename = info + if not os.path.isfile(filename): + raise AttachmentNotFound_Exception, filename + mimetype, encoding = mimetypes.guess_type(filename) + if mimetype is None or encoding is not None: + mimetype = 'application/octet-stream' + if mimetype.startswith('text/'): + fp = file(filename) + else: + fp = file(filename, 'rb') + text = fp.read() + fp.close() + else: + filename, text, mimetype = info + maintype, subtype = 
mimetype.split('/', 1) + if maintype == 'text': + # Note: we should handle calculating the charset + att = MIMEText(text, _subtype=subtype) + elif maintype == 'image': + att = MIMEImage(text, _subtype=subtype) + elif maintype == 'audio': + att = MIMEAudio(text, _subtype=subtype) + else: + att = MIMEBase(maintype, subtype) + att.set_payload(text) + # Encode the payload using Base64 + Encoders.encode_base64(att) + # Set the filename parameter + att.add_header( + 'Content-Disposition', + 'attachment', + filename = os.path.basename(filename).strip() + ) + msg.attach(att) + + # connect to server + smtp = smtplib.SMTP() + if self.smtp_server: + smtp.connect(self.smtp_server) + else: + smtp.connect() + + # TLS? + if self.use_tls: + smtp.ehlo() + smtp.starttls() + smtp.ehlo() + + # authenticate + if self.smtp_user: + smtp.login(user = self.smtp_user, password = self.smtp_password) + + # send + self.statusdict = smtp.sendmail( + from_str, + ( + self.recipients.get_list() + + self.cc_recipients.get_list() + + self.bcc_recipients.get_list() + ), + msg.as_string() + ) + smtp.close() + + return True diff --git a/sphinx/web/markup.py b/sphinx/web/markup.py new file mode 100644 index 000000000..84a82322a --- /dev/null +++ b/sphinx/web/markup.py @@ -0,0 +1,239 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.markup + ~~~~~~~~~~~~~~~~~ + + Awfully simple markup used in comments. Syntax: + + `this is some ` + like in HTML + + ``this is like ` just that i can contain backticks`` + like in HTML + + *emphasized* + translates to + + **strong** + translates to + + !!!very important message!!! + use this to mark important or dangerous things. + Translates to + + [[http://www.google.com/]] + Simple link with the link target as caption. If the + URL is relative the provided callback is called to get + the full URL. + + [[http://www.google.com/ go to google]] + Link with "go to google" as caption. 

        preformatted code that could be Python code
    Python code (most of the time), otherwise preformatted.

    cite someone
        Like
    in HTML. + + :copyright: 2007 by Armin Ronacher. + :license: Python license. +""" +import cgi +import re +from urlparse import urlparse + +from ..highlighting import highlight_block + + +inline_formatting = { + 'escaped_code': ('``', '``'), + 'code': ('`', '`'), + 'strong': ('**', '**'), + 'emphasized': ('*', '*'), + 'important': ('!!!', '!!!'), + 'link': ('[[', ']]'), + 'quote': ('', ''), + 'code_block': ('', ''), + 'paragraph': (r'\n{2,}', None), + 'newline': (r'\\$', None) +} + +simple_formattings = { + 'strong_begin': '', + 'strong_end': '', + 'emphasized_begin': '', + 'emphasized_end': '', + 'important_begin': '', + 'important_end': '', + 'quote_begin': '
    ', + 'quote_end': '
    ' +} + +raw_formatting = set(['link', 'code', 'escaped_code', 'code_block']) + +formatting_start_re = re.compile('|'.join( + '(?P<%s>%s)' % (name, end is not None and re.escape(start) or start) + for name, (start, end) + in sorted(inline_formatting.items(), key=lambda x: -len(x[1][0])) +), re.S | re.M) + +formatting_end_res = dict( + (name, re.compile(re.escape(end))) for name, (start, end) + in inline_formatting.iteritems() if end is not None +) + +without_end_tag = set(name for name, (_, end) in inline_formatting.iteritems() + if end is None) + + + +class StreamProcessor(object): + + def __init__(self, stream): + self._pushed = [] + self._stream = stream + + def __iter__(self): + return self + + def next(self): + if self._pushed: + return self._pushed.pop() + return self._stream.next() + + def push(self, token, data): + self._pushed.append((token, data)) + + def get_data(self, drop_needle=False): + result = [] + try: + while True: + token, data = self.next() + if token != 'text': + if not drop_needle: + self.push(token, data) + break + result.append(data) + except StopIteration: + pass + return ''.join(result) + + +class MarkupParser(object): + + def __init__(self, make_rel_url): + self.make_rel_url = make_rel_url + + def tokenize(self, text): + text = '\n'.join(text.splitlines()) + last_pos = 0 + pos = 0 + end = len(text) + stack = [] + text_buffer = [] + + while pos < end: + if stack: + m = formatting_end_res[stack[-1]].match(text, pos) + if m is not None: + if text_buffer: + yield 'text', ''.join(text_buffer) + del text_buffer[:] + yield stack[-1] + '_end', None + stack.pop() + pos = m.end() + continue + + m = formatting_start_re.match(text, pos) + if m is not None: + if text_buffer: + yield 'text', ''.join(text_buffer) + del text_buffer[:] + + for key, value in m.groupdict().iteritems(): + if value is not None: + if key in without_end_tag: + yield key, None + else: + if key in raw_formatting: + regex = formatting_end_res[key] + m2 = regex.search(text, 
m.end()) + if m2 is None: + yield key, text[m.end():] + else: + yield key, text[m.end():m2.start()] + m = m2 + else: + yield key + '_begin', None + stack.append(key) + break + + if m is None: + break + else: + pos = m.end() + continue + + text_buffer.append(text[pos]) + pos += 1 + + yield 'text', ''.join(text_buffer) + for token in reversed(stack): + yield token + '_end', None + + def stream_to_html(self, text): + stream = StreamProcessor(self.tokenize(text)) + paragraph = [] + result = [] + + def new_paragraph(): + result.append(paragraph[:]) + del paragraph[:] + + for token, data in stream: + if token in simple_formattings: + paragraph.append(simple_formattings[token]) + elif token in ('text', 'escaped_code', 'code'): + if data: + data = cgi.escape(data) + if token in ('escaped_code', 'code'): + data = '%s' % data + paragraph.append(data) + elif token == 'link': + if ' ' in data: + href, caption = data.split(' ', 1) + else: + href = caption = data + protocol = urlparse(href)[0] + nofollow = True + if not protocol: + href = self.make_rel_url(href) + nofollow = False + elif protocol == 'javascript': + href = href[11:] + paragraph.append('%s' % (cgi.escape(href), + ' rel="nofollow"' if nofollow else '', + cgi.escape(caption))) + elif token == 'code_block': + result.append(highlight_block(data, 'python')) + new_paragraph() + elif token == 'paragraph': + new_paragraph() + elif token == 'newline': + paragraph.append('
    ') + + if paragraph: + result.append(paragraph) + for item in result: + if isinstance(item, list): + if item: + yield '

    %s

    ' % ''.join(item) + else: + yield item + + def to_html(self, text): + return ''.join(self.stream_to_html(text)) + + +def markup(text, make_rel_url=lambda x: './' + x): + return MarkupParser(make_rel_url).to_html(text) diff --git a/sphinx/web/oldurls.py b/sphinx/web/oldurls.py new file mode 100644 index 000000000..441dc1152 --- /dev/null +++ b/sphinx/web/oldurls.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.oldurls + ~~~~~~~~~~~~~~~~~~ + + Handle old URLs gracefully. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" + +import re + +from .wsgiutil import RedirectResponse, NotFound + + +_module_re = re.compile(r'module-(.*)\.html') +_modobj_re = re.compile(r'(.*)-objects\.html') +_modsub_re = re.compile(r'(.*?)-(.*)\.html') + + +special_module_names = { + 'main': '__main__', + 'builtin': '__builtin__', + 'future': '__future__', + 'pycompile': 'py_compile', +} + +tutorial_nodes = [ + '', '', '', + 'appetite', + 'interpreter', + 'introduction', + 'controlflow', + 'datastructures', + 'modules', + 'inputoutput', + 'errors', + 'classes', + 'stdlib', + 'stdlib2', + 'whatnow', + 'interactive', + 'floatingpoint', + '', + 'glossary', +] + + +def handle_html_url(req, url): + def inner(): + # global special pages + if url.endswith('/contents.html'): + return 'contents/' + if url.endswith('/genindex.html'): + return 'genindex/' + if url.endswith('/about.html'): + return 'about/' + if url.endswith('/reporting-bugs.html'): + return 'bugs/' + if url == 'modindex.html' or url.endswith('/modindex.html'): + return 'modindex/' + # modules, macmodules + if url[:4] in ('lib/', 'mac/'): + p = '' if url[0] == 'l' else 'mac' + m = _module_re.match(url[4:]) + if m: + mn = m.group(1) + return p + 'modules/' + special_module_names.get(mn, mn) + # module sub-pages + m = _modsub_re.match(url[4:]) + if m and not _modobj_re.match(url[4:]): + mn = m.group(1) + return p + 'modules/' + special_module_names.get(mn, mn) + # XXX: handle all others + # tutorial + 
elif url[:4] == 'tut/': + try: + node = int(url[8:].partition('.html')[0]) + except ValueError: + pass + else: + if tutorial_nodes[node]: + return 'tutorial/' + tutorial_nodes[node] + # installing: all in one (ATM) + elif url[:5] == 'inst/': + return 'install/' + # no mapping for "documenting Python..." + # nothing found + raise NotFound() + return RedirectResponse('%s?oldurl=1' % inner()) diff --git a/sphinx/web/serve.py b/sphinx/web/serve.py new file mode 100644 index 000000000..b5ff12552 --- /dev/null +++ b/sphinx/web/serve.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.serve + ~~~~~~~~~~~~~~~~ + + This module optionally wraps the `wsgiref` module so that it reloads code + automatically. Works with any WSGI application but it won't help in non + `wsgiref` environments. Use it only for development. + + :copyright: 2007 by Armin Ronacher, Georg Brandl. + :license: Python license. +""" +import os +import sys +import time +import thread + + +def reloader_loop(extra_files): + """When this function is run from the main thread, it will force other + threads to exit when any modules currently loaded change. + + :param extra_files: a list of additional files it should watch. + """ + mtimes = {} + while True: + for filename in filter(None, [getattr(module, '__file__', None) + for module in sys.modules.values()] + + extra_files): + while not os.path.isfile(filename): + filename = os.path.dirname(filename) + if not filename: + break + if not filename: + continue + + if filename[-4:] in ('.pyc', '.pyo'): + filename = filename[:-1] + + mtime = os.stat(filename).st_mtime + if filename not in mtimes: + mtimes[filename] = mtime + continue + if mtime > mtimes[filename]: + sys.exit(3) + time.sleep(1) + + +def restart_with_reloader(): + """Spawn a new Python interpreter with the same arguments as this one, + but running the reloader thread.""" + while True: + print '* Restarting with reloader...' 
+ args = [sys.executable] + sys.argv + if sys.platform == 'win32': + args = ['"%s"' % arg for arg in args] + new_environ = os.environ.copy() + new_environ['RUN_MAIN'] = 'true' + exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ) + if exit_code != 3: + return exit_code + + +def run_with_reloader(main_func, extra_watch): + """ + Run the given function in an independent python interpreter. + """ + if os.environ.get('RUN_MAIN') == 'true': + thread.start_new_thread(main_func, ()) + try: + reloader_loop(extra_watch) + except KeyboardInterrupt: + return + try: + sys.exit(restart_with_reloader()) + except KeyboardInterrupt: + pass + + +def run_simple(hostname, port, make_app, use_reloader=False, + extra_files=None): + """ + Start an application using wsgiref and with an optional reloader. + """ + from wsgiref.simple_server import make_server + def inner(): + application = make_app() + print '* Startup complete.' + srv = make_server(hostname, port, application) + try: + srv.serve_forever() + except KeyboardInterrupt: + pass + if os.environ.get('RUN_MAIN') != 'true': + print '* Running on http://%s:%d/' % (hostname, port) + if use_reloader: + run_with_reloader(inner, extra_files or []) + else: + inner() diff --git a/sphinx/web/userdb.py b/sphinx/web/userdb.py new file mode 100644 index 000000000..8ce0560df --- /dev/null +++ b/sphinx/web/userdb.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.userdb + ~~~~~~~~~~~~~~~~~ + + A module that provides pythonic access to the `docusers` file + that stores users and their passwords so that they can gain access + to the administration system. + + :copyright: 2007 by Armin Ronacher. + :license: Python license. +""" +from __future__ import with_statement +from os import path +from hashlib import sha1 +from random import choice, randrange +from collections import defaultdict + + +def gen_password(length=8, add_numbers=True, mix_case=True, + add_special_char=True): + """ + Generate a pronounceable password. 
+ """ + if length <= 0: + raise ValueError('requested password of length <= 0') + consonants = 'bcdfghjklmnprstvwz' + vowels = 'aeiou' + if mix_case: + consonants = consonants * 2 + consonants.upper() + vowels = vowels * 2 + vowels.upper() + pw = ''.join([choice(consonants) + + choice(vowels) + + choice(consonants + vowels) for _ + in xrange(length // 3 + 1)])[:length] + if add_numbers: + n = length // 3 + if n > 0: + pw = pw[:-n] + for _ in xrange(n): + pw += choice('0123456789') + if add_special_char: + tmp = randrange(0, len(pw)) + l1 = pw[:tmp] + l2 = pw[tmp:] + if max(len(l1), len(l2)) == len(l1): + l1 = l1[:-1] + else: + l2 = l2[:-1] + return l1 + choice('#$&%?!') + l2 + return pw + + +class UserDatabase(object): + + def __init__(self, filename): + self.filename = filename + self.users = {} + self.privileges = defaultdict(set) + if path.exists(filename): + with file(filename) as f: + for line in f: + line = line.strip() + if line and line[0] != '#': + parts = line.split(':') + self.users[parts[0]] = parts[1] + self.privileges[parts[0]].update(x for x in + parts[2].split(',') + if x) + + def set_password(self, user, password): + """Encode the password for a user (also adds users).""" + self.users[user] = sha1('%s|%s' % (user, password)).hexdigest() + + def add_user(self, user): + """Add a new user and return the generated password.""" + pw = gen_password(8, add_special_char=False) + self.set_password(user, pw) + self.privileges[user].clear() + return pw + + def check_password(self, user, password): + return user in self.users and \ + self.users[user] == sha1('%s|%s' % (user, password)).hexdigest() + + def save(self): + with file(self.filename, 'w') as f: + for username, password in self.users.iteritems(): + privileges = ','.join(self.privileges.get(username, ())) + f.write('%s:%s:%s\n' % (username, password, privileges)) diff --git a/sphinx/web/util.py b/sphinx/web/util.py new file mode 100644 index 000000000..616c724e4 --- /dev/null +++ b/sphinx/web/util.py 
@@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.util + ~~~~~~~~~~~~~~~ + + Miscellaneous utilities. + + :copyright: 2007 by Georg Brandl. + :license: Python license. +""" +from __future__ import with_statement + +import re +from os import path + +from ..util import relative_uri +from .._jinja import Environment, FileSystemLoader + + +def get_target_uri(source_filename): + """Get the web-URI for a given reST file name.""" + if source_filename == 'index.rst': + return '' + if source_filename.endswith('/index.rst'): + return source_filename[:-9] # up to / + return source_filename[:-4] + '/' + + +# ------------------------------------------------------------------------------ +# Setup the templating environment + +templates_path = path.join(path.dirname(__file__), '..', 'templates') +jinja_env = Environment(loader=FileSystemLoader(templates_path, + use_memcache=True), + friendly_traceback=True) + +def do_datetime_format(): + def wrapped(env, ctx, value): + return value.strftime('%a, %d %b %Y %H:%M') + return wrapped + +jinja_env.filters['datetimeformat'] = do_datetime_format + + +_striptags_re = re.compile(r'(|<[^>]+>)') + +def striptags(text): + return ' '.join(_striptags_re.sub('', text).split()) + + +def render_template(req, template_name, *contexts): + context = {} + for ctx in contexts: + context.update(ctx) + tmpl = jinja_env.get_template(template_name) + + path = req.path.lstrip('/') + if not path[-1:] == '/': + path += '/' + def relative_path_to(otheruri, resource=False): + if not resource: + otheruri = get_target_uri(otheruri) + return relative_uri(path, otheruri) + context['pathto'] = relative_path_to + + # add it here a second time for templates that don't + # get the builder information from the environment (such as search) + context['builder'] = 'web' + context['req'] = req + + return tmpl.render(context) + + +def render_simple_template(template_name, context): + tmpl = jinja_env.get_template(template_name) + return tmpl.render(context) + + 
+class lazy_property(object): + """ + Descriptor implementing a "lazy property", i.e. the function + calculating the property value is called only once. + """ + + def __init__(self, func, name=None, doc=None): + self._func = func + self._name = name or func.func_name + self.__doc__ = doc or func.__doc__ + + def __get__(self, obj, objtype=None): + if obj is None: + return self + value = self._func(obj) + setattr(obj, self._name, value) + return value + + +class blackhole_dict(dict): + def __setitem__(self, key, value): + pass diff --git a/sphinx/web/webconf.py b/sphinx/web/webconf.py new file mode 100644 index 000000000..97377a878 --- /dev/null +++ b/sphinx/web/webconf.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# +# Python documentation web application configuration file +# + +# Where the server listens. +listen_addr = 'localhost' +listen_port = 3000 + +# How patch mails are sent. +patch_mail_from = 'patches@localhost' +patch_mail_to = 'docs@localhost' +patch_mail_smtp = 'localhost' diff --git a/sphinx/web/wsgiutil.py b/sphinx/web/wsgiutil.py new file mode 100644 index 000000000..292863279 --- /dev/null +++ b/sphinx/web/wsgiutil.py @@ -0,0 +1,697 @@ +# -*- coding: utf-8 -*- +""" + sphinx.web.wsgiutil + ~~~~~~~~~~~~~~~~~~~ + + To avoid further dependencies this module collects some of the + classes werkzeug provides and use in other views. + + :copyright: 2007 by Armin Ronacher. + :license: Python license. 
+""" +from __future__ import with_statement + +import cgi +import urllib +import cPickle as pickle +import tempfile +from os import path +from time import gmtime, time, asctime +from random import random +from Cookie import SimpleCookie +from hashlib import sha1 +from datetime import datetime +from cStringIO import StringIO + +from .util import lazy_property +from .json import dump_json + + +HTTP_STATUS_CODES = { + 100: 'CONTINUE', + 101: 'SWITCHING PROTOCOLS', + 102: 'PROCESSING', + 200: 'OK', + 201: 'CREATED', + 202: 'ACCEPTED', + 203: 'NON-AUTHORITATIVE INFORMATION', + 204: 'NO CONTENT', + 205: 'RESET CONTENT', + 206: 'PARTIAL CONTENT', + 207: 'MULTI STATUS', + 300: 'MULTIPLE CHOICES', + 301: 'MOVED PERMANENTLY', + 302: 'FOUND', + 303: 'SEE OTHER', + 304: 'NOT MODIFIED', + 305: 'USE PROXY', + 306: 'RESERVED', + 307: 'TEMPORARY REDIRECT', + 400: 'BAD REQUEST', + 401: 'UNAUTHORIZED', + 402: 'PAYMENT REQUIRED', + 403: 'FORBIDDEN', + 404: 'NOT FOUND', + 405: 'METHOD NOT ALLOWED', + 406: 'NOT ACCEPTABLE', + 407: 'PROXY AUTHENTICATION REQUIRED', + 408: 'REQUEST TIMEOUT', + 409: 'CONFLICT', + 410: 'GONE', + 411: 'LENGTH REQUIRED', + 412: 'PRECONDITION FAILED', + 413: 'REQUEST ENTITY TOO LARGE', + 414: 'REQUEST-URI TOO LONG', + 415: 'UNSUPPORTED MEDIA TYPE', + 416: 'REQUESTED RANGE NOT SATISFIABLE', + 417: 'EXPECTATION FAILED', + 500: 'INTERNAL SERVER ERROR', + 501: 'NOT IMPLEMENTED', + 502: 'BAD GATEWAY', + 503: 'SERVICE UNAVAILABLE', + 504: 'GATEWAY TIMEOUT', + 505: 'HTTP VERSION NOT SUPPORTED', + 506: 'VARIANT ALSO VARIES', + 507: 'INSUFFICIENT STORAGE', + 510: 'NOT EXTENDED' +} + +SID_COOKIE_NAME = 'python_doc_sid' + + +# ------------------------------------------------------------------------------ +# Support for HTTP parameter parsing, requests and responses + + +class _StorageHelper(cgi.FieldStorage): + """ + Helper class used by `Request` to parse submitted file and + form data. Don't use this class directly. 
+ """ + + FieldStorageClass = cgi.FieldStorage + + def __init__(self, environ, get_stream): + cgi.FieldStorage.__init__(self, + fp=environ['wsgi.input'], + environ={ + 'REQUEST_METHOD': environ['REQUEST_METHOD'], + 'CONTENT_TYPE': environ['CONTENT_TYPE'], + 'CONTENT_LENGTH': environ['CONTENT_LENGTH'] + }, + keep_blank_values=True + ) + self.get_stream = get_stream + + def make_file(self, binary=None): + return self.get_stream() + + +class MultiDict(dict): + """ + A dict that takes a list of multiple values as only argument + in order to store multiple values per key. + """ + + def __init__(self, mapping=()): + if isinstance(mapping, MultiDict): + dict.__init__(self, mapping.lists()) + elif isinstance(mapping, dict): + tmp = {} + for key, value in mapping: + tmp[key] = [value] + dict.__init__(self, tmp) + else: + tmp = {} + for key, value in mapping: + tmp.setdefault(key, []).append(value) + dict.__init__(self, tmp) + + def __getitem__(self, key): + """ + Return the first data value for this key; + raises KeyError if not found. 
+ """ + return dict.__getitem__(self, key)[0] + + def __setitem__(self, key, value): + """Set an item as list.""" + dict.__setitem__(self, key, [value]) + + def get(self, key, default=None): + """Return the default value if the requested data doesn't exist""" + try: + return self[key] + except KeyError: + return default + + def getlist(self, key): + """Return an empty list if the requested data doesn't exist""" + try: + return dict.__getitem__(self, key) + except KeyError: + return [] + + def setlist(self, key, new_list): + """Set new values for an key.""" + dict.__setitem__(self, key, list(new_list)) + + def setdefault(self, key, default=None): + if key not in self: + self[key] = default + else: + default = self[key] + return default + + def setlistdefault(self, key, default_list=()): + if key not in self: + default_list = list(default_list) + dict.__setitem__(self, key, default_list) + else: + default_list = self.getlist(key) + return default_list + + def items(self): + """ + Return a list of (key, value) pairs, where value is the last item in + the list associated with the key. 
+ """ + return [(key, self[key]) for key in self.iterkeys()] + + lists = dict.items + + def values(self): + """Returns a list of the last value on every key list.""" + return [self[key] for key in self.iterkeys()] + + listvalues = dict.values + + def iteritems(self): + for key, values in dict.iteritems(self): + yield key, values[0] + + iterlists = dict.iteritems + + def itervalues(self): + for values in dict.itervalues(self): + yield values[0] + + iterlistvalues = dict.itervalues + + def copy(self): + """Return a shallow copy of this object.""" + return self.__class__(self) + + def update(self, other_dict): + """update() extends rather than replaces existing key lists.""" + if isinstance(other_dict, MultiDict): + for key, value_list in other_dict.iterlists(): + self.setlistdefault(key, []).extend(value_list) + elif isinstance(other_dict, dict): + for key, value in other_dict.items(): + self.setlistdefault(key, []).append(value) + else: + for key, value in other_dict: + self.setlistdefault(key, []).append(value) + + def pop(self, *args): + """Pop the first item for a list on the dict.""" + return dict.pop(self, *args)[0] + + def popitem(self): + """Pop an item from the dict.""" + item = dict.popitem(self) + return (item[0], item[1][0]) + + poplist = dict.pop + popitemlist = dict.popitem + + def __repr__(self): + tmp = [] + for key, values in self.iterlists(): + for value in values: + tmp.append((key, value)) + return '%s(%r)' % (self.__class__.__name__, tmp) + + +class Headers(object): + """ + An object that stores some headers. 
+ """ + + def __init__(self, defaults=None): + self._list = [] + if isinstance(defaults, dict): + for key, value in defaults.iteritems(): + if isinstance(value, (tuple, list)): + for v in value: + self._list.append((key, v)) + else: + self._list.append((key, value)) + elif defaults is not None: + for key, value in defaults: + self._list.append((key, value)) + + def __getitem__(self, key): + ikey = key.lower() + for k, v in self._list: + if k.lower() == ikey: + return v + raise KeyError(key) + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def getlist(self, key): + ikey = key.lower() + result = [] + for k, v in self._list: + if k.lower() == ikey: + result.append((k, v)) + return result + + def setlist(self, key, values): + del self[key] + self.addlist(key, values) + + def addlist(self, key, values): + self._list.extend(values) + + def lists(self, lowercased=False): + if not lowercased: + return self._list[:] + return [(x.lower(), y) for x, y in self._list] + + def iterlists(self, lowercased=False): + for key, value in self._list: + if lowercased: + key = key.lower() + yield key, value + + def iterkeys(self): + for key, _ in self.iterlists(): + yield key + + def itervalues(self): + for _, value in self.iterlists(): + yield value + + def keys(self): + return list(self.iterkeys()) + + def values(self): + return list(self.itervalues()) + + def __delitem__(self, key): + key = key.lower() + new = [] + for k, v in self._list: + if k != key: + new.append((k, v)) + self._list[:] = new + + remove = __delitem__ + + def __contains__(self, key): + key = key.lower() + for k, v in self._list: + if k.lower() == key: + return True + return False + + has_key = __contains__ + + def __iter__(self): + return iter(self._list) + + def add(self, key, value): + """add a new header tuple to the list""" + self._list.append((key, value)) + + def clear(self): + """clears all headers""" + del self._list[:] + + def set(self, key, value): + 
"""remove all header tuples for key and add + a new one + """ + del self[key] + self.add(key, value) + + __setitem__ = set + + def to_list(self, charset): + """Create a str only list of the headers.""" + result = [] + for k, v in self: + if isinstance(v, unicode): + v = v.encode(charset) + else: + v = str(v) + result.append((k, v)) + return result + + def copy(self): + return self.__class__(self._list) + + def __repr__(self): + return '%s(%r)' % ( + self.__class__.__name__, + self._list + ) + + +class Session(dict): + + def __init__(self, sid): + self.sid = sid + if sid is not None: + if path.exists(self.filename): + with file(self.filename, 'rb') as f: + self.update(pickle.load(f)) + self._orig = dict(self) + + @property + def filename(self): + if self.sid is not None: + return path.join(tempfile.gettempdir(), '__pydoc_sess' + self.sid) + + @property + def worth_saving(self): + return self != self._orig + + def save(self): + if self.sid is None: + self.sid = sha1('%s|%s' % (time(), random())).hexdigest() + with file(self.filename, 'wb') as f: + pickle.dump(dict(self), f, pickle.HIGHEST_PROTOCOL) + self._orig = dict(self) + + +class Request(object): + charset = 'utf-8' + + def __init__(self, environ): + self.environ = environ + self.environ['werkzeug.request'] = self + self.session = Session(self.cookies.get(SID_COOKIE_NAME)) + self.user = self.session.get('user') + + def login(self, user): + self.user = self.session['user'] = user + + def logout(self): + self.user = None + self.session.pop('user', None) + + def _get_file_stream(self): + """Called to get a stream for the file upload. 
+ + This must provide a file-like class with `read()`, `readline()` + and `seek()` methods that is both writeable and readable.""" + return tempfile.TemporaryFile('w+b') + + def _load_post_data(self): + """Method used internally to retrieve submitted data.""" + self._data = '' + post = [] + files = [] + if self.environ['REQUEST_METHOD'] in ('POST', 'PUT'): + storage = _StorageHelper(self.environ, self._get_file_stream) + for key in storage.keys(): + values = storage[key] + if not isinstance(values, list): + values = [values] + for item in values: + if getattr(item, 'filename', None) is not None: + fn = item.filename.decode(self.charset, 'ignore') + # fix stupid IE bug + if len(fn) > 1 and fn[1] == ':' and '\\' in fn: + fn = fn[fn.index('\\') + 1:] + files.append((key, FileStorage(key, fn, item.type, + item.length, item.file))) + else: + post.append((key, item.value.decode(self.charset, + 'ignore'))) + self._form = MultiDict(post) + self._files = MultiDict(files) + + def read(self, *args): + if not hasattr(self, '_buffered_stream'): + self._buffered_stream = StringIO(self.data) + return self._buffered_stream.read(*args) + + def readline(self, *args): + if not hasattr(self, '_buffered_stream'): + self._buffered_stream = StringIO(self.data) + return self._buffered_stream.readline(*args) + + def make_external_url(self, path): + url = self.environ['wsgi.url_scheme'] + '://' + if 'HTTP_HOST' in self.environ: + url += self.environ['HTTP_HOST'] + else: + url += self.environ['SERVER_NAME'] + if (self.environ['wsgi.url_scheme'], self.environ['SERVER_PORT']) not \ + in (('https', '443'), ('http', '80')): + url += ':' + self.environ['SERVER_PORT'] + + url += urllib.quote(self.environ.get('SCRIPT_INFO', '').rstrip('/')) + if not path.startswith('/'): + path = '/' + path + return url + path + + def args(self): + """URL parameters""" + items = [] + qs = self.environ.get('QUERY_STRING', '') + for key, values in cgi.parse_qs(qs, True).iteritems(): + for value in values: + value = 
value.decode(self.charset, 'ignore') + items.append((key, value)) + return MultiDict(items) + args = lazy_property(args) + + def data(self): + """raw value of input stream.""" + if not hasattr(self, '_data'): + self._load_post_data() + return self._data + data = lazy_property(data) + + def form(self): + """form parameters.""" + if not hasattr(self, '_form'): + self._load_post_data() + return self._form + form = lazy_property(form) + + def files(self): + """File uploads.""" + if not hasattr(self, '_files'): + self._load_post_data() + return self._files + files = lazy_property(files) + + def cookies(self): + """Stored Cookies.""" + cookie = SimpleCookie() + cookie.load(self.environ.get('HTTP_COOKIE', '')) + result = {} + for key, value in cookie.iteritems(): + result[key] = value.value.decode(self.charset, 'ignore') + return result + cookies = lazy_property(cookies) + + def method(self): + """Request method.""" + return self.environ['REQUEST_METHOD'] + method = property(method, doc=method.__doc__) + + def path(self): + """Requested path.""" + path = '/' + (self.environ.get('PATH_INFO') or '').lstrip('/') + path = path.decode(self.charset, self.charset) + parts = path.replace('+', ' ').split('/') + return u'/'.join(p for p in parts if p != '..') + path = lazy_property(path) + + +class Response(object): + charset = 'utf-8' + default_mimetype = 'text/html' + + def __init__(self, response=None, headers=None, status=200, mimetype=None): + if response is None: + self.response = [] + elif isinstance(response, basestring): + self.response = [response] + else: + self.response = iter(response) + if not headers: + self.headers = Headers() + elif isinstance(headers, Headers): + self.headers = headers + else: + self.headers = Headers(headers) + if mimetype is None and 'Content-Type' not in self.headers: + mimetype = self.default_mimetype + if mimetype is not None: + if 'charset=' not in mimetype and mimetype.startswith('text/'): + mimetype += '; charset=' + self.charset + 
self.headers['Content-Type'] = mimetype + self.status = status + self._cookies = None + + def write(self, value): + if not isinstance(self.response, list): + raise RuntimeError('cannot write to streaming response') + self.write = self.response.append + self.response.append(value) + + def set_cookie(self, key, value='', max_age=None, expires=None, + path='/', domain=None, secure=None): + if self._cookies is None: + self._cookies = SimpleCookie() + if isinstance(value, unicode): + value = value.encode(self.charset) + self._cookies[key] = value + if max_age is not None: + self._cookies[key]['max-age'] = max_age + if expires is not None: + if isinstance(expires, basestring): + self._cookies[key]['expires'] = expires + expires = None + elif isinstance(expires, datetime): + expires = expires.utctimetuple() + elif not isinstance(expires, (int, long)): + expires = gmtime(expires) + else: + raise ValueError('datetime or integer required') + month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', + 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][expires.tm_mon - 1] + day = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', + 'Friday', 'Saturday', 'Sunday'][expires.tm_wday] + date = '%02d-%s-%s' % ( + expires.tm_mday, month, str(expires.tm_year)[-2:] + ) + d = '%s, %s %02d:%02d:%02d GMT' % (day, date, expires.tm_hour, + expires.tm_min, expires.tm_sec) + self._cookies[key]['expires'] = d + if path is not None: + self._cookies[key]['path'] = path + if domain is not None: + self._cookies[key]['domain'] = domain + if secure is not None: + self._cookies[key]['secure'] = secure + + def delete_cookie(self, key): + if self._cookies is None: + self._cookies = SimpleCookie() + if key not in self._cookies: + self._cookies[key] = '' + self._cookies[key]['max-age'] = 0 + + def __call__(self, environ, start_response): + req = environ['werkzeug.request'] + if req.session.worth_saving: + req.session.save() + self.set_cookie(SID_COOKIE_NAME, req.session.sid) + + headers = self.headers.to_list(self.charset) 
+ if self._cookies is not None: + for morsel in self._cookies.values(): + headers.append(('Set-Cookie', morsel.output(header=''))) + status = '%d %s' % (self.status, HTTP_STATUS_CODES[self.status]) + + charset = self.charset or 'ascii' + start_response(status, headers) + for item in self.response: + if isinstance(item, unicode): + yield item.encode(charset) + else: + yield str(item) + +def get_base_uri(environ): + url = environ['wsgi.url_scheme'] + '://' + if 'HTTP_HOST' in environ: + url += environ['HTTP_HOST'] + else: + url += environ['SERVER_NAME'] + if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \ + in (('https', '443'), ('http', '80')): + url += ':' + environ['SERVER_PORT'] + url += urllib.quote(environ.get('SCRIPT_INFO', '').rstrip('/')) + return url + + +class RedirectResponse(Response): + + def __init__(self, target_url, code=302): + if not target_url.startswith('/'): + target_url = '/' + target_url + self.target_url = target_url + super(RedirectResponse, self).__init__('Moved...', status=code) + + def __call__(self, environ, start_response): + url = get_base_uri(environ) + self.target_url + self.headers['Location'] = url + return super(RedirectResponse, self).__call__(environ, start_response) + + +class JSONResponse(Response): + + def __init__(self, data): + assert not isinstance(data, list), 'list unsafe for json dumping' + super(JSONResponse, self).__init__(dump_json(data), mimetype='text/javascript') + + +class SharedDataMiddleware(object): + """ + Redirects calls to an folder with static data. 
+ """ + + def __init__(self, app, exports): + self.app = app + self.exports = exports + self.cache = {} + + def serve_file(self, filename, start_response): + from mimetypes import guess_type + guessed_type = guess_type(filename) + mime_type = guessed_type[0] or 'text/plain' + expiry = time() + 3600 # one hour + expiry = asctime(gmtime(expiry)) + start_response('200 OK', [('Content-Type', mime_type), + ('Cache-Control', 'public'), + ('Expires', expiry)]) + with open(filename, 'rb') as f: + return [f.read()] + + def __call__(self, environ, start_response): + p = environ.get('PATH_INFO', '') + if p in self.cache: + return self.serve_file(self.cache[p], start_response) + for search_path, file_path in self.exports.iteritems(): + if not search_path.endswith('/'): + search_path += '/' + if p.startswith(search_path): + real_path = path.join(file_path, p[len(search_path):]) + if path.exists(real_path) and path.isfile(real_path): + self.cache[p] = real_path + return self.serve_file(real_path, start_response) + return self.app(environ, start_response) + + +class NotFound(Exception): + """ + Raise to display the 404 error page. + """ + + def __init__(self, show_keyword_matches=False): + self.show_keyword_matches = show_keyword_matches + Exception.__init__(self, show_keyword_matches) diff --git a/sphinx/writer.py b/sphinx/writer.py new file mode 100644 index 000000000..cd081af52 --- /dev/null +++ b/sphinx/writer.py @@ -0,0 +1,229 @@ +# -*- coding: utf-8 -*- +""" + sphinx.writer + ~~~~~~~~~~~~~ + + docutils writers handling Sphinx' custom nodes. + + :copyright: 2007 by Georg Brandl. + :license: Python license. 
+""" + +from docutils import nodes +from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator + +from .smartypants import sphinx_smarty_pants + + +class HTMLWriter(Writer): + def __init__(self, config): + Writer.__init__(self) + if config.get('use_smartypants', False): + self.translator_class = SmartyPantsHTMLTranslator + else: + self.translator_class = HTMLTranslator + + +version_text = { + 'deprecated': 'Deprecated in version %s', + 'versionchanged': 'Changed in version %s', + 'versionadded': 'New in version %s', +} + +class HTMLTranslator(BaseTranslator): + """ + Our custom HTML translator. + """ + + def __init__(self, *args, **kwds): + self.no_smarty = 0 + BaseTranslator.__init__(self, *args, **kwds) + self.highlightlang = 'python' + + def visit_desc(self, node): + self.body.append(self.starttag(node, 'dl', CLASS=node['desctype'])) + def depart_desc(self, node): + self.body.append('\n\n') + + def visit_desc_signature(self, node): + # the id is set automatically + self.body.append(self.starttag(node, 'dt')) + # anchor for per-desc interactive data + if node.parent['desctype'] != 'describe' and node['ids'] and node['first']: + self.body.append('' % node['ids'][0]) + if node.parent['desctype'] in ('class', 'exception'): + self.body.append('%s ' % node.parent['desctype']) + def depart_desc_signature(self, node): + self.body.append('\n') + + def visit_desc_classname(self, node): + self.body.append(self.starttag(node, 'tt', '', CLASS='descclassname')) + def depart_desc_classname(self, node): + self.body.append('
    ') + + def visit_desc_name(self, node): + self.body.append(self.starttag(node, 'tt', '', CLASS='descname')) + def depart_desc_name(self, node): + self.body.append('
    ') + + def visit_desc_parameterlist(self, node): + self.body.append('(') + self.first_param = 1 + def depart_desc_parameterlist(self, node): + self.body.append(')') + + def visit_desc_parameter(self, node): + if not self.first_param: + self.body.append(', ') + else: + self.first_param = 0 + if not node.hasattr('noemph'): + self.body.append('') + def depart_desc_parameter(self, node): + if not node.hasattr('noemph'): + self.body.append('') + + def visit_desc_optional(self, node): + self.body.append('[') + def depart_desc_optional(self, node): + self.body.append(']') + + def visit_desc_content(self, node): + self.body.append(self.starttag(node, 'dd', '')) + def depart_desc_content(self, node): + self.body.append('') + + def visit_refcount(self, node): + self.body.append(self.starttag(node, 'em', '', CLASS='refcount')) + def depart_refcount(self, node): + self.body.append('
    ') + + def visit_versionmodified(self, node): + self.body.append(self.starttag(node, 'p')) + text = version_text[node['type']] % node['version'] + if len(node): + text += ': ' + else: + text += '.' + self.body.append('%s' % text) + def depart_versionmodified(self, node): + self.body.append('

    \n') + + # overwritten -- we don't want source comments to show up in the HTML + def visit_comment(self, node): + raise nodes.SkipNode + + # overwritten + def visit_admonition(self, node, name=''): + self.body.append(self.start_tag_with_title( + node, 'div', CLASS=('admonition ' + name))) + if name and name != 'seealso': + node.insert(0, nodes.title(name, self.language.labels[name])) + self.set_first_last(node) + + def visit_seealso(self, node): + self.visit_admonition(node, 'seealso') + def depart_seealso(self, node): + self.depart_admonition(node) + + # overwritten + def visit_title(self, node, move_ids=1): + # if we have a section we do our own processing in order + # to have ids in the hN-tags and not in additional a-tags + if isinstance(node.parent, nodes.section): + h_level = self.section_level + self.initial_header_level - 1 + if node.parent.get('ids'): + attrs = {'ids': node.parent['ids']} + else: + attrs = {} + self.body.append(self.starttag(node, 'h%d' % h_level, '', **attrs)) + self.context.append('\n' % h_level) + else: + BaseTranslator.visit_title(self, node, move_ids) + + # overwritten + def visit_literal_block(self, node): + from .highlighting import highlight_block + self.body.append(highlight_block(node.rawsource, self.highlightlang)) + raise nodes.SkipNode + + def visit_productionlist(self, node): + self.body.append(self.starttag(node, 'pre')) + names = [] + for production in node: + names.append(production['tokenname']) + maxlen = max(len(name) for name in names) + for production in node: + if production['tokenname']: + self.body.append(self.starttag(production, 'strong', '')) + self.body.append(production['tokenname'].ljust(maxlen) + + '
    ::= ') + lastname = production['tokenname'] + else: + self.body.append('%s ' % (' '*len(lastname))) + production.walkabout(self) + self.body.append('\n') + self.body.append('\n') + raise nodes.SkipNode + def depart_productionlist(self, node): + pass + + def visit_production(self, node): + pass + def depart_production(self, node): + pass + + def visit_centered(self, node): + self.body.append(self.starttag(node, 'center') + '') + def depart_centered(self, node): + self.body.append('') + + def visit_compact_paragraph(self, node): + pass + def depart_compact_paragraph(self, node): + pass + + def visit_highlightlang(self, node): + self.highlightlang = node['lang'] + def depart_highlightlang(self, node): + pass + + def visit_toctree(self, node): + # this only happens when formatting a toc from env.tocs -- in this + # case we don't want to include the subtree + raise nodes.SkipNode + + def visit_index(self, node): + raise nodes.SkipNode + + +class SmartyPantsHTMLTranslator(HTMLTranslator): + """ + Handle ordinary text via smartypants, converting quotes and dashes + to the correct entities. 
+ """ + + def __init__(self, *args, **kwds): + self.no_smarty = 0 + HTMLTranslator.__init__(self, *args, **kwds) + + def visit_literal(self, node): + self.no_smarty += 1 + try: + # this raises SkipNode + HTMLTranslator.visit_literal(self, node) + finally: + self.no_smarty -= 1 + + def visit_productionlist(self, node): + self.no_smarty += 1 + try: + HTMLTranslator.visit_productionlist(self, node) + finally: + self.no_smarty -= 1 + + def encode(self, text): + text = HTMLTranslator.encode(self, text) + if self.no_smarty <= 0: + text = sphinx_smarty_pants(text) + return text diff --git a/utils/check_sources.py b/utils/check_sources.py new file mode 100755 index 000000000..761f05ae0 --- /dev/null +++ b/utils/check_sources.py @@ -0,0 +1,241 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" + Checker for file headers + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Make sure each Python file has a correct file header + including copyright and license information. + + :copyright: 2006-2007 by Georg Brandl. + :license: GNU GPL, see LICENSE for more details. +""" + +import sys, os, re +import getopt +import cStringIO +from os.path import join, splitext, abspath + + +checkers = {} + +def checker(*suffixes, **kwds): + only_pkg = kwds.pop('only_pkg', False) + def deco(func): + for suffix in suffixes: + checkers.setdefault(suffix, []).append(func) + func.only_pkg = only_pkg + return func + return deco + + +name_mail_re = r'[\w ]+(<.*?>)?' +copyright_re = re.compile(r'^ :copyright: 200\d(-200\d)? 
by %s(, %s)*[,.]$' % + (name_mail_re, name_mail_re)) +license_re = re.compile(r" :license: (.*?).\n") +copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' % + (name_mail_re, name_mail_re)) +coding_re = re.compile(r'coding[:=]\s*([-\w.]+)') +not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+') +is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b') + +misspellings = ["developement", "adress", "verificate", # ALLOW-MISSPELLING + "informations"] # ALLOW-MISSPELLING + + +@checker('.py') +def check_syntax(fn, lines): + try: + compile(''.join(lines), fn, "exec") + except SyntaxError, err: + yield 0, "not compilable: %s" % err + + +@checker('.py') +def check_style_and_encoding(fn, lines): + encoding = 'ascii' + for lno, line in enumerate(lines): + if len(line) > 90: + yield lno+1, "line too long" + m = not_ix_re.search(line) + if m: + yield lno+1, '"' + m.group() + '"' + if is_const_re.search(line): + yield lno+1, 'using == None/True/False' + if lno < 2: + co = coding_re.search(line) + if co: + encoding = co.group(1) + try: + line.decode(encoding) + except UnicodeDecodeError, err: + yield lno+1, "not decodable: %s\n Line: %r" % (err, line) + except LookupError, err: + yield 0, "unknown encoding: %s" % encoding + encoding = 'latin1' + + +@checker('.py', only_pkg=True) +def check_fileheader(fn, lines): + # line number correction + c = 1 + if lines[0:1] == ['#!/usr/bin/env python\n']: + lines = lines[1:] + c = 2 + + llist = [] + docopen = False + for lno, l in enumerate(lines): + llist.append(l) + if lno == 0: + if l == '# -*- coding: rot13 -*-\n': + # special-case pony package + return + elif l != '# -*- coding: utf-8 -*-\n': + yield 1, "missing coding declaration" + elif lno == 1: + if l != '"""\n' and l != 'r"""\n': + yield 2, 'missing docstring begin (""")' + else: + docopen = True + elif docopen: + if l == '"""\n': + # end of docstring + if lno <= 4: + yield lno+c, "missing module name in docstring" + break + + if l != "\n" and l[:4] != ' ' and docopen: + yield lno+c, 
"missing correct docstring indentation" + + if lno == 2: + # if not in package, don't check the module name + modname = fn[:-3].replace('/', '.').replace('.__init__', '') + while modname: + if l.lower()[4:-1] == modname: + break + modname = '.'.join(modname.split('.')[1:]) + else: + yield 3, "wrong module name in docstring heading" + modnamelen = len(l.strip()) + elif lno == 3: + if l.strip() != modnamelen * "~": + yield 4, "wrong module name underline, should be ~~~...~" + + else: + yield 0, "missing end and/or start of docstring..." + + # check for copyright and license fields + license = llist[-2:-1] + if not license or not license_re.match(license[0]): + yield 0, "no correct license info" + + ci = -3 + copyright = llist[ci:ci+1] + while copyright and copyright_2_re.match(copyright[0]): + ci -= 1 + copyright = llist[ci:ci+1] + if not copyright or not copyright_re.match(copyright[0]): + yield 0, "no correct copyright info" + + +@checker('.py', '.html', '.js') +def check_whitespace_and_spelling(fn, lines): + for lno, line in enumerate(lines): + if "\t" in line: + yield lno+1, "OMG TABS!!!1 " + if line[:-1].rstrip(' \t') != line[:-1]: + yield lno+1, "trailing whitespace" + for word in misspellings: + if word in line and 'ALLOW-MISSPELLING' not in line: + yield lno+1, '"%s" used' % word + + +bad_tags = ('', '', '', '', '' + '
    ', '', '', '>out, "%s:%d: %s" % (fn, lno, msg) + num += 1 + if verbose: + print + if num == 0: + print "No errors found." + else: + print out.getvalue().rstrip('\n') + print "%d error%s found." % (num, num > 1 and "s" or "") + return int(num > 0) + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/utils/pylintrc b/utils/pylintrc new file mode 100644 index 000000000..aa04e12e5 --- /dev/null +++ b/utils/pylintrc @@ -0,0 +1,301 @@ +# lint Python modules using external checkers. +# +# This is the main checker controling the other ones and the reports +# generation. It is itself both a raw checker and an astng checker in order +# to: +# * handle message activation / deactivation at the module level +# * handle some basic but necessary stats'data (number of classes, methods...) +# +[MASTER] + +# Specify a configuration file. +#rcfile= + +# Profiled execution. +profile=no + +# Add to the black list. It should be a base name, not a +# path. You may set this option multiple times. +ignore=.svn + +# Pickle collected data for later comparisons. +persistent=yes + +# Set the cache size for astng objects. +cache-size=500 + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + + +[MESSAGES CONTROL] + +# Enable only checker(s) with the given id(s). This option conflict with the +# disable-checker option +#enable-checker= + +# Enable all checker(s) except those with the given id(s). This option conflict +# with the disable-checker option +#disable-checker= + +# Enable all messages in the listed categories. +#enable-msg-cat= + +# Disable all messages in the listed categories. +#disable-msg-cat= + +# Enable the message(s) with the given id(s). +#enable-msg= + +# Disable the message(s) with the given id(s). +disable-msg=C0323,W0142,C0301,C0103,C0111,E0213,C0302,C0203,W0703,R0201 + + +[REPORTS] + +# set the output format. 
Available formats are text, parseable, colorized and +# html +output-format=colorized + +# Include message's id in output +include-ids=yes + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". +files-output=no + +# Tells wether to display a full report or only the messages +reports=yes + +# Python expression which should return a note less than 10 (10 is the highest +# note).You have access to the variables errors warning, statement which +# respectivly contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (R0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Add a comment according to your evaluation note. This is used by the global +# evaluation report (R0004). +comment=no + +# Enable the report(s) with the given id(s). +#enable-report= + +# Disable the report(s) with the given id(s). +#disable-report= + + +# checks for +# * unused variables / imports +# * undefined variables +# * redefinition of variable from builtins or from an outer scope +# * use of variable before assigment +# +[VARIABLES] + +# Tells wether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching names used for dummy variables (i.e. not used). +dummy-variables-rgx=_|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + + +# try to find bugs in the code using type inference +# +[TYPECHECK] + +# Tells wether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). 
+ignore-mixin-members=yes + +# When zope mode is activated, consider the acquired-members option to ignore +# access to some undefined attributes. +zope=no + +# List of members which are usually get through zope's acquisition mecanism and +# so shouldn't trigger E0201 when accessed (need zope=yes to be considered). +acquired-members=REQUEST,acl_users,aq_parent + + +# checks for : +# * doc strings +# * modules / classes / functions / methods / arguments / variables name +# * number of arguments, local variables, branchs, returns and statements in +# functions, methods +# * required module attributes +# * dangerous default values as arguments +# * redefinition of function / method / class +# * uses of the global statement +# +[BASIC] + +# Required attributes for module, separated by a comma +required-attributes= + +# Regular expression which should only match functions or classes name which do +# not require a docstring +no-docstring-rgx=__.*__ + +# Regular expression which should only match correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression which should only match correct module level names +const-rgx=(([A-Z_][A-Z1-9_]*)|(__.*__))$ + +# Regular expression which should only match correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression which should only match correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct instance attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct list comprehension / +# generator expression variable names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# 
Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# List of builtins function names that should not be used, separated by a comma +bad-functions=apply,input + + +# checks for sign of poor/misdesign: +# * number of methods, attributes, local variables... +# * size, complexity of functions, methods +# +[DESIGN] + +# Maximum number of arguments for function / method +max-args=12 + +# Maximum number of locals for function / method body +max-locals=30 + +# Maximum number of return / yield for function / method body +max-returns=12 + +# Maximum number of branch for function / method body +max-branchs=30 + +# Maximum number of statements in function / method body +max-statements=60 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=20 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=0 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + + +# checks for +# * external modules dependencies +# * relative / wildcard imports +# * cyclic imports +# * uses of deprecated modules +# +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,string,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. 
internal and external) dependencies in the +# given file (report R0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report R0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report R0402 must +# not be disabled) +int-import-graph= + + +# checks for : +# * methods without self as first argument +# * overridden methods signature +# * access only to existant members via self +# * attributes not defined in the __init__ method +# * supported interfaces implementation +# * unreachable code +# +[CLASSES] + +# List of interface methods to ignore, separated by a comma. This is used for +# instance to not check methods defines in Zope's Interface base class. +ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + + +# checks for similarities and duplicated code. This computation may be +# memory / CPU intensive, so you should disable it if you experiments some +# problems. +# +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=10 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + + +# checks for: +# * warning notes in the code like FIXME, XXX +# * PEP 263: source code with non ascii character but no encoding declaration +# +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. 
+notes=FIXME,XXX,TODO + + +# checks for : +# * unauthorized constructions +# * strict indentation +# * line length +# * use of <> instead of != +# +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=90 + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' diff --git a/utils/reindent.py b/utils/reindent.py new file mode 100755 index 000000000..e6ee82872 --- /dev/null +++ b/utils/reindent.py @@ -0,0 +1,291 @@ +#! /usr/bin/env python + +# Released to the public domain, by Tim Peters, 03 October 2000. +# -B option added by Georg Brandl, 2006. + +"""reindent [-d][-r][-v] [ path ... ] + +-d (--dryrun) Dry run. Analyze, but don't make any changes to files. +-r (--recurse) Recurse. Search for all .py files in subdirectories too. +-B (--no-backup) Don't write .bak backup files. +-v (--verbose) Verbose. Print informative msgs; else only names of changed files. +-h (--help) Help. Print this usage information and exit. + +Change Python (.py) files to use 4-space indents and no hard tab characters. +Also trim excess spaces and tabs from ends of lines, and remove empty lines +at the end of files. Also ensure the last line ends with a newline. + +If no paths are given on the command line, reindent operates as a filter, +reading a single source file from standard input and writing the transformed +source to standard output. In this case, the -d, -r and -v flags are +ignored. + +You can pass one or more file and/or directory paths. When a directory +path, all .py files within the directory will be examined, and, if the -r +option is given, likewise recursively for subdirectories. + +If output is not to standard output, reindent overwrites files in place, +renaming the originals with a .bak extension. If it finds nothing to +change, the file is left alone. 
If reindent does change a file, the changed +file is a fixed-point for future runs (i.e., running reindent on the +resulting .py file won't change it again). + +The hard part of reindenting is figuring out what to do with comment +lines. So long as the input files get a clean bill of health from +tabnanny.py, reindent should do a good job. +""" + +__version__ = "1" + +import tokenize +import os +import sys + +verbose = 0 +recurse = 0 +dryrun = 0 +no_backup = 0 + +def usage(msg=None): + if msg is not None: + print >> sys.stderr, msg + print >> sys.stderr, __doc__ + +def errprint(*args): + sep = "" + for arg in args: + sys.stderr.write(sep + str(arg)) + sep = " " + sys.stderr.write("\n") + +def main(): + import getopt + global verbose, recurse, dryrun, no_backup + + try: + opts, args = getopt.getopt(sys.argv[1:], "drvhB", + ["dryrun", "recurse", "verbose", "help", + "no-backup"]) + except getopt.error, msg: + usage(msg) + return + for o, a in opts: + if o in ('-d', '--dryrun'): + dryrun += 1 + elif o in ('-r', '--recurse'): + recurse += 1 + elif o in ('-v', '--verbose'): + verbose += 1 + elif o in ('-B', '--no-backup'): + no_backup += 1 + elif o in ('-h', '--help'): + usage() + return + if not args: + r = Reindenter(sys.stdin) + r.run() + r.write(sys.stdout) + return + for arg in args: + check(arg) + +def check(file): + if os.path.isdir(file) and not os.path.islink(file): + if verbose: + print "listing directory", file + names = os.listdir(file) + for name in names: + fullname = os.path.join(file, name) + if ((recurse and os.path.isdir(fullname) and + not os.path.islink(fullname)) + or name.lower().endswith(".py")): + check(fullname) + return + + if verbose: + print "checking", file, "...", + try: + f = open(file) + except IOError, msg: + errprint("%s: I/O Error: %s" % (file, str(msg))) + return + + r = Reindenter(f) + f.close() + if r.run(): + if verbose: + print "changed." + if dryrun: + print "But this is a dry run, so leaving it alone." 
+ else: + print "reindented", file, (dryrun and "(dry run => not really)" or "") + if not dryrun: + if not no_backup: + bak = file + ".bak" + if os.path.exists(bak): + os.remove(bak) + os.rename(file, bak) + if verbose: + print "renamed", file, "to", bak + f = open(file, "w") + r.write(f) + f.close() + if verbose: + print "wrote new", file + else: + if verbose: + print "unchanged." + + +class Reindenter: + + def __init__(self, f): + self.find_stmt = 1 # next token begins a fresh stmt? + self.level = 0 # current indent level + + # Raw file lines. + self.raw = f.readlines() + + # File lines, rstripped & tab-expanded. Dummy at start is so + # that we can use tokenize's 1-based line numbering easily. + # Note that a line is all-blank iff it's "\n". + self.lines = [line.rstrip('\n \t').expandtabs() + "\n" + for line in self.raw] + self.lines.insert(0, None) + self.index = 1 # index into self.lines of next line + + # List of (lineno, indentlevel) pairs, one for each stmt and + # comment line. indentlevel is -1 for comment lines, as a + # signal that tokenize doesn't know what to do about them; + # indeed, they're our headache! + self.stats = [] + + def run(self): + tokenize.tokenize(self.getline, self.tokeneater) + # Remove trailing empty lines. + lines = self.lines + while lines and lines[-1] == "\n": + lines.pop() + # Sentinel. + stats = self.stats + stats.append((len(lines), 0)) + # Map count of leading spaces to # we want. + have2want = {} + # Program after transformation. + after = self.after = [] + # Copy over initial empty lines -- there's nothing to do until + # we see a line with *something* on it. + i = stats[0][0] + after.extend(lines[1:i]) + for i in range(len(stats)-1): + thisstmt, thislevel = stats[i] + nextstmt = stats[i+1][0] + have = getlspace(lines[thisstmt]) + want = thislevel * 4 + if want < 0: + # A comment line. + if have: + # An indented comment line. If we saw the same + # indentation before, reuse what it most recently + # mapped to. 
+ want = have2want.get(have, -1) + if want < 0: + # Then it probably belongs to the next real stmt. + for j in xrange(i+1, len(stats)-1): + jline, jlevel = stats[j] + if jlevel >= 0: + if have == getlspace(lines[jline]): + want = jlevel * 4 + break + if want < 0: # Maybe it's a hanging + # comment like this one, + # in which case we should shift it like its base + # line got shifted. + for j in xrange(i-1, -1, -1): + jline, jlevel = stats[j] + if jlevel >= 0: + want = have + getlspace(after[jline-1]) - \ + getlspace(lines[jline]) + break + if want < 0: + # Still no luck -- leave it alone. + want = have + else: + want = 0 + assert want >= 0 + have2want[have] = want + diff = want - have + if diff == 0 or have == 0: + after.extend(lines[thisstmt:nextstmt]) + else: + for line in lines[thisstmt:nextstmt]: + if diff > 0: + if line == "\n": + after.append(line) + else: + after.append(" " * diff + line) + else: + remove = min(getlspace(line), -diff) + after.append(line[remove:]) + return self.raw != self.after + + def write(self, f): + f.writelines(self.after) + + # Line-getter for tokenize. + def getline(self): + if self.index >= len(self.lines): + line = "" + else: + line = self.lines[self.index] + self.index += 1 + return line + + # Line-eater for tokenize. + def tokeneater(self, type, token, (sline, scol), end, line, + INDENT=tokenize.INDENT, + DEDENT=tokenize.DEDENT, + NEWLINE=tokenize.NEWLINE, + COMMENT=tokenize.COMMENT, + NL=tokenize.NL): + + if type == NEWLINE: + # A program statement, or ENDMARKER, will eventually follow, + # after some (possibly empty) run of tokens of the form + # (NL | COMMENT)* (INDENT | DEDENT+)? 
+ self.find_stmt = 1 + + elif type == INDENT: + self.find_stmt = 1 + self.level += 1 + + elif type == DEDENT: + self.find_stmt = 1 + self.level -= 1 + + elif type == COMMENT: + if self.find_stmt: + self.stats.append((sline, -1)) + # but we're still looking for a new stmt, so leave + # find_stmt alone + + elif type == NL: + pass + + elif self.find_stmt: + # This is the first "real token" following a NEWLINE, so it + # must be the first token of the next program statement, or an + # ENDMARKER. + self.find_stmt = 0 + if line: # not endmarker + self.stats.append((sline, self.level)) + +# Count number of leading blanks. +def getlspace(line): + i, n = 0, len(line) + while i < n and line[i] == " ": + i += 1 + return i + +if __name__ == '__main__': + main()