diff --git a/.hgignore b/.hgignore
index 70ea36a52..b68cccf91 100644
--- a/.hgignore
+++ b/.hgignore
@@ -15,3 +15,5 @@
^env/
\.DS_Store$
~$
+^utils/.*3\.py$
+^distribute-
diff --git a/CHANGES b/CHANGES
index b247543f8..a30d490e7 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,9 +1,41 @@
Release 1.1 (in development)
============================
+* Added Python 3.x support.
+
+
Release 1.0.2 (in development)
==============================
+* Allow breaking long signatures, continuing with backslash-escaped
+ newlines.
+
+* Fix unwanted styling of C domain references (because of a namespace
+ clash with Pygments styles).
+
+* Allow references to PEPs and RFCs with explicit anchors.
+
+* #471: Fix LaTeX references to figures.
+
+* #482: When doing a non-exact search, match only the given type
+ of object.
+
+* #481: Apply non-exact search for Python reference targets with
+ ``.name`` for modules too.
+
+* #484: Fix crash when duplicating a parameter in an info field list.
+
+* #487: Fix setting the default role to one provided by the
+ ``oldcmarkup`` extension.
+
+* #488: Fix crash when json-py is installed, which provides a
+ ``json`` module but is incompatible with simplejson.
+
+* #480: Fix handling of target naming in intersphinx.
+
+* #486: Fix removal of ``!`` for all cross-reference roles.
+
+
Release 1.0.1 (Jul 27, 2010)
============================
diff --git a/CHANGES.DasIch b/CHANGES.DasIch
new file mode 100644
index 000000000..3f7167263
--- /dev/null
+++ b/CHANGES.DasIch
@@ -0,0 +1,36 @@
+Changes
+=======
+
+This file contains changes made by Daniel Neuhäuser during the Google Summer
+of Code 2010 to port Sphinx to Python 3.x. Changes are ordered by date,
+newest first.
+
+May 16: - Added utils/convert.py which converts entire directories of Python
+ files with 2to3 and names the converted files foo3.py.
+ - Modified the Makefile so that in case Python 3 is used the scripts in
+ utils get converted with utils/convert.py and are used instead of the
+ Python 2 scripts.
+
+May 10: Fixed a couple of tests and made several small changes.
+
+May 9: - Removed ez_setup.py, which does not work with Python 3.x, and
+ replaced it with distribute_setup.py.
+ - Use distribute (at least on 3.x) in order to run 2to3 automatically.
+ - Reverted some of the changes made in revision bac40c7c924c which
+ caused errors.
+ - Modified tests/run.py to test against the build created by
+ setup.py build in order to run the test suite with 3.x.
+ - Several small changes to fix 3.x compatibility.
+
+May 1: - Removed deprecated tuple parameter unpacking.
+ - Removed a pre-2.3 workaround for booleans because this creates a
+ deprecation warning for 3.x, in which you can't assign values to
+ booleans.
+ - Moved :func:`open()` calls out of the try-blocks, which fixes revision
+ c577c25bd44b.
+
+April 30: Made :class:`sphinx.domains.cpp.DefExpr` unhashable as described by the
+ documentation because classes in 3.x don't inherit ``__hash__`` if
+ they implement ``__eq__``.
+
+April 29: Removed several deprecated function/method calls.
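
A minimal sketch (not part of the patch) of the behavior the April 30 entry
refers to: in Python 3, a class that defines ``__eq__`` without also defining
``__hash__`` gets ``__hash__`` set to ``None``, so its instances are
unhashable. The class below is only a stand-in for
``sphinx.domains.cpp.DefExpr``::

    class DefExpr(object):
        # Stand-in class -- only the comparison behavior matters here.
        def __init__(self, name):
            self.name = name

        def __eq__(self, other):
            return isinstance(other, DefExpr) and self.name == other.name

        # Python 3 blocks hashing automatically when __eq__ is defined
        # without __hash__; spelling it out keeps 2.6+ and 3.x consistent.
        __hash__ = None

    try:
        hash(DefExpr('spam'))
    except TypeError:
        print('DefExpr instances are unhashable, as documented')
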
diff --git a/EXAMPLES b/EXAMPLES
index 44c511e9e..778b235e1 100644
--- a/EXAMPLES
+++ b/EXAMPLES
@@ -97,6 +97,7 @@ Documentation using the sphinxdoc theme
* Satchmo: http://www.satchmoproject.com/docs/svn/
* Sphinx: http://sphinx.pocoo.org/
* Sqlkit: http://sqlkit.argolinux.org/
+* Tau: http://www.tango-controls.org/static/tau/latest/doc/html/index.html
* Total Open Station: http://tops.berlios.de/
* WebFaction: http://docs.webfaction.com/
diff --git a/MANIFEST.in b/MANIFEST.in
index 25cbc334f..5e3104a82 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,7 +7,7 @@ include TODO
include babel.cfg
include Makefile
-include ez_setup.py
+include distribute_setup.py
include sphinx-autogen.py
include sphinx-build.py
include sphinx-quickstart.py
diff --git a/Makefile b/Makefile
index 7057a7152..09aa3c962 100644
--- a/Makefile
+++ b/Makefile
@@ -1,36 +1,63 @@
PYTHON ?= python
-export PYTHONPATH = $(shell echo "$$PYTHONPATH"):./sphinx
+.PHONY: all check clean clean-pyc clean-patchfiles clean-backupfiles \
+ clean-generated pylint reindent test covertest build convert-utils
-.PHONY: all check clean clean-pyc clean-patchfiles pylint reindent test
+DONT_CHECK = -i build -i dist -i sphinx/style/jquery.js \
+ -i sphinx/pycode/pgen2 -i sphinx/util/smartypants.py \
+ -i .ropeproject -i doc/_build -i tests/path.py \
+ -i tests/coverage.py -i env -i utils/convert.py \
+ -i utils/reindent3.py -i utils/check_sources3.py -i .tox
-all: clean-pyc check test
+all: clean-pyc clean-backupfiles check test
+ifeq ($(PYTHON), python3)
+check: convert-utils
+ @$(PYTHON) utils/check_sources3.py $(DONT_CHECK) .
+else
check:
- @$(PYTHON) utils/check_sources.py -i build -i dist -i sphinx/style/jquery.js \
- -i sphinx/pycode/pgen2 -i sphinx/util/smartypants.py -i .ropeproject \
- -i doc/_build -i ez_setup.py -i tests/path.py -i tests/coverage.py \
- -i env -i .tox .
+ @$(PYTHON) utils/check_sources.py $(DONT_CHECK) .
+endif
-clean: clean-pyc clean-patchfiles
+clean: clean-pyc clean-patchfiles clean-backupfiles clean-generated
clean-pyc:
find . -name '*.pyc' -exec rm -f {} +
find . -name '*.pyo' -exec rm -f {} +
- find . -name '*~' -exec rm -f {} +
clean-patchfiles:
find . -name '*.orig' -exec rm -f {} +
find . -name '*.rej' -exec rm -f {} +
+clean-backupfiles:
+ find . -name '*~' -exec rm -f {} +
+ find . -name '*.bak' -exec rm -f {} +
+
+clean-generated:
+ rm -f utils/*3.py*
+
pylint:
@pylint --rcfile utils/pylintrc sphinx
+ifeq ($(PYTHON), python3)
+reindent: convert-utils
+ @$(PYTHON) utils/reindent3.py -r -n .
+else
reindent:
- @$(PYTHON) utils/reindent.py -r -B .
+ @$(PYTHON) utils/reindent.py -r -n .
+endif
-test:
+test: build
@cd tests; $(PYTHON) run.py -d -m '^[tT]est' $(TEST)
-covertest:
- @cd tests; $(PYTHON) run.py -d -m '^[tT]est' --with-coverage --cover-package=sphinx $(TEST)
+covertest: build
+ @cd tests; $(PYTHON) run.py -d -m '^[tT]est' --with-coverage \
+ --cover-package=sphinx $(TEST)
+
+build:
+ @$(PYTHON) setup.py build
+
+ifeq ($(PYTHON), python3)
+convert-utils:
+ @python3 utils/convert.py -i utils/convert.py utils/
+endif
diff --git a/README b/README
index bb2dea9d6..e31d6b936 100644
--- a/README
+++ b/README
@@ -26,6 +26,18 @@ Then, direct your browser to ``_build/html/index.html``.
Or read them online at <http://sphinx.pocoo.org/>.
+Testing
+=======
+
+To run the tests with the interpreter available as ``python``, use::
+
+ make test
+
+If you want to use a different interpreter, e.g. ``python3``, use::
+
+ PYTHON=python3 make test
+
+
Contributing
============
diff --git a/custom_fixers/__init__.py b/custom_fixers/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/custom_fixers/fix_alt_unicode.py b/custom_fixers/fix_alt_unicode.py
new file mode 100644
index 000000000..55175e90f
--- /dev/null
+++ b/custom_fixers/fix_alt_unicode.py
@@ -0,0 +1,12 @@
+from lib2to3.fixer_base import BaseFix
+from lib2to3.fixer_util import Name
+
+class FixAltUnicode(BaseFix):
+ PATTERN = """
+ func=funcdef< 'def' name='__unicode__'
+ parameters< '(' NAME ')' > any+ >
+ """
+
+ def transform(self, node, results):
+ name = results['name']
+ name.replace(Name('__str__', prefix=name.prefix))
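
A hedged illustration (not part of the patch) of what this fixer does once the
2to3 build step picks it up via ``use_2to3_fixers=['custom_fixers']`` in
setup.py below: a ``__unicode__`` method is renamed to ``__str__``. The class
name here is hypothetical::

    # Python 2 source before conversion:
    class ExampleNode(object):
        def __unicode__(self):
            return u'node'

    # Roughly what 2to3 plus FixAltUnicode emits for Python 3:
    class ExampleNode(object):
        def __str__(self):
            return 'node'
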
diff --git a/distribute_setup.py b/distribute_setup.py
new file mode 100644
index 000000000..37117b34e
--- /dev/null
+++ b/distribute_setup.py
@@ -0,0 +1,485 @@
+#!python
+"""Bootstrap distribute installation
+
+If you want to use setuptools in your package's setup.py, just include this
+file in the same directory with it, and add this to the top of your setup.py::
+
+ from distribute_setup import use_setuptools
+ use_setuptools()
+
+If you want to require a specific version of setuptools, set a download
+mirror, or use an alternate download directory, you can do so by supplying
+the appropriate options to ``use_setuptools()``.
+
+This file can also be run as a script to install or upgrade setuptools.
+"""
+import os
+import sys
+import time
+import fnmatch
+import tempfile
+import tarfile
+from distutils import log
+
+try:
+ from site import USER_SITE
+except ImportError:
+ USER_SITE = None
+
+try:
+ import subprocess
+
+ def _python_cmd(*args):
+ args = (sys.executable,) + args
+ return subprocess.call(args) == 0
+
+except ImportError:
+ # will be used for python 2.3
+ def _python_cmd(*args):
+ args = (sys.executable,) + args
+ # quoting arguments if windows
+ if sys.platform == 'win32':
+ def quote(arg):
+ if ' ' in arg:
+ return '"%s"' % arg
+ return arg
+ args = [quote(arg) for arg in args]
+ return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
+
+DEFAULT_VERSION = "0.6.13"
+DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
+SETUPTOOLS_FAKED_VERSION = "0.6c11"
+
+SETUPTOOLS_PKG_INFO = """\
+Metadata-Version: 1.0
+Name: setuptools
+Version: %s
+Summary: xxxx
+Home-page: xxx
+Author: xxx
+Author-email: xxx
+License: xxx
+Description: xxx
+""" % SETUPTOOLS_FAKED_VERSION
+
+
+def _install(tarball):
+ # extracting the tarball
+ tmpdir = tempfile.mkdtemp()
+ log.warn('Extracting in %s', tmpdir)
+ old_wd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ tar = tarfile.open(tarball)
+ _extractall(tar)
+ tar.close()
+
+ # going in the directory
+ subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+ os.chdir(subdir)
+ log.warn('Now working in %s', subdir)
+
+ # installing
+ log.warn('Installing Distribute')
+ if not _python_cmd('setup.py', 'install'):
+ log.warn('Something went wrong during the installation.')
+ log.warn('See the error message above.')
+ finally:
+ os.chdir(old_wd)
+
+
+def _build_egg(egg, tarball, to_dir):
+ # extracting the tarball
+ tmpdir = tempfile.mkdtemp()
+ log.warn('Extracting in %s', tmpdir)
+ old_wd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ tar = tarfile.open(tarball)
+ _extractall(tar)
+ tar.close()
+
+ # going in the directory
+ subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+ os.chdir(subdir)
+ log.warn('Now working in %s', subdir)
+
+ # building an egg
+ log.warn('Building a Distribute egg in %s', to_dir)
+ _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+
+ finally:
+ os.chdir(old_wd)
+ # returning the result
+ log.warn(egg)
+ if not os.path.exists(egg):
+ raise IOError('Could not build the egg.')
+
+
+def _do_download(version, download_base, to_dir, download_delay):
+ egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
+ % (version, sys.version_info[0], sys.version_info[1]))
+ if not os.path.exists(egg):
+ tarball = download_setuptools(version, download_base,
+ to_dir, download_delay)
+ _build_egg(egg, tarball, to_dir)
+ sys.path.insert(0, egg)
+ import setuptools
+ setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+ to_dir=os.curdir, download_delay=15, no_fake=True):
+ # making sure we use the absolute path
+ to_dir = os.path.abspath(to_dir)
+ was_imported = 'pkg_resources' in sys.modules or \
+ 'setuptools' in sys.modules
+ try:
+ try:
+ import pkg_resources
+ if not hasattr(pkg_resources, '_distribute'):
+ if not no_fake:
+ _fake_setuptools()
+ raise ImportError
+ except ImportError:
+ return _do_download(version, download_base, to_dir, download_delay)
+ try:
+ pkg_resources.require("distribute>="+version)
+ return
+ except pkg_resources.VersionConflict:
+ e = sys.exc_info()[1]
+ if was_imported:
+ sys.stderr.write(
+ "The required version of distribute (>=%s) is not available,\n"
+ "and can't be installed while this script is running. Please\n"
+ "install a more recent version first, using\n"
+ "'easy_install -U distribute'."
+ "\n\n(Currently using %r)\n" % (version, e.args[0]))
+ sys.exit(2)
+ else:
+ del pkg_resources, sys.modules['pkg_resources'] # reload ok
+ return _do_download(version, download_base, to_dir,
+ download_delay)
+ except pkg_resources.DistributionNotFound:
+ return _do_download(version, download_base, to_dir,
+ download_delay)
+ finally:
+ if not no_fake:
+ _create_fake_setuptools_pkg_info(to_dir)
+
+def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+ to_dir=os.curdir, delay=15):
+ """Download distribute from a specified location and return its filename
+
+ `version` should be a valid distribute version number that is available
+ as an egg for download under the `download_base` URL (which should end
+ with a '/'). `to_dir` is the directory where the egg will be downloaded.
+ `delay` is the number of seconds to pause before an actual download
+ attempt.
+ """
+ # making sure we use the absolute path
+ to_dir = os.path.abspath(to_dir)
+ try:
+ from urllib.request import urlopen
+ except ImportError:
+ from urllib2 import urlopen
+ tgz_name = "distribute-%s.tar.gz" % version
+ url = download_base + tgz_name
+ saveto = os.path.join(to_dir, tgz_name)
+ src = dst = None
+ if not os.path.exists(saveto): # Avoid repeated downloads
+ try:
+ log.warn("Downloading %s", url)
+ src = urlopen(url)
+ # Read/write all in one block, so we don't create a corrupt file
+ # if the download is interrupted.
+ data = src.read()
+ dst = open(saveto, "wb")
+ dst.write(data)
+ finally:
+ if src:
+ src.close()
+ if dst:
+ dst.close()
+ return os.path.realpath(saveto)
+
+def _no_sandbox(function):
+ def __no_sandbox(*args, **kw):
+ try:
+ from setuptools.sandbox import DirectorySandbox
+ if not hasattr(DirectorySandbox, '_old'):
+ def violation(*args):
+ pass
+ DirectorySandbox._old = DirectorySandbox._violation
+ DirectorySandbox._violation = violation
+ patched = True
+ else:
+ patched = False
+ except ImportError:
+ patched = False
+
+ try:
+ return function(*args, **kw)
+ finally:
+ if patched:
+ DirectorySandbox._violation = DirectorySandbox._old
+ del DirectorySandbox._old
+
+ return __no_sandbox
+
+def _patch_file(path, content):
+ """Will backup the file then patch it"""
+ existing_content = open(path).read()
+ if existing_content == content:
+ # already patched
+ log.warn('Already patched.')
+ return False
+ log.warn('Patching...')
+ _rename_path(path)
+ f = open(path, 'w')
+ try:
+ f.write(content)
+ finally:
+ f.close()
+ return True
+
+_patch_file = _no_sandbox(_patch_file)
+
+def _same_content(path, content):
+ return open(path).read() == content
+
+def _rename_path(path):
+ new_name = path + '.OLD.%s' % time.time()
+ log.warn('Renaming %s into %s', path, new_name)
+ os.rename(path, new_name)
+ return new_name
+
+def _remove_flat_installation(placeholder):
+ if not os.path.isdir(placeholder):
+ log.warn('Unknown installation at %s', placeholder)
+ return False
+ found = False
+ for file in os.listdir(placeholder):
+ if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
+ found = True
+ break
+ if not found:
+ log.warn('Could not locate setuptools*.egg-info')
+ return
+
+ log.warn('Removing elements out of the way...')
+ pkg_info = os.path.join(placeholder, file)
+ if os.path.isdir(pkg_info):
+ patched = _patch_egg_dir(pkg_info)
+ else:
+ patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
+
+ if not patched:
+ log.warn('%s already patched.', pkg_info)
+ return False
+ # now let's move the files out of the way
+ for element in ('setuptools', 'pkg_resources.py', 'site.py'):
+ element = os.path.join(placeholder, element)
+ if os.path.exists(element):
+ _rename_path(element)
+ else:
+ log.warn('Could not find the %s element of the '
+ 'Setuptools distribution', element)
+ return True
+
+_remove_flat_installation = _no_sandbox(_remove_flat_installation)
+
+def _after_install(dist):
+ log.warn('After install bootstrap.')
+ placeholder = dist.get_command_obj('install').install_purelib
+ _create_fake_setuptools_pkg_info(placeholder)
+
+def _create_fake_setuptools_pkg_info(placeholder):
+ if not placeholder or not os.path.exists(placeholder):
+ log.warn('Could not find the install location')
+ return
+ pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
+ setuptools_file = 'setuptools-%s-py%s.egg-info' % \
+ (SETUPTOOLS_FAKED_VERSION, pyver)
+ pkg_info = os.path.join(placeholder, setuptools_file)
+ if os.path.exists(pkg_info):
+ log.warn('%s already exists', pkg_info)
+ return
+
+ log.warn('Creating %s', pkg_info)
+ f = open(pkg_info, 'w')
+ try:
+ f.write(SETUPTOOLS_PKG_INFO)
+ finally:
+ f.close()
+
+ pth_file = os.path.join(placeholder, 'setuptools.pth')
+ log.warn('Creating %s', pth_file)
+ f = open(pth_file, 'w')
+ try:
+ f.write(os.path.join(os.curdir, setuptools_file))
+ finally:
+ f.close()
+
+_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+
+def _patch_egg_dir(path):
+ # let's check if it's already patched
+ pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+ if os.path.exists(pkg_info):
+ if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
+ log.warn('%s already patched.', pkg_info)
+ return False
+ _rename_path(path)
+ os.mkdir(path)
+ os.mkdir(os.path.join(path, 'EGG-INFO'))
+ pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+ f = open(pkg_info, 'w')
+ try:
+ f.write(SETUPTOOLS_PKG_INFO)
+ finally:
+ f.close()
+ return True
+
+_patch_egg_dir = _no_sandbox(_patch_egg_dir)
+
+def _before_install():
+ log.warn('Before install bootstrap.')
+ _fake_setuptools()
+
+
+def _under_prefix(location):
+ if 'install' not in sys.argv:
+ return True
+ args = sys.argv[sys.argv.index('install')+1:]
+ for index, arg in enumerate(args):
+ for option in ('--root', '--prefix'):
+ if arg.startswith('%s=' % option):
+ top_dir = arg.split('root=')[-1]
+ return location.startswith(top_dir)
+ elif arg == option:
+ if len(args) > index:
+ top_dir = args[index+1]
+ return location.startswith(top_dir)
+ if arg == '--user' and USER_SITE is not None:
+ return location.startswith(USER_SITE)
+ return True
+
+
+def _fake_setuptools():
+ log.warn('Scanning installed packages')
+ try:
+ import pkg_resources
+ except ImportError:
+ # we're cool
+ log.warn('Setuptools or Distribute does not seem to be installed.')
+ return
+ ws = pkg_resources.working_set
+ try:
+ setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
+ replacement=False))
+ except TypeError:
+ # old distribute API
+ setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+
+ if setuptools_dist is None:
+ log.warn('No setuptools distribution found')
+ return
+ # detecting if it was already faked
+ setuptools_location = setuptools_dist.location
+ log.warn('Setuptools installation detected at %s', setuptools_location)
+
+ # if --root or --prefix was provided, and if
+ # setuptools is not located in them, we don't patch it
+ if not _under_prefix(setuptools_location):
+ log.warn('Not patching, --root or --prefix is installing Distribute'
+ ' in another location')
+ return
+
+ # let's see if it's an egg
+ if not setuptools_location.endswith('.egg'):
+ log.warn('Non-egg installation')
+ res = _remove_flat_installation(setuptools_location)
+ if not res:
+ return
+ else:
+ log.warn('Egg installation')
+ pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
+ if (os.path.exists(pkg_info) and
+ _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
+ log.warn('Already patched.')
+ return
+ log.warn('Patching...')
+ # let's create a fake egg replacing setuptools one
+ res = _patch_egg_dir(setuptools_location)
+ if not res:
+ return
+ log.warn('Patching done.')
+ _relaunch()
+
+
+def _relaunch():
+ log.warn('Relaunching...')
+ # we have to relaunch the process
+ # pip marker to avoid a relaunch bug
+ if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
+ sys.argv[0] = 'setup.py'
+ args = [sys.executable] + sys.argv
+ sys.exit(subprocess.call(args))
+
+
+def _extractall(self, path=".", members=None):
+ """Extract all members from the archive to the current working
+ directory and set owner, modification time and permissions on
+ directories afterwards. `path' specifies a different directory
+ to extract to. `members' is optional and must be a subset of the
+ list returned by getmembers().
+ """
+ import copy
+ import operator
+ from tarfile import ExtractError
+ directories = []
+
+ if members is None:
+ members = self
+
+ for tarinfo in members:
+ if tarinfo.isdir():
+ # Extract directories with a safe mode.
+ directories.append(tarinfo)
+ tarinfo = copy.copy(tarinfo)
+ tarinfo.mode = 448 # decimal for oct 0700
+ self.extract(tarinfo, path)
+
+ # Reverse sort directories.
+ if sys.version_info < (2, 4):
+ def sorter(dir1, dir2):
+ return cmp(dir1.name, dir2.name)
+ directories.sort(sorter)
+ directories.reverse()
+ else:
+ directories.sort(key=operator.attrgetter('name'), reverse=True)
+
+ # Set correct owner, mtime and filemode on directories.
+ for tarinfo in directories:
+ dirpath = os.path.join(path, tarinfo.name)
+ try:
+ self.chown(tarinfo, dirpath)
+ self.utime(tarinfo, dirpath)
+ self.chmod(tarinfo, dirpath)
+ except ExtractError:
+ e = sys.exc_info()[1]
+ if self.errorlevel > 1:
+ raise
+ else:
+ self._dbg(1, "tarfile: %s" % e)
+
+
+def main(argv, version=DEFAULT_VERSION):
+ """Install or upgrade setuptools and EasyInstall"""
+ tarball = download_setuptools()
+ _install(tarball)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
index a268ec6ef..85b8fba95 100644
--- a/doc/_templates/indexsidebar.html
+++ b/doc/_templates/indexsidebar.html
@@ -23,6 +23,6 @@ are also available.
-or come to the #python-docs channel on FreeNode.
+or come to the #pocoo channel on FreeNode.
You can also open an issue at the
tracker .
diff --git a/doc/builders.rst b/doc/builders.rst
index 6e90ccc62..80203e759 100644
--- a/doc/builders.rst
+++ b/doc/builders.rst
@@ -255,11 +255,11 @@ All serialization builders output one file per source file and a few special
files. They also copy the reST source files in the directory ``_sources``
under the output directory.
-The :class:`PickleHTMLBuilder` is a builtin subclass that implements the pickle
+The :class:`.PickleHTMLBuilder` is a builtin subclass that implements the pickle
serialization interface.
The files per source file have the extensions of
-:attr:`~SerializingHTMLBuilder.out_suffix`, and are arranged in directories
+:attr:`~.SerializingHTMLBuilder.out_suffix`, and are arranged in directories
just as the source files are. They unserialize to a dictionary (or dictionary
like structure) with these keys:
@@ -290,7 +290,7 @@ like structure) with these keys:
The special files are located in the root output directory. They are:
-:attr:`SerializingHTMLBuilder.globalcontext_filename`
+:attr:`.SerializingHTMLBuilder.globalcontext_filename`
A pickled dict with these keys:
``project``, ``copyright``, ``release``, ``version``
@@ -309,7 +309,7 @@ The special files are located in the root output directory. They are:
``titles``
A dictionary of all documents' titles, as HTML strings.
-:attr:`SerializingHTMLBuilder.searchindex_filename`
+:attr:`.SerializingHTMLBuilder.searchindex_filename`
An index that can be used for searching the documentation. It is a pickled
list with these entries:
diff --git a/doc/conf.py b/doc/conf.py
index 299f321ac..b3a1cda79 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -64,6 +64,10 @@ man_pages = [
'template generator', '', 1),
]
+# We're not using intersphinx right now, but if we did, this would be part of
+# the mapping:
+intersphinx_mapping = {'python': ('http://docs.python.org/dev', None)}
+
# -- Extension interface -------------------------------------------------------
diff --git a/doc/config.rst b/doc/config.rst
index bf8ad3c2d..e0fbeb46e 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -346,12 +346,12 @@ Project information
A boolean that decides whether module names are prepended to all
:term:`object` names (for object types where a "module" of some kind is
- defined), e.g. for :rst:dir:`function` directives. Default is ``True``.
+ defined), e.g. for :rst:dir:`py:function` directives. Default is ``True``.
.. confval:: show_authors
- A boolean that decides whether :rst:dir:`moduleauthor` and :rst:dir:`sectionauthor`
- directives produce any output in the built files.
+ A boolean that decides whether :rst:dir:`codeauthor` and
+ :rst:dir:`sectionauthor` directives produce any output in the built files.
.. confval:: modindex_common_prefix
@@ -388,6 +388,8 @@ Options for HTML output
These options influence HTML as well as HTML Help output, and other builders
that use Sphinx' HTMLWriter class.
+.. XXX document html_context
+
.. confval:: html_theme
The "theme" that the HTML output should use. See the :doc:`section about
@@ -553,19 +555,6 @@ that use Sphinx' HTMLWriter class.
This will render the template ``customdownload.html`` as the page
``download.html``.
- .. note::
-
- Earlier versions of Sphinx had a value called :confval:`html_index` which
- was a clumsy way of controlling the content of the "index" document. If
- you used this feature, migrate it by adding an ``'index'`` key to this
- setting, with your custom template as the value, and in your custom
- template, use ::
-
- {% extend "defindex.html" %}
- {% block tables %}
- ... old template content ...
- {% endblock %}
-
.. confval:: html_domain_indices
If true, generate domain-specific indices in addition to the general index.
diff --git a/doc/domains.rst b/doc/domains.rst
index c64930a24..8cd7a0c7d 100644
--- a/doc/domains.rst
+++ b/doc/domains.rst
@@ -52,10 +52,19 @@ flag ``:noindex:``. An example using a Python domain directive::
.. py:function:: spam(eggs)
ham(eggs)
- :noindex:
Spam or ham the foo.
+This describes the two Python functions ``spam`` and ``ham``. Note that when
+signatures become too long, you can break them by adding a backslash to lines
+that are continued in the next line. Example::
+
+ .. py:function:: filterwarnings(action, message='', category=Warning, \
+ module='', lineno=0, append=False)
+ :noindex:
+
+(This example also shows how to use the ``:noindex:`` flag.)
+
The domains also provide roles that link back to these object descriptions. For
example, to link to one of the functions described in the example above, you
could say ::
@@ -138,11 +147,12 @@ declarations:
.. rst:directive:: .. py:currentmodule:: name
This directive tells Sphinx that the classes, functions etc. documented from
- here are in the given module (like :rst:dir:`py:module`), but it will not create
- index entries, an entry in the Global Module Index, or a link target for
- :rst:role:`mod`. This is helpful in situations where documentation for things in
- a module is spread over multiple files or sections -- one location has the
- :rst:dir:`py:module` directive, the others only :rst:dir:`py:currentmodule`.
+ here are in the given module (like :rst:dir:`py:module`), but it will not
+ create index entries, an entry in the Global Module Index, or a link target
+ for :rst:role:`py:mod`. This is helpful in situations where documentation
+ for things in a module is spread over multiple files or sections -- one
+ location has the :rst:dir:`py:module` directive, the others only
+ :rst:dir:`py:currentmodule`.
The following directives are provided for module and class contents:
@@ -363,6 +373,9 @@ dot, this order is reversed. For example, in the documentation of Python's
:mod:`codecs` module, ``:py:func:`open``` always refers to the built-in
function, while ``:py:func:`.open``` refers to :func:`codecs.open`.
+A similar heuristic is used to determine whether the name is an attribute of the
+currently documented class.
+
Also, if the name is prefixed with a dot, and no exact match is found, the
target is taken as a suffix and all object names with that suffix are
searched. For example, ``:py:meth:`.TarFile.close``` references the
@@ -370,8 +383,9 @@ searched. For example, ``:py:meth:`.TarFile.close``` references the
``tarfile``. Since this can get ambiguous, if there is more than one possible
match, you will get a warning from Sphinx.
-A similar heuristic is used to determine whether the name is an attribute of the
-currently documented class.
+Note that you can combine the ``~`` and ``.`` prefixes:
+``:py:meth:`~.TarFile.close``` will reference the ``tarfile.TarFile.close()``
+method, but the visible link caption will only be ``close()``.
.. _c-domain:
diff --git a/doc/ext/appapi.rst b/doc/ext/appapi.rst
index 402dd72f0..2717c6a8e 100644
--- a/doc/ext/appapi.rst
+++ b/doc/ext/appapi.rst
@@ -210,7 +210,7 @@ the following public API:
standard Sphinx roles (see :ref:`xref-syntax`).
This method is also available under the deprecated alias
- :meth:`add_description_unit`.
+ ``add_description_unit``.
.. method:: Sphinx.add_crossref_type(directivename, rolename, indextemplate='', ref_nodeclass=None, objname='')
@@ -272,6 +272,8 @@ the following public API:
This allows auto-documenting new types of objects. See the source of the
autodoc module for examples on how to subclass :class:`Documenter`.
+ .. XXX add real docs for Documenter and subclassing
+
.. versionadded:: 0.6
.. method:: Sphinx.add_autodoc_attrgetter(type, getter)
diff --git a/doc/ext/autodoc.rst b/doc/ext/autodoc.rst
index bd725cfaa..813331015 100644
--- a/doc/ext/autodoc.rst
+++ b/doc/ext/autodoc.rst
@@ -27,20 +27,21 @@ two locations for documentation, while at the same time avoiding
auto-generated-looking pure API documentation.
:mod:`autodoc` provides several directives that are versions of the usual
-:rst:dir:`module`, :rst:dir:`class` and so forth. On parsing time, they import the
-corresponding module and extract the docstring of the given objects, inserting
-them into the page source under a suitable :rst:dir:`module`, :rst:dir:`class` etc.
-directive.
+:rst:dir:`py:module`, :rst:dir:`py:class` and so forth. On parsing time, they
+import the corresponding module and extract the docstring of the given objects,
+inserting them into the page source under a suitable :rst:dir:`py:module`,
+:rst:dir:`py:class` etc. directive.
.. note::
- Just as :rst:dir:`class` respects the current :rst:dir:`module`, :rst:dir:`autoclass`
- will also do so, and likewise with :rst:dir:`method` and :rst:dir:`class`.
+ Just as :rst:dir:`py:class` respects the current :rst:dir:`py:module`,
+ :rst:dir:`autoclass` will also do so. Likewise, :rst:dir:`automethod` will
+ respect the current :rst:dir:`py:class`.
.. rst:directive:: automodule
- autoclass
- autoexception
+ autoclass
+ autoexception
Document a module, class or exception. All three directives will by default
only insert the docstring of the object itself::
@@ -127,23 +128,24 @@ directive.
.. versionadded:: 0.4
- * The :rst:dir:`automodule`, :rst:dir:`autoclass` and :rst:dir:`autoexception` directives
- also support a flag option called ``show-inheritance``. When given, a list
- of base classes will be inserted just below the class signature (when used
- with :rst:dir:`automodule`, this will be inserted for every class that is
- documented in the module).
+ * The :rst:dir:`automodule`, :rst:dir:`autoclass` and
+ :rst:dir:`autoexception` directives also support a flag option called
+ ``show-inheritance``. When given, a list of base classes will be inserted
+ just below the class signature (when used with :rst:dir:`automodule`, this
+ will be inserted for every class that is documented in the module).
.. versionadded:: 0.4
* All autodoc directives support the ``noindex`` flag option that has the
- same effect as for standard :rst:dir:`function` etc. directives: no index
- entries are generated for the documented object (and all autodocumented
- members).
+ same effect as for standard :rst:dir:`py:function` etc. directives: no
+ index entries are generated for the documented object (and all
+ autodocumented members).
.. versionadded:: 0.4
* :rst:dir:`automodule` also recognizes the ``synopsis``, ``platform`` and
- ``deprecated`` options that the standard :rst:dir:`module` directive supports.
+ ``deprecated`` options that the standard :rst:dir:`py:module` directive
+ supports.
.. versionadded:: 0.5
@@ -213,8 +215,8 @@ There are also new config values that you can set:
``"class"``
Only the class' docstring is inserted. This is the default. You can
- still document ``__init__`` as a separate method using :rst:dir:`automethod`
- or the ``members`` option to :rst:dir:`autoclass`.
+ still document ``__init__`` as a separate method using
+ :rst:dir:`automethod` or the ``members`` option to :rst:dir:`autoclass`.
``"both"``
Both the class' and the ``__init__`` method's docstring are concatenated
and inserted.
diff --git a/doc/ext/inheritance.rst b/doc/ext/inheritance.rst
index 76388a94c..cdd017917 100644
--- a/doc/ext/inheritance.rst
+++ b/doc/ext/inheritance.rst
@@ -17,7 +17,7 @@ It adds this directive:
This directive has one or more arguments, each giving a module or class
name. Class names can be unqualified; in that case they are taken to exist
- in the currently described module (see :rst:dir:`module`).
+ in the currently described module (see :rst:dir:`py:module`).
For each given class, and each class in each given module, the base classes
are determined. Then, from all classes and their base classes, a graph is
diff --git a/doc/ext/intersphinx.rst b/doc/ext/intersphinx.rst
index bb2a5a8c4..7997472a7 100644
--- a/doc/ext/intersphinx.rst
+++ b/doc/ext/intersphinx.rst
@@ -84,7 +84,7 @@ linking:
To add links to modules and objects in the Python standard library
documentation, use::
- intersphinx_mapping = {'python': ('http://docs.python.org/', None)}
+ intersphinx_mapping = {'python': ('http://docs.python.org/3.2', None)}
This will download the corresponding :file:`objects.inv` file from the
Internet and generate links to the pages under the given URI. The downloaded
@@ -94,12 +94,12 @@ linking:
A second example, showing the meaning of a non-``None`` value of the second
tuple item::
- intersphinx_mapping = {'python': ('http://docs.python.org/',
+ intersphinx_mapping = {'python': ('http://docs.python.org/3.2',
'python-inv.txt')}
This will read the inventory from :file:`python-inv.txt` in the source
directory, but still generate links to the pages under
- ``http://docs.python.org/``. It is up to you to update the inventory file as
+ ``http://docs.python.org/3.2``. It is up to you to update the inventory file as
new objects are added to the Python documentation.
.. confval:: intersphinx_cache_limit
diff --git a/doc/ext/math.rst b/doc/ext/math.rst
index b9f6ab12b..f2896c39c 100644
--- a/doc/ext/math.rst
+++ b/doc/ext/math.rst
@@ -17,15 +17,15 @@ if possible, reuse that support too.
.. note::
- :mod:`sphinx.ext.mathbase` is not meant to be added to the
- :confval:`extensions` config value, instead, use either
- :mod:`sphinx.ext.pngmath` or :mod:`sphinx.ext.jsmath` as described below.
+ :mod:`.mathbase` is not meant to be added to the :confval:`extensions` config
+ value, instead, use either :mod:`sphinx.ext.pngmath` or
+ :mod:`sphinx.ext.jsmath` as described below.
The input language for mathematics is LaTeX markup. This is the de-facto
standard for plain-text math notation and has the added advantage that no
further translation is necessary when building LaTeX output.
-:mod:`mathbase` defines these new markup elements:
+:mod:`.mathbase` defines these new markup elements:
.. rst:role:: math
diff --git a/doc/intro.rst b/doc/intro.rst
index 1a39e266c..c85fbbad3 100644
--- a/doc/intro.rst
+++ b/doc/intro.rst
@@ -45,10 +45,10 @@ See the :ref:`pertinent section in the FAQ list `.
Prerequisites
-------------
-Sphinx needs at least **Python 2.4** to run, as well as the docutils_ and
-Jinja2_ libraries. Sphinx should work with docutils version 0.5 or some
-(not broken) SVN trunk snapshot. If you like to have source code highlighting
-support, you must also install the Pygments_ library.
+Sphinx needs at least **Python 2.4** or **Python 3.1** to run, as well as the
+docutils_ and Jinja2_ libraries. Sphinx should work with docutils version 0.5
+or some (not broken) SVN trunk snapshot. If you like to have source code
+highlighting support, you must also install the Pygments_ library.
.. _reStructuredText: http://docutils.sf.net/rst.html
.. _docutils: http://docutils.sf.net/
diff --git a/doc/markup/inline.rst b/doc/markup/inline.rst
index bb1ed68ec..4b704228f 100644
--- a/doc/markup/inline.rst
+++ b/doc/markup/inline.rst
@@ -260,7 +260,7 @@ in a different style:
.. rst:role:: samp
A piece of literal text, such as code. Within the contents, you can use
- curly braces to indicate a "variable" part, as in :rst:dir:`file`. For
+ curly braces to indicate a "variable" part, as in :rst:role:`file`. For
example, in ``:samp:`print 1+{variable}```, the part ``variable`` would be
emphasized.
@@ -274,13 +274,15 @@ The following roles generate external links:
A reference to a Python Enhancement Proposal. This generates appropriate
index entries. The text "PEP *number*\ " is generated; in the HTML output,
- this text is a hyperlink to an online copy of the specified PEP.
+ this text is a hyperlink to an online copy of the specified PEP. You can
+ link to a specific section by saying ``:pep:`number#anchor```.
.. rst:role:: rfc
A reference to an Internet Request for Comments. This generates appropriate
index entries. The text "RFC *number*\ " is generated; in the HTML output,
- this text is a hyperlink to an online copy of the specified RFC.
+ this text is a hyperlink to an online copy of the specified RFC. You can
+ link to a specific section by saying ``:rfc:`number#anchor```.
Note that there are no special roles for including hyperlinks as you can use
diff --git a/doc/markup/para.rst b/doc/markup/para.rst
index be06f6365..ecc6b4a6b 100644
--- a/doc/markup/para.rst
+++ b/doc/markup/para.rst
@@ -42,15 +42,25 @@ units as well as normal text:
Example::
.. versionadded:: 2.5
- The `spam` parameter.
+ The *spam* parameter.
Note that there must be no blank line between the directive head and the
explanation; this is to make these blocks visually continuous in the markup.
.. rst:directive:: .. versionchanged:: version
- Similar to :rst:dir:`versionadded`, but describes when and what changed in the named
- feature in some way (new parameters, changed side effects, etc.).
+ Similar to :rst:dir:`versionadded`, but describes when and what changed in
+ the named feature in some way (new parameters, changed side effects, etc.).
+
+.. rst:directive:: .. deprecated:: version
+
+ Similar to :rst:dir:`versionchanged`, but describes when the feature was
+ deprecated. An explanation can also be given, for example to inform the
+ reader what should be used instead. Example::
+
+ .. deprecated:: 3.1
+ Use :func:`spam` instead.
+
--------------
diff --git a/doc/markup/toctree.rst b/doc/markup/toctree.rst
index 474427d72..2c0a418a2 100644
--- a/doc/markup/toctree.rst
+++ b/doc/markup/toctree.rst
@@ -151,7 +151,7 @@ The special document names (and pages generated for them) are:
:ref:`object descriptions `, and from :rst:dir:`index`
directives.
- The module index contains one entry per :rst:dir:`module` directive.
+ The Python module index contains one entry per :rst:dir:`py:module` directive.
The search page contains a form that uses the generated JSON search index and
JavaScript to full-text search the generated documents for search words; it
diff --git a/doc/templating.rst b/doc/templating.rst
index 6880663d3..193a90bd9 100644
--- a/doc/templating.rst
+++ b/doc/templating.rst
@@ -21,10 +21,10 @@ No. You have several other options:
configuration value accordingly.
* You can :ref:`write a custom builder ` that derives from
- :class:`~sphinx.builders.StandaloneHTMLBuilder` and calls your template engine
- of choice.
+ :class:`~sphinx.builders.html.StandaloneHTMLBuilder` and calls your template
+ engine of choice.
-* You can use the :class:`~sphinx.builders.PickleHTMLBuilder` that produces
+* You can use the :class:`~sphinx.builders.html.PickleHTMLBuilder` that produces
pickle files with the page contents, and postprocess them using a custom tool,
or use them in your Web application.
@@ -261,9 +261,9 @@ in the future.
.. data:: file_suffix
- The value of the builder's :attr:`out_suffix` attribute, i.e. the file name
- extension that the output files will get. For a standard HTML builder, this
- is usually ``.html``.
+ The value of the builder's :attr:`~.SerializingHTMLBuilder.out_suffix`
+ attribute, i.e. the file name extension that the output files will get. For
+ a standard HTML builder, this is usually ``.html``.
.. data:: has_source
diff --git a/ez_setup.py b/ez_setup.py
deleted file mode 100644
index d24e845e5..000000000
--- a/ez_setup.py
+++ /dev/null
@@ -1,276 +0,0 @@
-#!python
-"""Bootstrap setuptools installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
- from ez_setup import use_setuptools
- use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import sys
-DEFAULT_VERSION = "0.6c9"
-DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
-
-md5_data = {
- 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
- 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
- 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
- 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
- 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
- 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
- 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
- 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
- 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
- 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
- 'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
- 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
- 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
- 'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
- 'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
- 'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
- 'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
- 'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
- 'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
- 'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
- 'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
- 'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
- 'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
- 'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
- 'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
- 'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
- 'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
- 'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
- 'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
- 'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
- 'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
- 'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
- 'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
- 'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
-}
-
-import sys, os
-try: from hashlib import md5
-except ImportError: from md5 import md5
-
-def _validate_md5(egg_name, data):
- if egg_name in md5_data:
- digest = md5(data).hexdigest()
- if digest != md5_data[egg_name]:
- print >>sys.stderr, (
- "md5 validation of %s failed! (Possible download problem?)"
- % egg_name
- )
- sys.exit(2)
- return data
-
-def use_setuptools(
- version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
- download_delay=15
-):
- """Automatically find/download setuptools and make it available on sys.path
-
- `version` should be a valid setuptools version number that is available
- as an egg for download under the `download_base` URL (which should end with
- a '/'). `to_dir` is the directory where setuptools will be downloaded, if
- it is not already available. If `download_delay` is specified, it should
- be the number of seconds that will be paused before initiating a download,
- should one be required. If an older version of setuptools is installed,
- this routine will print a message to ``sys.stderr`` and raise SystemExit in
- an attempt to abort the calling script.
- """
- was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
- def do_download():
- egg = download_setuptools(version, download_base, to_dir, download_delay)
- sys.path.insert(0, egg)
- import setuptools; setuptools.bootstrap_install_from = egg
- try:
- import pkg_resources
- except ImportError:
- return do_download()
- try:
- pkg_resources.require("setuptools>="+version); return
- except pkg_resources.VersionConflict, e:
- if was_imported:
- print >>sys.stderr, (
- "The required version of setuptools (>=%s) is not available, and\n"
- "can't be installed while this script is running. Please install\n"
- " a more recent version first, using 'easy_install -U setuptools'."
- "\n\n(Currently using %r)"
- ) % (version, e.args[0])
- sys.exit(2)
- else:
- del pkg_resources, sys.modules['pkg_resources'] # reload ok
- return do_download()
- except pkg_resources.DistributionNotFound:
- return do_download()
-
-def download_setuptools(
- version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
- delay = 15
-):
- """Download setuptools from a specified location and return its filename
-
- `version` should be a valid setuptools version number that is available
- as an egg for download under the `download_base` URL (which should end
- with a '/'). `to_dir` is the directory where the egg will be downloaded.
- `delay` is the number of seconds to pause before an actual download attempt.
- """
- import urllib2, shutil
- egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
- url = download_base + egg_name
- saveto = os.path.join(to_dir, egg_name)
- src = dst = None
- if not os.path.exists(saveto): # Avoid repeated downloads
- try:
- from distutils import log
- if delay:
- log.warn("""
----------------------------------------------------------------------------
-This script requires setuptools version %s to run (even to display
-help). I will attempt to download it for you (from
-%s), but
-you may need to enable firewall access for this script first.
-I will start the download in %d seconds.
-
-(Note: if this machine does not have network access, please obtain the file
-
- %s
-
-and place it in this directory before rerunning this script.)
----------------------------------------------------------------------------""",
- version, download_base, delay, url
- ); from time import sleep; sleep(delay)
- log.warn("Downloading %s", url)
- src = urllib2.urlopen(url)
- # Read/write all in one block, so we don't create a corrupt file
- # if the download is interrupted.
- data = _validate_md5(egg_name, src.read())
- dst = open(saveto,"wb"); dst.write(data)
- finally:
- if src: src.close()
- if dst: dst.close()
- return os.path.realpath(saveto)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def main(argv, version=DEFAULT_VERSION):
- """Install or upgrade setuptools and EasyInstall"""
- try:
- import setuptools
- except ImportError:
- egg = None
- try:
- egg = download_setuptools(version, delay=0)
- sys.path.insert(0,egg)
- from setuptools.command.easy_install import main
- return main(list(argv)+[egg]) # we're done here
- finally:
- if egg and os.path.exists(egg):
- os.unlink(egg)
- else:
- if setuptools.__version__ == '0.0.1':
- print >>sys.stderr, (
- "You have an obsolete version of setuptools installed. Please\n"
- "remove it from your system entirely before rerunning this script."
- )
- sys.exit(2)
-
- req = "setuptools>="+version
- import pkg_resources
- try:
- pkg_resources.require(req)
- except pkg_resources.VersionConflict:
- try:
- from setuptools.command.easy_install import main
- except ImportError:
- from easy_install import main
- main(list(argv)+[download_setuptools(delay=0)])
- sys.exit(0) # try to force an exit
- else:
- if argv:
- from setuptools.command.easy_install import main
- main(argv)
- else:
- print "Setuptools version",version,"or greater has been installed."
- print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
-
-def update_md5(filenames):
- """Update our built-in md5 registry"""
-
- import re
-
- for name in filenames:
- base = os.path.basename(name)
- f = open(name,'rb')
- md5_data[base] = md5(f.read()).hexdigest()
- f.close()
-
- data = [" %r: %r,\n" % it for it in md5_data.items()]
- data.sort()
- repl = "".join(data)
-
- import inspect
- srcfile = inspect.getsourcefile(sys.modules[__name__])
- f = open(srcfile, 'rb'); src = f.read(); f.close()
-
- match = re.search("\nmd5_data = {\n([^}]+)}", src)
- if not match:
- print >>sys.stderr, "Internal error!"
- sys.exit(2)
-
- src = src[:match.start(1)] + repl + src[match.end(1):]
- f = open(srcfile,'w')
- f.write(src)
- f.close()
-
-
-if __name__=='__main__':
- if len(sys.argv)>2 and sys.argv[1]=='--md5update':
- update_md5(sys.argv[2:])
- else:
- main(sys.argv[1:])
-
-
-
-
-
-
diff --git a/setup.py b/setup.py
index 183fcceb1..fe4066b80 100644
--- a/setup.py
+++ b/setup.py
@@ -2,8 +2,8 @@
try:
from setuptools import setup, find_packages
except ImportError:
- import ez_setup
- ez_setup.use_setuptools()
+ import distribute_setup
+ distribute_setup.use_setuptools()
from setuptools import setup, find_packages
import os
@@ -47,7 +47,7 @@ A development egg can be found `here
requires = ['Pygments>=0.8', 'Jinja2>=2.2', 'docutils>=0.5']
if sys.version_info < (2, 4):
- print 'ERROR: Sphinx requires at least Python 2.4 to run.'
+ print('ERROR: Sphinx requires at least Python 2.4 to run.')
sys.exit(1)
if sys.version_info < (2, 5):
@@ -198,4 +198,6 @@ setup(
},
install_requires=requires,
cmdclass=cmdclass,
+ use_2to3=True,
+ use_2to3_fixers=['custom_fixers'],
)
diff --git a/sphinx/__init__.py b/sphinx/__init__.py
index 31c61a86a..1ea2e7bf7 100644
--- a/sphinx/__init__.py
+++ b/sphinx/__init__.py
@@ -9,6 +9,9 @@
:license: BSD, see LICENSE for details.
"""
+# Keep this file executable as-is in Python 3!
+# (Otherwise getting the version out of it from setup.py is impossible.)
+
import sys
from os import path
@@ -35,13 +38,14 @@ if '+' in __version__ or 'pre' in __version__:
def main(argv=sys.argv):
if sys.version_info[:3] < (2, 4, 0):
- print >>sys.stderr, \
- 'Error: Sphinx requires at least Python 2.4 to run.'
+ sys.stderr.write('Error: Sphinx requires at least '
+ 'Python 2.4 to run.\n')
return 1
try:
from sphinx import cmdline
- except ImportError, err:
+ except ImportError:
+ err = sys.exc_info()[1]
errstr = str(err)
if errstr.lower().startswith('no module named'):
whichmod = errstr[16:]
@@ -54,14 +58,14 @@ def main(argv=sys.argv):
whichmod = 'roman module (which is distributed with Docutils)'
hint = ('This can happen if you upgraded docutils using\n'
'easy_install without uninstalling the old version'
- 'first.')
+ ' first.\n')
else:
whichmod += ' module'
- print >>sys.stderr, ('Error: The %s cannot be found. '
- 'Did you install Sphinx and its dependencies '
- 'correctly?' % whichmod)
+ sys.stderr.write('Error: The %s cannot be found. '
+ 'Did you install Sphinx and its dependencies '
+ 'correctly?\n' % whichmod)
if hint:
- print >> sys.stderr, hint
+ sys.stderr.write(hint)
return 1
raise
return cmdline.main(argv)
diff --git a/sphinx/application.py b/sphinx/application.py
index 97778d3fb..b3d2aebc4 100644
--- a/sphinx/application.py
+++ b/sphinx/application.py
@@ -37,9 +37,6 @@ from sphinx.util.osutil import ENOENT
from sphinx.util.console import bold
-# Directive is either new-style or old-style
-clstypes = (type, types.ClassType)
-
# List of all known core events. Maps name to arguments description.
events = {
'builder-inited': '',
diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py
index 8d96c146c..5a7d49cd0 100644
--- a/sphinx/builders/html.py
+++ b/sphinx/builders/html.py
@@ -35,7 +35,7 @@ from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \
movefile, ustrftime, copyfile
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.matching import patmatch, compile_matchers
-from sphinx.util.pycompat import any
+from sphinx.util.pycompat import any, b
from sphinx.errors import SphinxError
from sphinx.locale import _
from sphinx.search import js_index
@@ -63,6 +63,7 @@ class StandaloneHTMLBuilder(Builder):
out_suffix = '.html'
link_suffix = '.html' # defaults to matching out_suffix
indexer_format = js_index
+ indexer_dumps_unicode = True
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
searchindex_filename = 'searchindex.js'
@@ -146,8 +147,9 @@ class StandaloneHTMLBuilder(Builder):
cfgdict = dict((name, self.config[name])
for (name, desc) in self.config.values.iteritems()
if desc[1] == 'html')
- self.config_hash = md5(str(cfgdict)).hexdigest()
- self.tags_hash = md5(str(sorted(self.tags))).hexdigest()
+ self.config_hash = md5(unicode(cfgdict).encode('utf-8')).hexdigest()
+ self.tags_hash = md5(unicode(sorted(self.tags)).encode('utf-8')) \
+ .hexdigest()
old_config_hash = old_tags_hash = ''
try:
fp = open(path.join(self.outdir, '.buildinfo'))
@@ -199,7 +201,7 @@ class StandaloneHTMLBuilder(Builder):
"""Utility: Render a lone doctree node."""
if node is None:
return {'fragment': ''}
- doc = new_document('')
+ doc = new_document(b(''))
doc.append(node)
if self._publisher is None:
@@ -727,10 +729,12 @@ class StandaloneHTMLBuilder(Builder):
self.info(bold('dumping object inventory... '), nonl=True)
f = open(path.join(self.outdir, INVENTORY_FILENAME), 'wb')
try:
- f.write('# Sphinx inventory version 2\n')
- f.write('# Project: %s\n' % self.config.project.encode('utf-8'))
- f.write('# Version: %s\n' % self.config.version.encode('utf-8'))
- f.write('# The remainder of this file is compressed using zlib.\n')
+ f.write((u'# Sphinx inventory version 2\n'
+ u'# Project: %s\n'
+ u'# Version: %s\n'
+ u'# The remainder of this file is compressed using zlib.\n'
+ % (self.config.project, self.config.version)
+ ).encode('utf-8'))
compressor = zlib.compressobj(9)
for domainname, domain in self.env.domains.iteritems():
for name, dispname, type, docname, anchor, prio in \
@@ -742,11 +746,9 @@ class StandaloneHTMLBuilder(Builder):
if dispname == name:
dispname = u'-'
f.write(compressor.compress(
- '%s %s:%s %s %s %s\n' % (name.encode('utf-8'),
- domainname.encode('utf-8'),
- type.encode('utf-8'), prio,
- uri.encode('utf-8'),
- dispname.encode('utf-8'))))
+ (u'%s %s:%s %s %s %s\n' % (name, domainname, type,
+ prio, uri, dispname)
+ ).encode('utf-8')))
f.write(compressor.flush())
finally:
f.close()
@@ -758,7 +760,10 @@ class StandaloneHTMLBuilder(Builder):
searchindexfn = path.join(self.outdir, self.searchindex_filename)
# first write to a temporary file, so that if dumping fails,
# the existing index won't be overwritten
- f = open(searchindexfn + '.tmp', 'wb')
+ if self.indexer_dumps_unicode:
+ f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8')
+ else:
+ f = open(searchindexfn + '.tmp', 'wb')
try:
self.indexer.dump(f, self.indexer_format)
finally:
@@ -915,6 +920,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
#: implements a `dump`, `load`, `dumps` and `loads` functions
#: (pickle, simplejson etc.)
implementation = None
+ implementation_dumps_unicode = False
#: the filename for the global context file
globalcontext_filename = None
@@ -937,6 +943,17 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
return docname[:-5] # up to sep
return docname + SEP
+ def dump_context(self, context, filename):
+ if self.implementation_dumps_unicode:
+ f = codecs.open(filename, 'w', encoding='utf-8')
+ else:
+ f = open(filename, 'wb')
+ try:
+ # XXX: the third argument is pickle-specific!
+ self.implementation.dump(context, f, 2)
+ finally:
+ f.close()
+
def handle_page(self, pagename, ctx, templatename='page.html',
outfilename=None, event_arg=None):
ctx['current_page_name'] = pagename
@@ -950,11 +967,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
ctx, event_arg)
ensuredir(path.dirname(outfilename))
- f = open(outfilename, 'wb')
- try:
- self.implementation.dump(ctx, f, 2)
- finally:
- f.close()
+ self.dump_context(ctx, outfilename)
# if there is a source file, copy the source file for the
# "show source" link
@@ -967,11 +980,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
def handle_finish(self):
# dump the global context
outfilename = path.join(self.outdir, self.globalcontext_filename)
- f = open(outfilename, 'wb')
- try:
- self.implementation.dump(self.globalcontext, f, 2)
- finally:
- f.close()
+ self.dump_context(self.globalcontext, outfilename)
# super here to dump the search index
StandaloneHTMLBuilder.handle_finish(self)
@@ -991,7 +1000,9 @@ class PickleHTMLBuilder(SerializingHTMLBuilder):
A Builder that dumps the generated HTML into pickle files.
"""
implementation = pickle
+ implementation_dumps_unicode = False
indexer_format = pickle
+ indexer_dumps_unicode = False
name = 'pickle'
out_suffix = '.fpickle'
globalcontext_filename = 'globalcontext.pickle'
@@ -1006,7 +1017,9 @@ class JSONHTMLBuilder(SerializingHTMLBuilder):
A builder that dumps the generated HTML into JSON files.
"""
implementation = jsonimpl
+ implementation_dumps_unicode = True
indexer_format = jsonimpl
+ indexer_dumps_unicode = True
name = 'json'
out_suffix = '.fjson'
globalcontext_filename = 'globalcontext.json'
diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py
index 538f4c848..e3a58e724 100644
--- a/sphinx/builders/htmlhelp.py
+++ b/sphinx/builders/htmlhelp.py
@@ -258,7 +258,8 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
def write_index(title, refs, subitems):
def write_param(name, value):
item = '    <param name="%s" value="%s">\n' % (name, value)
- f.write(item.encode('ascii', 'xmlcharrefreplace'))
+ f.write(item.encode('ascii', 'xmlcharrefreplace')
+ .decode('ascii'))
title = cgi.escape(title)
f.write('<LI> <OBJECT type="text/sitemap">\n')
write_param('Keyword', title)
diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py
index ffc52334c..e86f19217 100644
--- a/sphinx/builders/qthelp.py
+++ b/sphinx/builders/qthelp.py
@@ -130,8 +130,16 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
for indexname, indexcls, content, collapse in self.domain_indices:
item = section_template % {'title': indexcls.localname,
'ref': '%s.html' % indexname}
- sections.append(' '*4*4 + item)
- sections = '\n'.join(sections)
+ sections.append((' ' * 4 * 4 + item).encode('utf-8'))
+ # sections may be unicode strings or byte strings; make sure they
+ # are all byte strings before joining them
+ new_sections = []
+ for section in sections:
+ if isinstance(section, unicode):
+ new_sections.append(section.encode('utf-8'))
+ else:
+ new_sections.append(section)
+ sections = u'\n'.encode('utf-8').join(new_sections)
# keywords
keywords = []
@@ -230,7 +238,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
link = node['refuri']
title = escape(node.astext()).replace('"', '&quot;')
item = section_template % {'title': title, 'ref': link}
- item = ' '*4*indentlevel + item.encode('ascii', 'xmlcharrefreplace')
+ item = u' ' * 4 * indentlevel + item
parts.append(item.encode('ascii', 'xmlcharrefreplace'))
elif isinstance(node, nodes.bullet_list):
for subnode in node:
diff --git a/sphinx/config.py b/sphinx/config.py
index 12c2a04ba..6c27f85f0 100644
--- a/sphinx/config.py
+++ b/sphinx/config.py
@@ -11,13 +11,18 @@
import os
import re
+import sys
from os import path
from sphinx.errors import ConfigError
from sphinx.util.osutil import make_filename
+from sphinx.util.pycompat import bytes, b, convert_with_2to3
-nonascii_re = re.compile(r'[\x80-\xff]')
+nonascii_re = re.compile(b(r'[\x80-\xff]'))
+CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
+if sys.version_info >= (3, 0):
+ CONFIG_SYNTAX_ERROR += "\nDid you change the syntax from 2.x to 3.x?"
class Config(object):
"""Configuration file abstraction."""
@@ -163,12 +168,30 @@ class Config(object):
config['tags'] = tags
olddir = os.getcwd()
try:
+ # we promise to have the config dir as current dir while the
+ # config file is executed
+ os.chdir(dirname)
+ # get config source
+ f = open(config_file, 'rb')
try:
- os.chdir(dirname)
- execfile(config['__file__'], config)
+ source = f.read()
+ finally:
+ f.close()
+ try:
+ # compile to a code object, handle syntax errors
+ try:
+ code = compile(source, config_file, 'exec')
+ except SyntaxError:
+ if convert_with_2to3:
+ # maybe the file uses 2.x syntax; try to refactor to
+ # 3.x syntax using 2to3
+ source = convert_with_2to3(config_file)
+ code = compile(source, config_file, 'exec')
+ else:
+ raise
+ exec code in config
except SyntaxError, err:
- raise ConfigError('There is a syntax error in your '
- 'configuration file: ' + str(err))
+ raise ConfigError(CONFIG_SYNTAX_ERROR % err)
finally:
os.chdir(olddir)
@@ -182,10 +205,11 @@ class Config(object):
# check all string values for non-ASCII characters in bytestrings,
# since that can result in UnicodeErrors all over the place
for name, value in self._raw_config.iteritems():
- if isinstance(value, str) and nonascii_re.search(value):
+ if isinstance(value, bytes) and nonascii_re.search(value):
warn('the config value %r is set to a string with non-ASCII '
'characters; this can lead to Unicode errors occurring. '
- 'Please use Unicode strings, e.g. u"Content".' % name)
+ 'Please use Unicode strings, e.g. %r.' % (name, u'Content')
+ )
def init_values(self):
config = self._raw_config
diff --git a/sphinx/directives/__init__.py b/sphinx/directives/__init__.py
index 6c03b8e5f..48c44178d 100644
--- a/sphinx/directives/__init__.py
+++ b/sphinx/directives/__init__.py
@@ -32,6 +32,7 @@ except AttributeError:
# RE to strip backslash escapes
+nl_escape_re = re.compile(r'\\\n')
strip_backslash_re = re.compile(r'\\(?=[^\\])')
@@ -57,10 +58,12 @@ class ObjectDescription(Directive):
"""
Retrieve the signatures to document from the directive arguments. By
default, signatures are given as arguments, one per line.
+
+ Backslash-escaping of newlines is supported.
"""
+ lines = nl_escape_re.sub('', self.arguments[0]).split('\n')
# remove backslashes to support (dummy) escapes; helps Vim highlighting
- return [strip_backslash_re.sub('', sig.strip())
- for sig in self.arguments[0].split('\n')]
+ return [strip_backslash_re.sub('', line.strip()) for line in lines]
def handle_signature(self, sig, signode):
"""
diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py
index 0647daf09..1808cdaba 100644
--- a/sphinx/directives/code.py
+++ b/sphinx/directives/code.py
@@ -119,7 +119,7 @@ class LiteralInclude(Directive):
encoding = self.options.get('encoding', env.config.source_encoding)
codec_info = codecs.lookup(encoding)
try:
- f = codecs.StreamReaderWriter(open(fn, 'U'),
+ f = codecs.StreamReaderWriter(open(fn, 'rb'),
codec_info[2], codec_info[3], 'strict')
lines = f.readlines()
f.close()
diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py
index 4dac89253..8df89459b 100644
--- a/sphinx/domains/cpp.py
+++ b/sphinx/domains/cpp.py
@@ -110,7 +110,7 @@ class DefinitionError(Exception):
return self.description
def __str__(self):
- return unicode(self.encode('utf-8'))
+ return unicode(self).encode('utf-8')
class DefExpr(object):
@@ -132,6 +132,8 @@ class DefExpr(object):
def __ne__(self, other):
return not self.__eq__(other)
+ __hash__ = None
+
def clone(self):
"""Close a definition expression node"""
return deepcopy(self)
diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py
index fc8086995..cd87bfbda 100644
--- a/sphinx/domains/python.py
+++ b/sphinx/domains/python.py
@@ -356,6 +356,9 @@ class PyModule(Directive):
env.domaindata['py']['modules'][modname] = \
(env.docname, self.options.get('synopsis', ''),
self.options.get('platform', ''), 'deprecated' in self.options)
+ # make a duplicate entry in 'objects' to facilitate searching for the
+ # module in PythonDomain.find_obj()
+ env.domaindata['py']['objects'][modname] = (env.docname, 'module')
targetnode = nodes.target('', '', ids=['module-' + modname], ismod=True)
self.state.document.note_explicit_target(targetnode)
ret = [targetnode]
@@ -544,7 +547,7 @@ class PythonDomain(Domain):
if fn == docname:
del self.data['modules'][modname]
- def find_obj(self, env, modname, classname, name, type, searchorder=0):
+ def find_obj(self, env, modname, classname, name, type, searchmode=0):
"""
Find a Python object for "name", perhaps using the given module and/or
classname. Returns a list of (name, object entry) tuples.
@@ -560,22 +563,31 @@ class PythonDomain(Domain):
matches = []
newname = None
- if searchorder == 1:
- if modname and classname and \
- modname + '.' + classname + '.' + name in objects:
- newname = modname + '.' + classname + '.' + name
- elif modname and modname + '.' + name in objects:
- newname = modname + '.' + name
- elif name in objects:
- newname = name
- else:
- # "fuzzy" searching mode
- searchname = '.' + name
- matches = [(name, objects[name]) for name in objects
- if name.endswith(searchname)]
+ if searchmode == 1:
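+ # non-exact search: only consider objects whose type is valid for the given role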
+ objtypes = self.objtypes_for_role(type)
+ if modname and classname:
+ fullname = modname + '.' + classname + '.' + name
+ if fullname in objects and objects[fullname][1] in objtypes:
+ newname = fullname
+ if not newname:
+ if modname and modname + '.' + name in objects and \
+ objects[modname + '.' + name][1] in objtypes:
+ newname = modname + '.' + name
+ elif name in objects and objects[name][1] in objtypes:
+ newname = name
+ else:
+ # "fuzzy" searching mode
+ searchname = '.' + name
+ matches = [(name, objects[name]) for name in objects
+ if name.endswith(searchname)
+ and objects[name][1] in objtypes]
else:
+ # NOTE: searching for exact match, object type is not considered
if name in objects:
newname = name
+ elif type == 'mod':
+ # only exact matches allowed for modules
+ return []
elif classname and classname + '.' + name in objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in objects:
@@ -597,33 +609,35 @@ class PythonDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
type, target, node, contnode):
- if (type == 'mod' or
- type == 'obj' and target in self.data['modules']):
- docname, synopsis, platform, deprecated = \
- self.data['modules'].get(target, ('','','', ''))
- if not docname:
- return None
- else:
- title = '%s%s%s' % ((platform and '(%s) ' % platform),
- synopsis,
- (deprecated and ' (deprecated)' or ''))
- return make_refnode(builder, fromdocname, docname,
- 'module-' + target, contnode, title)
+ modname = node.get('py:module')
+ clsname = node.get('py:class')
+ searchmode = node.hasattr('refspecific') and 1 or 0
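+ # 'refspecific' is set for targets given with a leading dot (".name"); use non-exact search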
+ matches = self.find_obj(env, modname, clsname, target,
+ type, searchmode)
+ if not matches:
+ return None
+ elif len(matches) > 1:
+ env.warn(fromdocname,
+ 'more than one target found for cross-reference '
+ '%r: %s' % (target,
+ ', '.join(match[0] for match in matches)),
+ node.line)
+ name, obj = matches[0]
+
+ if obj[1] == 'module':
+ # get additional info for modules
+ docname, synopsis, platform, deprecated = self.data['modules'][name]
+ assert docname == obj[0]
+ title = name
+ if synopsis:
+ title += ': ' + synopsis
+ if deprecated:
+ title += _(' (deprecated)')
+ if platform:
+ title += ' (' + platform + ')'
+ return make_refnode(builder, fromdocname, docname,
+ 'module-' + name, contnode, title)
else:
- modname = node.get('py:module')
- clsname = node.get('py:class')
- searchorder = node.hasattr('refspecific') and 1 or 0
- matches = self.find_obj(env, modname, clsname, target,
- type, searchorder)
- if not matches:
- return None
- elif len(matches) > 1:
- env.warn(fromdocname,
- 'more than one target found for cross-reference '
- '%r: %s' % (target,
- ', '.join(match[0] for match in matches)),
- node.line)
- name, obj = matches[0]
return make_refnode(builder, fromdocname, obj[0], name,
contnode, name)
diff --git a/sphinx/domains/rst.py b/sphinx/domains/rst.py
index 61809f330..d3ffc6bdc 100644
--- a/sphinx/domains/rst.py
+++ b/sphinx/domains/rst.py
@@ -28,7 +28,7 @@ class ReSTMarkup(ObjectDescription):
"""
def add_target_and_index(self, name, sig, signode):
- targetname = name + '-' + self.objtype
+ targetname = self.objtype + '-' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
signode['ids'].append(targetname)
@@ -130,8 +130,9 @@ class ReSTDomain(Domain):
if (objtype, target) in objects:
return make_refnode(builder, fromdocname,
objects[objtype, target],
- target, contnode, target)
+ objtype + '-' + target,
+ contnode, target + ' ' + objtype)
def get_objects(self):
for (typ, name), docname in self.data['objects'].iteritems():
- yield name, name, typ, docname, name, 1
+ yield name, name, typ, docname, typ + '-' + name, 1
diff --git a/sphinx/environment.py b/sphinx/environment.py
index 5edcb4d90..a051d6361 100644
--- a/sphinx/environment.py
+++ b/sphinx/environment.py
@@ -11,6 +11,7 @@
import re
import os
+import sys
import time
import types
import codecs
@@ -39,10 +40,11 @@ from sphinx.util import url_re, get_matching_docs, docname_join, \
from sphinx.util.nodes import clean_astext, make_refnode
from sphinx.util.osutil import movefile, SEP, ustrftime
from sphinx.util.matching import compile_matchers
-from sphinx.util.pycompat import all
+from sphinx.util.pycompat import all, class_types
from sphinx.errors import SphinxError, ExtensionError
from sphinx.locale import _
+fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
orig_role_function = roles.role
orig_directive_function = directives.directive
@@ -63,7 +65,7 @@ default_settings = {
# This is increased every time an environment attribute is added
# or changed to properly invalidate pickle files.
-ENV_VERSION = 37
+ENV_VERSION = 38
default_substitutions = set([
@@ -80,7 +82,7 @@ class WarningStream(object):
self.warnfunc = warnfunc
def write(self, text):
if text.strip():
- self.warnfunc(text, None, '')
+ self.warnfunc(text.strip(), None, '')
class NoUri(Exception):
@@ -251,7 +253,7 @@ class BuildEnvironment:
if key.startswith('_') or \
isinstance(val, types.ModuleType) or \
isinstance(val, types.FunctionType) or \
- isinstance(val, (type, types.ClassType)):
+ isinstance(val, class_types):
del self.config[key]
try:
pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
@@ -383,14 +385,14 @@ class BuildEnvironment:
If base is a path string, return absolute path under that.
If suffix is not None, add it instead of config.source_suffix.
"""
+ docname = docname.replace(SEP, path.sep)
suffix = suffix or self.config.source_suffix
if base is True:
- return path.join(self.srcdir,
- docname.replace(SEP, path.sep)) + suffix
+ return path.join(self.srcdir, docname) + suffix
elif base is None:
- return docname.replace(SEP, path.sep) + suffix
+ return docname + suffix
else:
- return path.join(base, docname.replace(SEP, path.sep)) + suffix
+ return path.join(base, docname) + suffix
def find_files(self, config):
"""
@@ -628,6 +630,8 @@ class BuildEnvironment:
class SphinxSourceClass(FileInput):
def decode(self_, data):
+ if isinstance(data, unicode):
+ return data
return data.decode(self_.encoding, 'sphinx')
def read(self_):
@@ -649,7 +653,7 @@ class BuildEnvironment:
destination_class=NullOutput)
pub.set_components(None, 'restructuredtext', None)
pub.process_programmatic_settings(None, self.settings, None)
- pub.set_source(None, src_path)
+ pub.set_source(None, src_path.encode(fs_encoding))
pub.set_destination(None, None)
try:
pub.publish()
@@ -1488,8 +1492,9 @@ class BuildEnvironment:
i += 1
# group the entries by letter
- def keyfunc2((k, v), letters=string.ascii_uppercase + '_'):
+ def keyfunc2(item, letters=string.ascii_uppercase + '_'):
# hack: mutating the subitems dicts to a list in the keyfunc
+ k, v = item
v[1] = sorted((si, se) for (si, (se, void)) in v[1].iteritems())
# now calculate the key
letter = k[0].upper()
diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py
index adf08bcde..eef181de4 100644
--- a/sphinx/ext/autodoc.py
+++ b/sphinx/ext/autodoc.py
@@ -14,7 +14,7 @@
import re
import sys
import inspect
-from types import FunctionType, BuiltinFunctionType, MethodType, ClassType
+from types import FunctionType, BuiltinFunctionType, MethodType
from docutils import nodes
from docutils.utils import assemble_option_dict
@@ -27,15 +27,10 @@ from sphinx.application import ExtensionError
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.util.compat import Directive
from sphinx.util.inspect import isdescriptor, safe_getmembers, safe_getattr
+from sphinx.util.pycompat import base_exception, class_types
from sphinx.util.docstrings import prepare_docstring
-try:
- base_exception = BaseException
-except NameError:
- base_exception = Exception
-
-
#: extended signature RE: with explicit module name separated by ::
py_ext_sig_re = re.compile(
r'''^ ([\w.]+::)? # explicit module name
@@ -256,6 +251,9 @@ class Documenter(object):
self.retann = None
# the object to document (set after import_object succeeds)
self.object = None
+ self.object_name = None
+ # the parent/owner of the object to document
+ self.parent = None
# the module analyzer to get at attribute docs, or None
self.analyzer = None
@@ -321,9 +319,13 @@ class Documenter(object):
"""
try:
__import__(self.modname)
+ parent = None
obj = self.module = sys.modules[self.modname]
for part in self.objpath:
+ parent = obj
obj = self.get_attr(obj, part)
+ self.object_name = part
+ self.parent = parent
self.object = obj
return True
# this used to only catch SyntaxError, ImportError and AttributeError,
@@ -416,9 +418,11 @@ class Documenter(object):
def get_doc(self, encoding=None):
"""Decode and return lines of the docstring(s) for the object."""
docstring = self.get_attr(self.object, '__doc__', None)
- if docstring:
- # make sure we have Unicode docstrings, then sanitize and split
- # into lines
+ # make sure we have Unicode docstrings, then sanitize and split
+ # into lines
+ if isinstance(docstring, unicode):
+ return [prepare_docstring(docstring)]
+ elif docstring:
return [prepare_docstring(force_decode(docstring, encoding))]
return []
@@ -438,8 +442,11 @@ class Documenter(object):
# set sourcename and add content from attribute documentation
if self.analyzer:
# prevent encoding errors when the file name is non-ASCII
- filename = unicode(self.analyzer.srcname,
- sys.getfilesystemencoding(), 'replace')
+ if not isinstance(self.analyzer.srcname, unicode):
+ filename = unicode(self.analyzer.srcname,
+ sys.getfilesystemencoding(), 'replace')
+ else:
+ filename = self.analyzer.srcname
sourcename = u'%s:docstring of %s' % (filename, self.fullname)
attr_docs = self.analyzer.find_attr_docs()
@@ -866,7 +873,7 @@ class ClassDocumenter(ModuleLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- return isinstance(member, (type, ClassType))
+ return isinstance(member, class_types)
def import_object(self):
ret = ModuleLevelDocumenter.import_object(self)
@@ -939,9 +946,12 @@ class ClassDocumenter(ModuleLevelDocumenter):
docstrings = [initdocstring]
else:
docstrings.append(initdocstring)
-
- return [prepare_docstring(force_decode(docstring, encoding))
- for docstring in docstrings]
+ doc = []
+ for docstring in docstrings:
+ if not isinstance(docstring, unicode):
+ docstring = force_decode(docstring, encoding)
+ doc.append(prepare_docstring(docstring))
+ return doc
def add_content(self, more_content, no_docstring=False):
if self.doc_as_attr:
@@ -972,7 +982,7 @@ class ExceptionDocumenter(ClassDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- return isinstance(member, (type, ClassType)) and \
+ return isinstance(member, class_types) and \
issubclass(member, base_exception)
@@ -1004,24 +1014,38 @@ class MethodDocumenter(ClassLevelDocumenter):
return inspect.isroutine(member) and \
not isinstance(parent, ModuleDocumenter)
- def import_object(self):
- ret = ClassLevelDocumenter.import_object(self)
- if isinstance(self.object, classmethod) or \
- (isinstance(self.object, MethodType) and
- self.object.im_self is not None):
- self.directivetype = 'classmethod'
- # document class and static members before ordinary ones
- self.member_order = self.member_order - 1
- elif isinstance(self.object, FunctionType) or \
- (isinstance(self.object, BuiltinFunctionType) and
- hasattr(self.object, '__self__') and
- self.object.__self__ is not None):
- self.directivetype = 'staticmethod'
- # document class and static members before ordinary ones
- self.member_order = self.member_order - 1
- else:
- self.directivetype = 'method'
- return ret
+ if sys.version_info >= (3, 0):
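+ # on 3.x, methods looked up on a class are plain functions, so classmethod and
+ # staticmethod can only be detected on the parent's __dict__ (recorded by import_object)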
+ def import_object(self):
+ ret = ClassLevelDocumenter.import_object(self)
+ obj_from_parent = self.parent.__dict__.get(self.object_name)
+ if isinstance(obj_from_parent, classmethod):
+ self.directivetype = 'classmethod'
+ self.member_order = self.member_order - 1
+ elif isinstance(obj_from_parent, staticmethod):
+ self.directivetype = 'staticmethod'
+ self.member_order = self.member_order - 1
+ else:
+ self.directivetype = 'method'
+ return ret
+ else:
+ def import_object(self):
+ ret = ClassLevelDocumenter.import_object(self)
+ if isinstance(self.object, classmethod) or \
+ (isinstance(self.object, MethodType) and
+ self.object.im_self is not None):
+ self.directivetype = 'classmethod'
+ # document class and static members before ordinary ones
+ self.member_order = self.member_order - 1
+ elif isinstance(self.object, FunctionType) or \
+ (isinstance(self.object, BuiltinFunctionType) and
+ hasattr(self.object, '__self__') and
+ self.object.__self__ is not None):
+ self.directivetype = 'staticmethod'
+ # document class and static members before ordinary ones
+ self.member_order = self.member_order - 1
+ else:
+ self.directivetype = 'method'
+ return ret
def format_args(self):
if inspect.isbuiltin(self.object) or \
diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py
index 4924d30b0..f41820e2a 100644
--- a/sphinx/ext/coverage.py
+++ b/sphinx/ext/coverage.py
@@ -173,8 +173,11 @@ class CoverageBuilder(Builder):
attrs = []
for attr_name, attr in inspect.getmembers(
- obj, inspect.ismethod):
+ obj, lambda x: inspect.ismethod(x) or \
+ inspect.isfunction(x)):
if attr_name[0] == '_':
# starts with an underscore, ignore it
continue
diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py
index 9d681f904..62fbfdff4 100644
--- a/sphinx/ext/doctest.py
+++ b/sphinx/ext/doctest.py
@@ -149,14 +149,14 @@ class TestCode(object):
class SphinxDocTestRunner(doctest.DocTestRunner):
def summarize(self, out, verbose=None):
- io = StringIO.StringIO()
+ string_io = StringIO.StringIO()
old_stdout = sys.stdout
- sys.stdout = io
+ sys.stdout = string_io
try:
res = doctest.DocTestRunner.summarize(self, verbose)
finally:
sys.stdout = old_stdout
- out(io.getvalue())
+ out(string_io.getvalue())
return res
def _DocTestRunner__patched_linecache_getlines(self, filename,
diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py
index b930d8cab..a12bad256 100644
--- a/sphinx/ext/inheritance_diagram.py
+++ b/sphinx/ext/inheritance_diagram.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-"""
+r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py
index 0a210879a..251f5c55d 100644
--- a/sphinx/ext/intersphinx.py
+++ b/sphinx/ext/intersphinx.py
@@ -26,6 +26,7 @@
import time
import zlib
+import codecs
import urllib2
import posixpath
from os import path
@@ -33,19 +34,26 @@ from os import path
from docutils import nodes
from sphinx.builders.html import INVENTORY_FILENAME
+from sphinx.util.pycompat import b
+
handlers = [urllib2.ProxyHandler(), urllib2.HTTPRedirectHandler(),
urllib2.HTTPHandler()]
-if hasattr(urllib2, 'HTTPSHandler'):
+try:
handlers.append(urllib2.HTTPSHandler)
+except AttributeError:
+ pass
urllib2.install_opener(urllib2.build_opener(*handlers))
+UTF8StreamReader = codecs.lookup('utf-8')[2]
+
def read_inventory_v1(f, uri, join):
+ f = UTF8StreamReader(f)
invdata = {}
line = f.next()
- projname = line.rstrip()[11:].decode('utf-8')
+ projname = line.rstrip()[11:]
line = f.next()
version = line.rstrip()[11:]
for line in f:
@@ -68,25 +76,25 @@ def read_inventory_v2(f, uri, join, bufsize=16*1024):
projname = line.rstrip()[11:].decode('utf-8')
line = f.readline()
version = line.rstrip()[11:].decode('utf-8')
- line = f.readline()
+ line = f.readline().decode('utf-8')
if 'zlib' not in line:
raise ValueError
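+ # the rest of the inventory is zlib-compressed binary data, so work on byte strings here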
def read_chunks():
decompressor = zlib.decompressobj()
- for chunk in iter(lambda: f.read(bufsize), ''):
+ for chunk in iter(lambda: f.read(bufsize), b('')):
yield decompressor.decompress(chunk)
yield decompressor.flush()
def split_lines(iter):
- buf = ''
+ buf = b('')
for chunk in iter:
buf += chunk
- lineend = buf.find('\n')
+ lineend = buf.find(b('\n'))
while lineend != -1:
yield buf[:lineend].decode('utf-8')
buf = buf[lineend+1:]
- lineend = buf.find('\n')
+ lineend = buf.find(b('\n'))
assert not buf
for line in split_lines(read_chunks()):
@@ -109,13 +117,13 @@ def fetch_inventory(app, uri, inv):
if inv.find('://') != -1:
f = urllib2.urlopen(inv)
else:
- f = open(path.join(app.srcdir, inv))
+ f = open(path.join(app.srcdir, inv), 'rb')
except Exception, err:
app.warn('intersphinx inventory %r not fetchable due to '
'%s: %s' % (inv, err.__class__, err))
return
try:
- line = f.readline().rstrip()
+ line = f.readline().rstrip().decode('utf-8')
try:
if line == '# Sphinx inventory version 1':
invdata = read_inventory_v1(f, uri, join)
@@ -205,9 +213,20 @@ def missing_reference(app, env, node, contnode):
proj, version, uri, dispname = inventory[objtype][target]
newnode = nodes.reference('', '', internal=False, refuri=uri,
reftitle='(in %s v%s)' % (proj, version))
- if dispname == '-':
- dispname = target
- newnode.append(contnode.__class__(dispname, dispname))
+ if node.get('refexplicit'):
+ # use whatever title was given
+ newnode.append(contnode)
+ elif dispname == '-':
+ # use whatever title was given, but strip prefix
+ title = contnode.astext()
+ if in_set and title.startswith(in_set+':'):
+ newnode.append(contnode.__class__(title[len(in_set)+1:],
+ title[len(in_set)+1:]))
+ else:
+ newnode.append(contnode)
+ else:
+ # else use the given display name (used for :ref:)
+ newnode.append(contnode.__class__(dispname, dispname))
return newnode
# at least get rid of the ':' in the target if no explicit title given
if in_set is not None and not node.get('refexplicit', True):
diff --git a/sphinx/ext/oldcmarkup.py b/sphinx/ext/oldcmarkup.py
index 84ae61dd2..571d82a51 100644
--- a/sphinx/ext/oldcmarkup.py
+++ b/sphinx/ext/oldcmarkup.py
@@ -31,6 +31,7 @@ class OldCDirective(Directive):
def run(self):
env = self.state.document.settings.env
if not env.app._oldcmarkup_warned:
env.warn(env.docname, WARNING_MSG, self.lineno)
env.app._oldcmarkup_warned = True
newname = 'c:' + self.name[1:]
@@ -42,6 +43,8 @@ class OldCDirective(Directive):
def old_crole(typ, rawtext, text, lineno, inliner, options={}, content=[]):
env = inliner.document.settings.env
+ if not typ:
+ typ = env.config.default_role
if not env.app._oldcmarkup_warned:
env.warn(env.docname, WARNING_MSG)
env.app._oldcmarkup_warned = True
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
index 81881beb6..db04ac794 100644
--- a/sphinx/ext/viewcode.py
+++ b/sphinx/ext/viewcode.py
@@ -31,7 +31,11 @@ def doctree_read(app, doctree):
env._viewcode_modules[modname] = False
return
analyzer.find_tags()
- entry = analyzer.code.decode(analyzer.encoding), analyzer.tags, {}
+ if not isinstance(analyzer.code, unicode):
+ code = analyzer.code.decode(analyzer.encoding)
+ else:
+ code = analyzer.code
+ entry = code, analyzer.tags, {}
env._viewcode_modules[modname] = entry
elif entry is False:
return
diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py
index f5ea859cb..6d7109199 100644
--- a/sphinx/highlighting.py
+++ b/sphinx/highlighting.py
@@ -156,7 +156,7 @@ class PygmentsBridge(object):
if sys.version_info >= (2, 5):
src = 'from __future__ import with_statement\n' + src
- if isinstance(src, unicode):
+ if sys.version_info < (3, 0) and isinstance(src, unicode):
# Non-ASCII chars will only occur in string literals
# and comments. If we wanted to give them to the parser
# correctly, we'd have to find out the correct source
@@ -175,7 +175,7 @@ class PygmentsBridge(object):
return True
def highlight_block(self, source, lang, linenos=False, warn=None):
- if isinstance(source, str):
+ if not isinstance(source, unicode):
source = source.decode()
if not pygments:
return self.unhighlighted(source)
@@ -240,7 +240,7 @@ class PygmentsBridge(object):
# no HTML styles needed
return ''
if self.dest == 'html':
- return self.fmter[0].get_style_defs()
+ return self.fmter[0].get_style_defs('.highlight')
else:
styledefs = self.fmter[0].get_style_defs()
# workaround for Pygments < 0.12
diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py
index b0b89720c..02958457b 100644
--- a/sphinx/locale/__init__.py
+++ b/sphinx/locale/__init__.py
@@ -8,6 +8,8 @@
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
+import sys
import gettext
import UserString
@@ -178,8 +180,12 @@ pairindextypes = {
translator = None
-def _(message):
- return translator.ugettext(message)
+if sys.version_info >= (3, 0):
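+ # Python 3's gettext() returns text strings; ugettext() no longer exists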
+ def _(message):
+ return translator.gettext(message)
+else:
+ def _(message):
+ return translator.ugettext(message)
def init(locale_dirs, language):
global translator
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index b8e2fded2..ef92297c7 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -18,6 +18,7 @@ from sphinx.errors import PycodeError
from sphinx.pycode import nodes
from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
from sphinx.util import get_module_source
+from sphinx.util.pycompat import next
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
@@ -98,7 +99,8 @@ class AttrDocVisitor(nodes.NodeVisitor):
if not pnode or pnode.type not in (token.INDENT, token.DEDENT):
break
prefix = pnode.get_prefix()
- prefix = prefix.decode(self.encoding)
+ if not isinstance(prefix, unicode):
+ prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
self.add_docstring(node, docstring)
@@ -278,7 +280,7 @@ class ModuleAnalyzer(object):
result[fullname] = (dtype, startline, endline)
expect_indent = False
if tok in ('def', 'class'):
- name = tokeniter.next()[1]
+ name = next(tokeniter)[1]
namespace.append(name)
fullname = '.'.join(namespace)
stack.append((tok, fullname, spos[0], indent))
diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py
index e71846779..fc6eb93aa 100644
--- a/sphinx/pycode/nodes.py
+++ b/sphinx/pycode/nodes.py
@@ -29,6 +29,8 @@ class BaseNode(object):
return NotImplemented
return not self._eq(other)
+ __hash__ = None
+
def get_prev_sibling(self):
"""Return previous child in parent's children, or None."""
if self.parent is None:
diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py
index 319002910..d48937028 100644
--- a/sphinx/pycode/pgen2/literals.py
+++ b/sphinx/pycode/pgen2/literals.py
@@ -66,7 +66,7 @@ uni_escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}|"
def evalString(s, encoding=None):
regex = escape_re
repl = escape
- if encoding:
+ if encoding and not isinstance(s, unicode):
s = s.decode(encoding)
if s.startswith('u') or s.startswith('U'):
regex = uni_escape_re
diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py
index 4489db898..7ad9f012c 100644
--- a/sphinx/pycode/pgen2/tokenize.py
+++ b/sphinx/pycode/pgen2/tokenize.py
@@ -143,7 +143,9 @@ class TokenError(Exception): pass
class StopTokenizing(Exception): pass
-def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
+def printtoken(type, token, scell, ecell, line): # for testing
+ srow, scol = scell
+ erow, ecol = ecell
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py
index 884caca75..7e38a742a 100644
--- a/sphinx/quickstart.py
+++ b/sphinx/quickstart.py
@@ -9,8 +9,9 @@
:license: BSD, see LICENSE for details.
"""
-import sys, os, time
+import sys, os, time, re
from os import path
+from codecs import open
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
@@ -20,10 +21,23 @@ from sphinx.util.console import purple, bold, red, turquoise, \
nocolor, color_terminal
from sphinx.util import texescape
+# function to get input from terminal -- overridden by the test suite
+try:
+ # this raw_input is not converted by 2to3
+ term_input = raw_input
+except NameError:
+ term_input = input
+
PROMPT_PREFIX = '> '
-QUICKSTART_CONF = '''\
+if sys.version_info >= (3, 0):
+ # prevent the generated file from being treated as written in Python 2.x syntax
+ QUICKSTART_CONF = '#!/usr/bin/env python3\n'
+else:
+ QUICKSTART_CONF = ''
+
+QUICKSTART_CONF += '''\
# -*- coding: utf-8 -*-
#
# %(project)s documentation build configuration file, created by
@@ -656,20 +670,22 @@ def do_prompt(d, key, text, default=None, validator=nonempty):
prompt = purple(PROMPT_PREFIX + '%s [%s]: ' % (text, default))
else:
prompt = purple(PROMPT_PREFIX + text + ': ')
- x = raw_input(prompt)
+ x = term_input(prompt)
if default and not x:
x = default
- if x.decode('ascii', 'replace').encode('ascii', 'replace') != x:
- if TERM_ENCODING:
- x = x.decode(TERM_ENCODING)
- else:
- print turquoise('* Note: non-ASCII characters entered '
- 'and terminal encoding unknown -- assuming '
- 'UTF-8 or Latin-1.')
- try:
- x = x.decode('utf-8')
- except UnicodeDecodeError:
- x = x.decode('latin1')
+ if not isinstance(x, unicode):
+ # for Python 2.x, try to get a Unicode string out of it
+ if x.decode('ascii', 'replace').encode('ascii', 'replace') != x:
+ if TERM_ENCODING:
+ x = x.decode(TERM_ENCODING)
+ else:
+ print turquoise('* Note: non-ASCII characters entered '
+ 'and terminal encoding unknown -- assuming '
+ 'UTF-8 or Latin-1.')
+ try:
+ x = x.decode('utf-8')
+ except UnicodeDecodeError:
+ x = x.decode('latin1')
try:
x = validator(x)
except ValidationError, err:
@@ -679,6 +695,18 @@ def do_prompt(d, key, text, default=None, validator=nonempty):
d[key] = x
+if sys.version_info >= (3, 0):
+ # remove Unicode literal prefixes
+ _unicode_string_re = re.compile(r"[uU]('.*?')")
+ def _convert_python_source(source):
+ return _unicode_string_re.sub('\\1', source)
+
+ for f in ['QUICKSTART_CONF', 'EPUB_CONFIG', 'INTERSPHINX_CONFIG']:
+ globals()[f] = _convert_python_source(globals()[f])
+
+ del _unicode_string_re, _convert_python_source
+
+
def inner_main(args):
d = {}
texescape.init()
@@ -834,28 +862,28 @@ directly.'''
if d['ext_intersphinx']:
conf_text += INTERSPHINX_CONFIG
- f = open(path.join(srcdir, 'conf.py'), 'w')
- f.write(conf_text.encode('utf-8'))
+ f = open(path.join(srcdir, 'conf.py'), 'w', encoding='utf-8')
+ f.write(conf_text)
f.close()
masterfile = path.join(srcdir, d['master'] + d['suffix'])
- f = open(masterfile, 'w')
- f.write((MASTER_FILE % d).encode('utf-8'))
+ f = open(masterfile, 'w', encoding='utf-8')
+ f.write(MASTER_FILE % d)
f.close()
if d['makefile']:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
# use binary mode, to avoid writing \r\n on Windows
- f = open(path.join(d['path'], 'Makefile'), 'wb')
- f.write((MAKEFILE % d).encode('utf-8'))
+ f = open(path.join(d['path'], 'Makefile'), 'wb', encoding='utf-8')
+ f.write(MAKEFILE % d)
f.close()
if d['batchfile']:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
- f = open(path.join(d['path'], 'make.bat'), 'w')
- f.write((BATCHFILE % d).encode('utf-8'))
+ f = open(path.join(d['path'], 'make.bat'), 'w', encoding='utf-8')
+ f.write(BATCHFILE % d)
f.close()
print
diff --git a/sphinx/roles.py b/sphinx/roles.py
index d3f3c67e3..0ea0ec485 100644
--- a/sphinx/roles.py
+++ b/sphinx/roles.py
@@ -105,9 +105,9 @@ class XRefRole(object):
classes = ['xref', domain, '%s-%s' % (domain, role)]
# if the first character is a bang, don't cross-reference at all
if text[0:1] == '!':
- text = utils.unescape(text)
+ text = utils.unescape(text)[1:]
if self.fix_parens:
- text, tgt = self._fix_parens(env, False, text[1:], "")
+ text, tgt = self._fix_parens(env, False, text, "")
innernode = self.innernodeclass(rawtext, text, classes=classes)
return self.result_nodes(inliner.document, env, innernode,
is_ref=False)
@@ -173,6 +173,10 @@ def indexmarkup_role(typ, rawtext, etext, lineno, inliner,
indexnode['entries'] = [
('single', _('Python Enhancement Proposals!PEP %s') % text,
targetid, 'PEP %s' % text)]
+ anchor = ''
+ anchorindex = text.find('#')
+ if anchorindex > 0:
+ text, anchor = text[:anchorindex], text[anchorindex:]
try:
pepnum = int(text)
except ValueError:
@@ -182,12 +186,17 @@ def indexmarkup_role(typ, rawtext, etext, lineno, inliner,
return [prb], [msg]
ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum
sn = nodes.strong('PEP '+text, 'PEP '+text)
- rn = nodes.reference('', '', internal=False, refuri=ref, classes=[typ])
+ rn = nodes.reference('', '', internal=False, refuri=ref+anchor,
+ classes=[typ])
rn += sn
return [indexnode, targetnode, rn], []
elif typ == 'rfc':
indexnode['entries'] = [('single', 'RFC; RFC %s' % text,
targetid, 'RFC %s' % text)]
+ anchor = ''
+ anchorindex = text.find('#')
+ if anchorindex > 0:
+ text, anchor = text[:anchorindex], text[anchorindex:]
try:
rfcnum = int(text)
except ValueError:
@@ -197,7 +206,8 @@ def indexmarkup_role(typ, rawtext, etext, lineno, inliner,
return [prb], [msg]
ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
sn = nodes.strong('RFC '+text, 'RFC '+text)
- rn = nodes.reference('', '', internal=False, refuri=ref, classes=[typ])
+ rn = nodes.reference('', '', internal=False, refuri=ref+anchor,
+ classes=[typ])
rn += sn
return [indexnode, targetnode, rn], []
diff --git a/sphinx/texinputs/sphinx.sty b/sphinx/texinputs/sphinx.sty
index be8a6c15d..b5381392a 100644
--- a/sphinx/texinputs/sphinx.sty
+++ b/sphinx/texinputs/sphinx.sty
@@ -446,6 +446,7 @@
linkcolor=InnerLinkColor,filecolor=OuterLinkColor,
menucolor=OuterLinkColor,urlcolor=OuterLinkColor,
citecolor=InnerLinkColor]{hyperref}
+\RequirePackage[figure,table]{hypcap}
% From docutils.writers.latex2e
\providecommand{\DUspan}[2]{%
diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py
index 8d1298cd3..ec48009f4 100644
--- a/sphinx/util/__init__.py
+++ b/sphinx/util/__init__.py
@@ -18,6 +18,7 @@ import tempfile
import posixpath
import traceback
from os import path
+from codecs import open
import docutils
from docutils.utils import relative_path
@@ -140,8 +141,8 @@ def copy_static_entry(source, targetdir, builder, context={},
target = path.join(targetdir, path.basename(source))
if source.lower().endswith('_t') and builder.templates:
# templated!
- fsrc = open(source, 'rb')
- fdst = open(target[:-2], 'wb')
+ fsrc = open(source, 'r', encoding='utf-8')
+ fdst = open(target[:-2], 'w', encoding='utf-8')
fdst.write(builder.templates.render_string(fsrc.read(), context))
fsrc.close()
fdst.close()
@@ -162,17 +163,23 @@ def copy_static_entry(source, targetdir, builder, context={},
shutil.copytree(source, target)
+_DEBUG_HEADER = '''\
+# Sphinx version: %s
+# Docutils version: %s %s
+# Jinja2 version: %s
+'''
+
def save_traceback():
"""
Save the current exception's traceback in a temporary file.
"""
exc = traceback.format_exc()
fd, path = tempfile.mkstemp('.log', 'sphinx-err-')
- os.write(fd, '# Sphinx version: %s\n' % sphinx.__version__)
- os.write(fd, '# Docutils version: %s %s\n' % (docutils.__version__,
- docutils.__version_details__))
- os.write(fd, '# Jinja2 version: %s\n' % jinja2.__version__)
- os.write(fd, exc)
+ os.write(fd, (_DEBUG_HEADER %
+ (sphinx.__version__,
+ docutils.__version__, docutils.__version_details__,
+ jinja2.__version__)).encode('utf-8'))
+ os.write(fd, exc.encode('utf-8'))
os.close(fd)
return path
diff --git a/sphinx/util/docfields.py b/sphinx/util/docfields.py
index c8e58f48a..6ce6d82bf 100644
--- a/sphinx/util/docfields.py
+++ b/sphinx/util/docfields.py
@@ -142,9 +142,12 @@ class TypedField(GroupedField):
par += self.make_xref(self.rolename, domain, fieldarg, nodes.strong)
if fieldarg in types:
par += nodes.Text(' (')
- fieldtype = types[fieldarg]
+ # NOTE: using .pop() here to prevent a single type node from being
+ # inserted twice into the doctree, which leads to
+ # inconsistencies later when references are resolved
+ fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
- typename = u''.join(n.astext() for n in types[fieldarg])
+ typename = u''.join(n.astext() for n in fieldtype)
par += self.make_xref(self.typerolename, domain, typename)
else:
par += fieldtype
diff --git a/sphinx/util/jsonimpl.py b/sphinx/util/jsonimpl.py
index 21f285d30..fda85b5e3 100644
--- a/sphinx/util/jsonimpl.py
+++ b/sphinx/util/jsonimpl.py
@@ -13,8 +13,10 @@ import UserString
try:
import json
+ # json-py's json module has no JSONEncoder; this will raise AttributeError
+ # if json-py is imported instead of the built-in json module
JSONEncoder = json.JSONEncoder
-except ImportError:
+except (ImportError, AttributeError):
try:
import simplejson as json
JSONEncoder = json.JSONEncoder
@@ -39,5 +41,8 @@ def dumps(obj, *args, **kwds):
kwds['cls'] = SphinxJSONEncoder
return json.dumps(obj, *args, **kwds)
-load = json.load
-loads = json.loads
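+# wrap load/loads so the json implementation is looked up at call time rather than bound here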
+def load(*args, **kwds):
+ return json.load(*args, **kwds)
+
+def loads(*args, **kwds):
+ return json.loads(*args, **kwds)
diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py
index 97b585696..aab8f0142 100644
--- a/sphinx/util/nodes.py
+++ b/sphinx/util/nodes.py
@@ -10,11 +10,11 @@
"""
import re
-import types
from docutils import nodes
from sphinx import addnodes
+from sphinx.util.pycompat import class_types
# \x00 means the "<" was backslash-escaped
@@ -115,7 +115,7 @@ def _new_traverse(self, condition=None,
if include_self and descend and not siblings and not ascend:
if condition is None:
return self._all_traverse([])
- elif isinstance(condition, (types.ClassType, type)):
+ elif isinstance(condition, class_types):
return self._fast_traverse(condition, [])
return self._old_traverse(condition, include_self,
descend, siblings, ascend)
diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py
index beab38cbd..9943b207f 100644
--- a/sphinx/util/osutil.py
+++ b/sphinx/util/osutil.py
@@ -11,6 +11,7 @@
import os
import re
+import sys
import time
import errno
import shutil
@@ -124,7 +125,10 @@ no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
def make_filename(string):
return no_fn_re.sub('', string)
-
-def ustrftime(format, *args):
- # strftime for unicode strings
- return time.strftime(unicode(format).encode('utf-8'), *args).decode('utf-8')
+if sys.version_info < (3, 0):
+ def ustrftime(format, *args):
+ # strftime for unicode strings
+ return time.strftime(unicode(format).encode('utf-8'), *args) \
+ .decode('utf-8')
+else:
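+ # on Python 3, time.strftime accepts and returns str directly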
+ ustrftime = time.strftime
diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py
index bdd9507df..5f23bbe18 100644
--- a/sphinx/util/pycompat.py
+++ b/sphinx/util/pycompat.py
@@ -12,6 +12,65 @@
import sys
import codecs
import encodings
+import re
+
+try:
+ from types import ClassType
+ class_types = (type, ClassType)
+except ImportError:
+ # Python 3
+ class_types = (type,)
+
+
+# the ubiquitous "bytes" helper function
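+# e.g. b('\n') yields a byte string on both 2.x and 3.x, suitable for binary file data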
+if sys.version_info >= (3, 0):
+ def b(s):
+ return s.encode('utf-8')
+else:
+ b = str
+
+
+# Support for running 2to3 over config files
+
+if sys.version_info < (3, 0):
+ # no need to refactor on 2.x versions
+ convert_with_2to3 = None
+else:
+ def convert_with_2to3(filepath):
+ from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+ from lib2to3.pgen2.parse import ParseError
+ fixers = get_fixers_from_package('lib2to3.fixes')
+ refactoring_tool = RefactoringTool(fixers)
+ source = refactoring_tool._read_python_source(filepath)[0]
+ try:
+ tree = refactoring_tool.refactor_string(source, 'conf.py')
+ except ParseError, err:
+ # do not propagate lib2to3 exceptions
+ lineno, offset = err.context[1]
+ # try to match ParseError details with SyntaxError details
+ raise SyntaxError(err.msg, (filepath, lineno, offset, err.value))
+ return unicode(tree)
+
+
+try:
+ base_exception = BaseException
+except NameError:
+ base_exception = Exception
+
+
+try:
+ next = next
+except NameError:
+ # this is on Python 2, where the method is called "next" (it is refactored
+ # to __next__ by 2to3, but in that case never executed)
+ def next(iterator):
+ return iterator.next()
+
+
+try:
+ bytes = bytes
+except NameError:
+ bytes = str
try:
diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py
index 5674b388c..de0bbdf2f 100644
--- a/sphinx/writers/latex.py
+++ b/sphinx/writers/latex.py
@@ -224,6 +224,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
else:
self.top_sectionlevel = 1
self.next_section_ids = set()
+ self.next_figure_ids = set()
+ self.next_table_ids = set()
# flags
self.verbatim = None
self.in_title = 0
@@ -314,7 +316,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
# ... and all others are the appendices
self.body.append(u'\n\\appendix\n')
self.first_document = -1
- if node.has_key('docname'):
+ if 'docname' in node:
self.body.append(self.hypertarget(':doc'))
# "- 1" because the level is increased before the title is visited
self.sectionlevel = self.top_sectionlevel - 1
@@ -633,7 +635,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('{|' + ('L|' * self.table.colcount) + '}\n')
if self.table.longtable and self.table.caption is not None:
self.body.append(u'\\caption{%s} \\\\\n' % self.table.caption)
-
+ if self.table.caption is not None:
+ for id in self.next_table_ids:
+ self.body.append(self.hypertarget(id, anchor=False))
+ self.next_table_ids.clear()
if self.table.longtable:
self.body.append('\\hline\n')
self.body.append('\\endfirsthead\n\n')
@@ -694,7 +699,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.rowcount += 1
def visit_entry(self, node):
- if node.has_key('morerows') or node.has_key('morecols'):
+ if 'morerows' in node or 'morecols' in node:
raise UnsupportedError('%s:%s: column or row spanning cells are '
'not yet implemented.' %
(self.curfilestack[-1], node.line or ''))
@@ -751,7 +756,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
def visit_term(self, node):
ctx = '}] \\leavevmode'
- if node.has_key('ids') and node['ids']:
+ if node.get('ids'):
ctx += self.hypertarget(node['ids'][0])
self.body.append('\\item[{')
self.context.append(ctx)
@@ -833,20 +838,20 @@ class LaTeXTranslator(nodes.NodeVisitor):
post = []
include_graphics_options = []
is_inline = self.is_inline(node)
- if attrs.has_key('scale'):
+ if 'scale' in attrs:
# Could also be done with ``scale`` option to
# ``\includegraphics``; doing it this way for consistency.
pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,))
post.append('}')
- if attrs.has_key('width'):
+ if 'width' in attrs:
w = self.latex_image_length(attrs['width'])
if w:
include_graphics_options.append('width=%s' % w)
- if attrs.has_key('height'):
+ if 'height' in attrs:
h = self.latex_image_length(attrs['height'])
if h:
include_graphics_options.append('height=%s' % h)
- if attrs.has_key('align'):
+ if 'align' in attrs:
align_prepost = {
# By default latex aligns the top of an image.
(1, 'top'): ('', ''),
@@ -887,13 +892,17 @@ class LaTeXTranslator(nodes.NodeVisitor):
pass
def visit_figure(self, node):
- if node.has_key('width') and node.get('align', '') in ('left', 'right'):
+ ids = ''
+ for id in self.next_figure_ids:
+ ids += self.hypertarget(id, anchor=False)
+ self.next_figure_ids.clear()
+ if 'width' in node and node.get('align', '') in ('left', 'right'):
self.body.append('\\begin{wrapfigure}{%s}{%s}\n\\centering' %
(node['align'] == 'right' and 'r' or 'l',
node['width']))
- self.context.append('\\end{wrapfigure}\n')
+ self.context.append(ids + '\\end{wrapfigure}\n')
else:
- if (not node.attributes.has_key('align') or
+ if (not 'align' in node.attributes or
node.attributes['align'] == 'center'):
# centering does not add vertical space like center.
align = '\n\\centering'
@@ -903,7 +912,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
align = '\\begin{flush%s}' % node.attributes['align']
align_end = '\\end{flush%s}' % node.attributes['align']
self.body.append('\\begin{figure}[htbp]%s\n' % align)
- self.context.append('%s\\end{figure}\n' % align_end)
+ self.context.append(ids + align_end + '\\end{figure}\n')
def depart_figure(self, node):
self.body.append(self.context.pop())
@@ -983,6 +992,20 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.next_section_ids.add(node['refid'])
self.next_section_ids.update(node['ids'])
return
+ elif isinstance(next, nodes.figure):
+ # labels for figures go in the figure body, not before
+ if node.get('refid'):
+ self.next_figure_ids.add(node['refid'])
+ self.next_figure_ids.update(node['ids'])
+ return
+ elif isinstance(next, nodes.table):
+ # same for tables, but only if they have a caption
+ for n in node:
+ if isinstance(n, nodes.title):
+ if node.get('refid'):
+ self.next_table_ids.add(node['refid'])
+ self.next_table_ids.update(node['ids'])
+ return
except IndexError:
pass
if 'refuri' in node:
@@ -1154,7 +1177,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.no_contractions -= 1
if self.in_title:
self.body.append(r'\texttt{%s}' % content)
- elif node.has_key('role') and node['role'] == 'samp':
+ elif node.get('role') == 'samp':
self.body.append(r'\samp{%s}' % content)
else:
self.body.append(r'\code{%s}' % content)
@@ -1183,10 +1206,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
code = self.verbatim.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
- if node.has_key('language'):
+ if 'language' in node:
# code-block directives
lang = node['language']
- if node.has_key('linenos'):
+ if 'linenos' in node:
linenos = node['linenos']
hlcode = self.highlighter.highlight_block(code, lang, linenos)
# workaround for Unicode issue
diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py
index 98528d5ba..b28b23792 100644
--- a/sphinx/writers/text.py
+++ b/sphinx/writers/text.py
@@ -390,7 +390,7 @@ class TextTranslator(nodes.NodeVisitor):
self.add_text(''.join(out) + '\n')
def writerow(row):
- lines = map(None, *row)
+ lines = zip(*row)
for line in lines:
out = ['|']
for i, cell in enumerate(line):
diff --git a/tests/etree13/ElementTree.py b/tests/etree13/ElementTree.py
index d37325049..f459c7f8f 100644
--- a/tests/etree13/ElementTree.py
+++ b/tests/etree13/ElementTree.py
@@ -1425,12 +1425,16 @@ class XMLParser(object):
err.position = value.lineno, value.offset
raise err
- def _fixtext(self, text):
- # convert text string to ascii, if possible
- try:
- return text.encode("ascii")
- except UnicodeError:
+ if sys.version_info >= (3, 0):
+ def _fixtext(self, text):
return text
+ else:
+ def _fixtext(self, text):
+ # convert text string to ascii, if possible
+ try:
+ return text.encode("ascii")
+ except UnicodeError:
+ return text
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
diff --git a/tests/path.py b/tests/path.py
index ceb895f50..df96bce45 100644
--- a/tests/path.py
+++ b/tests/path.py
@@ -1,953 +1,200 @@
-""" path.py - An object representing a path to a file or directory.
-
-Example:
-
-from path import path
-d = path('/home/guido/bin')
-for f in d.files('*.py'):
- f.chmod(0755)
-
-This module requires Python 2.2 or later.
-
-
-URL: http://www.jorendorff.com/articles/python/path
-Author: Jason Orendorff (and others - see the url!)
-Date: 9 Mar 2007
+#!/usr/bin/env python
+# coding: utf-8
"""
+ path
+ ~~~~
+
+ :copyright: Copyright 2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import os
+import sys
+import shutil
+from codecs import open
-# TODO
-# - Tree-walking functions don't avoid symlink loops. Matt Harrison
-# sent me a patch for this.
-# - Bug in write_text(). It doesn't support Universal newline mode.
-# - Better error message in listdir() when self isn't a
-# directory. (On Windows, the error message really sucks.)
-# - Make sure everything has a good docstring.
-# - Add methods for regex find and replace.
-# - guess_content_type() method?
-# - Perhaps support arguments to touch().
-
-from __future__ import generators
-
-import sys, warnings, os, fnmatch, glob, shutil, codecs
-
-__version__ = '2.2'
-__all__ = ['path']
-
-# Platform-specific support for path.owner
-if os.name == 'nt':
- try:
- import win32security
- except ImportError:
- win32security = None
-else:
- try:
- import pwd
- except ImportError:
- pwd = None
-
-# Pre-2.3 support. Are unicode filenames supported?
-_base = str
-_getcwd = os.getcwd
-try:
- if os.path.supports_unicode_filenames:
- _base = unicode
- _getcwd = os.getcwdu
-except AttributeError:
- pass
-
-# Pre-2.3 workaround for booleans
-try:
- True, False
-except NameError:
- True, False = 1, 0
-
-# Pre-2.3 workaround for basestring.
-try:
- basestring
-except NameError:
- basestring = (str, unicode)
-
-# Universal newline support
-_textmode = 'r'
-if hasattr(file, 'newlines'):
- _textmode = 'U'
+FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
-class TreeWalkWarning(Warning):
- pass
-
-class path(_base):
- """ Represents a filesystem path.
-
- For documentation on individual methods, consult their
- counterparts in os.path.
+class path(str):
"""
+ Represents a path which behaves like a string.
+ """
+ if sys.version_info < (3, 0):
+ def __new__(cls, s, encoding=FILESYSTEMENCODING, errors='strict'):
+ if isinstance(s, unicode):
+ s = s.encode(encoding, errors=errors)
+ return str.__new__(cls, s)
+ return str.__new__(cls, s)
- # --- Special Python methods.
+ @property
+ def parent(self):
+ """
+ The name of the directory the file or directory is in.
+ """
+ return self.__class__(os.path.dirname(self))
- def __repr__(self):
- return 'path(%s)' % _base.__repr__(self)
+ def abspath(self):
+ """
+ Returns the absolute path.
+ """
+ return self.__class__(os.path.abspath(self))
- # Adding a path and a string yields a path.
- def __add__(self, more):
+ def isabs(self):
+ """
+ Returns ``True`` if the path is absolute.
+ """
+ return os.path.isabs(self)
+
+ def isdir(self):
+ """
+ Returns ``True`` if the path is a directory.
+ """
+ return os.path.isdir(self)
+
+ def isfile(self):
+ """
+ Returns ``True`` if the path is a file.
+ """
+ return os.path.isfile(self)
+
+ def islink(self):
+ """
+ Returns ``True`` if the path is a symbolic link.
+ """
+ return os.path.islink(self)
+
+ def ismount(self):
+ """
+ Returns ``True`` if the path is a mount point.
+ """
+ return os.path.ismount(self)
+
+ def rmtree(self, ignore_errors=False, onerror=None):
+ """
+ Removes the file or directory and any files or directories it may
+ contain.
+
+ :param ignore_errors:
+ If ``True`` errors are silently ignored, otherwise an exception
+ is raised in case an error occurs.
+
+ :param onerror:
+ A callback which gets called with the arguments `func`, `path` and
+ `exc_info`. `func` is one of :func:`os.listdir`, :func:`os.remove`
+ or :func:`os.rmdir`. `path` is the argument to the function which
+ caused it to fail and `exc_info` is a tuple as returned by
+ :func:`sys.exc_info`.
+ """
+ shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror)
+
+ def copytree(self, destination, symlinks=False, ignore=None):
+ """
+ Recursively copy a directory to the given `destination`. If the given
+ `destination` does not exist it will be created.
+
+ :param symlinks:
+ If ``True`` symbolic links in the source tree result in symbolic
+ links in the destination tree otherwise the contents of the files
+ pointed to by the symbolic links are copied.
+
+ :param ignore:
+ A callback which gets called with the path of the directory being
+ copied and a list of paths as returned by :func:`os.listdir`.
+ """
+ shutil.copytree(self, destination, symlinks=symlinks, ignore=ignore)
+
+ def movetree(self, destination):
+ """
+ Recursively move the file or directory to the given `destination`
+ similar to the Unix "mv" command.
+
+ If the `destination` is a file it may be overwritten depending on the
+ :func:`os.rename` semantics.
+ """
+ shutil.move(self, destination)
+
+ move = movetree
+
+ def unlink(self):
+ """
+ Removes a file.
+ """
+ os.unlink(self)
+
+ def write_text(self, text, **kwargs):
+ """
+ Writes the given `text` to the file.
+ """
+ f = open(self, 'w', **kwargs)
try:
- resultStr = _base.__add__(self, more)
- except TypeError: #Python bug
- resultStr = NotImplemented
- if resultStr is NotImplemented:
- return resultStr
- return self.__class__(resultStr)
+ f.write(text)
+ finally:
+ f.close()
- def __radd__(self, other):
- if isinstance(other, basestring):
- return self.__class__(other.__add__(self))
- else:
- return NotImplemented
-
- # The / operator joins paths.
- def __div__(self, rel):
- """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
-
- Join two path components, adding a separator character if
- needed.
+ def text(self, **kwargs):
"""
- return self.__class__(os.path.join(self, rel))
-
- # Make the / operator work even when true division is enabled.
- __truediv__ = __div__
-
- def getcwd(cls):
- """ Return the current working directory as a path object. """
- return cls(_getcwd())
- getcwd = classmethod(getcwd)
-
-
- # --- Operations on path strings.
-
- isabs = os.path.isabs
- def abspath(self): return self.__class__(os.path.abspath(self))
- def normcase(self): return self.__class__(os.path.normcase(self))
- def normpath(self): return self.__class__(os.path.normpath(self))
- def realpath(self): return self.__class__(os.path.realpath(self))
- def expanduser(self): return self.__class__(os.path.expanduser(self))
- def expandvars(self): return self.__class__(os.path.expandvars(self))
- def dirname(self): return self.__class__(os.path.dirname(self))
- basename = os.path.basename
-
- def expand(self):
- """ Clean up a filename by calling expandvars(),
- expanduser(), and normpath() on it.
-
- This is commonly everything needed to clean up a filename
- read from a configuration file, for example.
+ Returns the text in the file.
"""
- return self.expandvars().expanduser().normpath()
-
- def _get_namebase(self):
- base, ext = os.path.splitext(self.name)
- return base
-
- def _get_ext(self):
- f, ext = os.path.splitext(_base(self))
- return ext
-
- def _get_drive(self):
- drive, r = os.path.splitdrive(self)
- return self.__class__(drive)
-
- parent = property(
- dirname, None, None,
- """ This path's parent directory, as a new path object.
-
- For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
- """)
-
- name = property(
- basename, None, None,
- """ The name of this file or directory without the full path.
-
- For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
- """)
-
- namebase = property(
- _get_namebase, None, None,
- """ The same as path.name, but with one file extension stripped off.
-
- For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
- but path('/home/guido/python.tar.gz').namebase == 'python.tar'
- """)
-
- ext = property(
- _get_ext, None, None,
- """ The file extension, for example '.py'. """)
-
- drive = property(
- _get_drive, None, None,
- """ The drive specifier, for example 'C:'.
- This is always empty on systems that don't use drive specifiers.
- """)
-
- def splitpath(self):
- """ p.splitpath() -> Return (p.parent, p.name). """
- parent, child = os.path.split(self)
- return self.__class__(parent), child
-
- def splitdrive(self):
- """ p.splitdrive() -> Return (p.drive, ).
-
- Split the drive specifier from this path. If there is
- no drive specifier, p.drive is empty, so the return value
- is simply (path(''), p). This is always the case on Unix.
- """
- drive, rel = os.path.splitdrive(self)
- return self.__class__(drive), rel
-
- def splitext(self):
- """ p.splitext() -> Return (p.stripext(), p.ext).
-
- Split the filename extension from this path and return
- the two parts. Either part may be empty.
-
- The extension is everything from '.' to the end of the
- last path segment. This has the property that if
- (a, b) == p.splitext(), then a + b == p.
- """
- filename, ext = os.path.splitext(self)
- return self.__class__(filename), ext
-
- def stripext(self):
- """ p.stripext() -> Remove one file extension from the path.
-
- For example, path('/home/guido/python.tar.gz').stripext()
- returns path('/home/guido/python.tar').
- """
- return self.splitext()[0]
-
- if hasattr(os.path, 'splitunc'):
- def splitunc(self):
- unc, rest = os.path.splitunc(self)
- return self.__class__(unc), rest
-
- def _get_uncshare(self):
- unc, r = os.path.splitunc(self)
- return self.__class__(unc)
-
- uncshare = property(
- _get_uncshare, None, None,
- """ The UNC mount point for this path.
- This is empty for paths on local drives. """)
-
- def joinpath(self, *args):
- """ Join two or more path components, adding a separator
- character (os.sep) if needed. Returns a new path
- object.
- """
- return self.__class__(os.path.join(self, *args))
-
- def splitall(self):
- r""" Return a list of the path components in this path.
-
- The first item in the list will be a path. Its value will be
- either os.curdir, os.pardir, empty, or the root directory of
- this path (for example, '/' or 'C:\\'). The other items in
- the list will be strings.
-
- path.path.joinpath(*result) will yield the original path.
- """
- parts = []
- loc = self
- while loc != os.curdir and loc != os.pardir:
- prev = loc
- loc, child = prev.splitpath()
- if loc == prev:
- break
- parts.append(child)
- parts.append(loc)
- parts.reverse()
- return parts
-
- def relpath(self):
- """ Return this path as a relative path,
- based from the current working directory.
- """
- cwd = self.__class__(os.getcwd())
- return cwd.relpathto(self)
-
- def relpathto(self, dest):
- """ Return a relative path from self to dest.
-
- If there is no relative path from self to dest, for example if
- they reside on different drives in Windows, then this returns
- dest.abspath().
- """
- origin = self.abspath()
- dest = self.__class__(dest).abspath()
-
- orig_list = origin.normcase().splitall()
- # Don't normcase dest! We want to preserve the case.
- dest_list = dest.splitall()
-
- if orig_list[0] != os.path.normcase(dest_list[0]):
- # Can't get here from there.
- return dest
-
- # Find the location where the two paths start to differ.
- i = 0
- for start_seg, dest_seg in zip(orig_list, dest_list):
- if start_seg != os.path.normcase(dest_seg):
- break
- i += 1
-
- # Now i is the point where the two paths diverge.
- # Need a certain number of "os.pardir"s to work up
- # from the origin to the point of divergence.
- segments = [os.pardir] * (len(orig_list) - i)
- # Need to add the diverging part of dest_list.
- segments += dest_list[i:]
- if len(segments) == 0:
- # If they happen to be identical, use os.curdir.
- relpath = os.curdir
- else:
- relpath = os.path.join(*segments)
- return self.__class__(relpath)
-
- # --- Listing, searching, walking, and matching
-
- def listdir(self, pattern=None):
- """ D.listdir() -> List of items in this directory.
-
- Use D.files() or D.dirs() instead if you want a listing
- of just files or just subdirectories.
-
- The elements of the list are path objects.
-
- With the optional 'pattern' argument, this only lists
- items whose names match the given pattern.
- """
- names = os.listdir(self)
- if pattern is not None:
- names = fnmatch.filter(names, pattern)
- return [self / child for child in names]
-
- def dirs(self, pattern=None):
- """ D.dirs() -> List of this directory's subdirectories.
-
- The elements of the list are path objects.
- This does not walk recursively into subdirectories
- (but see path.walkdirs).
-
- With the optional 'pattern' argument, this only lists
- directories whose names match the given pattern. For
- example, d.dirs('build-*').
- """
- return [p for p in self.listdir(pattern) if p.isdir()]
-
- def files(self, pattern=None):
- """ D.files() -> List of the files in this directory.
-
- The elements of the list are path objects.
- This does not walk into subdirectories (see path.walkfiles).
-
- With the optional 'pattern' argument, this only lists files
- whose names match the given pattern. For example,
- d.files('*.pyc').
- """
-
- return [p for p in self.listdir(pattern) if p.isfile()]
-
- def walk(self, pattern=None, errors='strict'):
- """ D.walk() -> iterator over files and subdirs, recursively.
-
- The iterator yields path objects naming each child item of
- this directory and its descendants. This requires that
- D.isdir().
-
- This performs a depth-first traversal of the directory tree.
- Each directory is returned just before all its children.
-
- The errors= keyword argument controls behavior when an
- error occurs. The default is 'strict', which causes an
- exception. The other allowed values are 'warn', which
- reports the error via warnings.warn(), and 'ignore'.
- """
- if errors not in ('strict', 'warn', 'ignore'):
- raise ValueError("invalid errors parameter")
-
+ f = open(self, mode='U', **kwargs)
try:
- childList = self.listdir()
- except Exception:
- if errors == 'ignore':
- return
- elif errors == 'warn':
- warnings.warn(
- "Unable to list directory '%s': %s"
- % (self, sys.exc_info()[1]),
- TreeWalkWarning)
- return
- else:
- raise
-
- for child in childList:
- if pattern is None or child.fnmatch(pattern):
- yield child
- try:
- isdir = child.isdir()
- except Exception:
- if errors == 'ignore':
- isdir = False
- elif errors == 'warn':
- warnings.warn(
- "Unable to access '%s': %s"
- % (child, sys.exc_info()[1]),
- TreeWalkWarning)
- isdir = False
- else:
- raise
-
- if isdir:
- for item in child.walk(pattern, errors):
- yield item
-
- def walkdirs(self, pattern=None, errors='strict'):
- """ D.walkdirs() -> iterator over subdirs, recursively.
-
- With the optional 'pattern' argument, this yields only
- directories whose names match the given pattern. For
- example, mydir.walkdirs('*test') yields only directories
- with names ending in 'test'.
-
- The errors= keyword argument controls behavior when an
- error occurs. The default is 'strict', which causes an
- exception. The other allowed values are 'warn', which
- reports the error via warnings.warn(), and 'ignore'.
- """
- if errors not in ('strict', 'warn', 'ignore'):
- raise ValueError("invalid errors parameter")
-
- try:
- dirs = self.dirs()
- except Exception:
- if errors == 'ignore':
- return
- elif errors == 'warn':
- warnings.warn(
- "Unable to list directory '%s': %s"
- % (self, sys.exc_info()[1]),
- TreeWalkWarning)
- return
- else:
- raise
-
- for child in dirs:
- if pattern is None or child.fnmatch(pattern):
- yield child
- for subsubdir in child.walkdirs(pattern, errors):
- yield subsubdir
-
- def walkfiles(self, pattern=None, errors='strict'):
- """ D.walkfiles() -> iterator over files in D, recursively.
-
- The optional argument, pattern, limits the results to files
- with names that match the pattern. For example,
- mydir.walkfiles('*.tmp') yields only files with the .tmp
- extension.
- """
- if errors not in ('strict', 'warn', 'ignore'):
- raise ValueError("invalid errors parameter")
-
- try:
- childList = self.listdir()
- except Exception:
- if errors == 'ignore':
- return
- elif errors == 'warn':
- warnings.warn(
- "Unable to list directory '%s': %s"
- % (self, sys.exc_info()[1]),
- TreeWalkWarning)
- return
- else:
- raise
-
- for child in childList:
- try:
- isfile = child.isfile()
- isdir = not isfile and child.isdir()
- except:
- if errors == 'ignore':
- continue
- elif errors == 'warn':
- warnings.warn(
- "Unable to access '%s': %s"
- % (self, sys.exc_info()[1]),
- TreeWalkWarning)
- continue
- else:
- raise
-
- if isfile:
- if pattern is None or child.fnmatch(pattern):
- yield child
- elif isdir:
- for f in child.walkfiles(pattern, errors):
- yield f
-
- def fnmatch(self, pattern):
- """ Return True if self.name matches the given pattern.
-
- pattern - A filename pattern with wildcards,
- for example '*.py'.
- """
- return fnmatch.fnmatch(self.name, pattern)
-
- def glob(self, pattern):
- """ Return a list of path objects that match the pattern.
-
- pattern - a path relative to this directory, with wildcards.
-
- For example, path('/users').glob('*/bin/*') returns a list
- of all the files users have in their bin directories.
- """
- cls = self.__class__
- return [cls(s) for s in glob.glob(_base(self / pattern))]
-
-
- # --- Reading or writing an entire file at once.
-
- def open(self, mode='r'):
- """ Open this file. Return a file object. """
- return file(self, mode)
+ return f.read()
+ finally:
+ f.close()
def bytes(self):
- """ Open this file, read all bytes, return them as a string. """
- f = self.open('rb')
+ """
+ Returns the bytes in the file.
+ """
+ f = open(self, mode='rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
- """ Open this file and write the given bytes to it.
+ """
+ Writes the given `bytes` to the file.
- Default behavior is to overwrite any existing file.
- Call p.write_bytes(bytes, append=True) to append instead.
+ :param append:
+ If ``True``, the given `bytes` are appended to the end of the file.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
- f = self.open(mode)
+ f = open(self, mode=mode)
try:
f.write(bytes)
finally:
f.close()
- def text(self, encoding=None, errors='strict'):
- r""" Open this file, read it in, return the content as a string.
-
- This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
- are automatically translated to '\n'.
-
- Optional arguments:
-
- encoding - The Unicode encoding (or character set) of
- the file. If present, the content of the file is
- decoded and returned as a unicode object; otherwise
- it is returned as an 8-bit str.
- errors - How to handle Unicode errors; see help(str.decode)
- for the options. Default is 'strict'.
+ def exists(self):
"""
- if encoding is None:
- # 8-bit
- f = self.open(_textmode)
- try:
- return f.read()
- finally:
- f.close()
- else:
- # Unicode
- f = codecs.open(self, 'r', encoding, errors)
- # (Note - Can't use 'U' mode here, since codecs.open
- # doesn't support 'U' mode, even in Python 2.3.)
- try:
- t = f.read()
- finally:
- f.close()
- return (t.replace(u'\r\n', u'\n')
- .replace(u'\r\x85', u'\n')
- .replace(u'\r', u'\n')
- .replace(u'\x85', u'\n')
- .replace(u'\u2028', u'\n'))
-
- def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
- r""" Write the given text to this file.
-
- The default behavior is to overwrite any existing file;
- to append instead, use the 'append=True' keyword argument.
-
- There are two differences between path.write_text() and
- path.write_bytes(): newline handling and Unicode handling.
- See below.
-
- Parameters:
-
- - text - str/unicode - The text to be written.
-
- - encoding - str - The Unicode encoding that will be used.
- This is ignored if 'text' isn't a Unicode string.
-
- - errors - str - How to handle Unicode encoding errors.
- Default is 'strict'. See help(unicode.encode) for the
- options. This is ignored if 'text' isn't a Unicode
- string.
-
- - linesep - keyword argument - str/unicode - The sequence of
- characters to be used to mark end-of-line. The default is
- os.linesep. You can also specify None; this means to
- leave all newlines as they are in 'text'.
-
- - append - keyword argument - bool - Specifies what to do if
- the file already exists (True: append to the end of it;
- False: overwrite it.) The default is False.
-
-
- --- Newline handling.
-
- write_text() converts all standard end-of-line sequences
- ('\n', '\r', and '\r\n') to your platform's default end-of-line
- sequence (see os.linesep; on Windows, for example, the
- end-of-line marker is '\r\n').
-
- If you don't like your platform's default, you can override it
- using the 'linesep=' keyword argument. If you specifically want
- write_text() to preserve the newlines as-is, use 'linesep=None'.
-
- This applies to Unicode text the same as to 8-bit text, except
- there are three additional standard Unicode end-of-line sequences:
- u'\x85', u'\r\x85', and u'\u2028'.
-
- (This is slightly different from when you open a file for
- writing with fopen(filename, "w") in C or file(filename, 'w')
- in Python.)
-
-
- --- Unicode
-
- If 'text' isn't Unicode, then apart from newline handling, the
- bytes are written verbatim to the file. The 'encoding' and
- 'errors' arguments are not used and must be omitted.
-
- If 'text' is Unicode, it is first converted to bytes using the
- specified 'encoding' (or the default encoding if 'encoding'
- isn't specified). The 'errors' argument applies only to this
- conversion.
-
+ Returns ``True`` if the path exists.
"""
- if isinstance(text, unicode):
- if linesep is not None:
- # Convert all standard end-of-line sequences to
- # ordinary newline characters.
- text = (text.replace(u'\r\n', u'\n')
- .replace(u'\r\x85', u'\n')
- .replace(u'\r', u'\n')
- .replace(u'\x85', u'\n')
- .replace(u'\u2028', u'\n'))
- text = text.replace(u'\n', linesep)
- if encoding is None:
- encoding = sys.getdefaultencoding()
- bytes = text.encode(encoding, errors)
- else:
- # It is an error to specify an encoding if 'text' is
- # an 8-bit string.
- assert encoding is None
+ return os.path.exists(self)
- if linesep is not None:
- text = (text.replace('\r\n', '\n')
- .replace('\r', '\n'))
- bytes = text.replace('\n', linesep)
-
- self.write_bytes(bytes, append)
-
- def lines(self, encoding=None, errors='strict', retain=True):
- r""" Open this file, read all lines, return them in a list.
-
- Optional arguments:
- encoding - The Unicode encoding (or character set) of
- the file. The default is None, meaning the content
- of the file is read as 8-bit characters and returned
- as a list of (non-Unicode) str objects.
- errors - How to handle Unicode errors; see help(str.decode)
- for the options. Default is 'strict'
- retain - If true, retain newline characters; but all newline
- character combinations ('\r', '\n', '\r\n') are
- translated to '\n'. If false, newline characters are
- stripped off. Default is True.
-
- This uses 'U' mode in Python 2.3 and later.
+ def lexists(self):
"""
- if encoding is None and retain:
- f = self.open(_textmode)
- try:
- return f.readlines()
- finally:
- f.close()
- else:
- return self.text(encoding, errors).splitlines(retain)
-
- def write_lines(self, lines, encoding=None, errors='strict',
- linesep=os.linesep, append=False):
- r""" Write the given lines of text to this file.
-
- By default this overwrites any existing file at this path.
-
- This puts a platform-specific newline sequence on every line.
- See 'linesep' below.
-
- lines - A list of strings.
-
- encoding - A Unicode encoding to use. This applies only if
- 'lines' contains any Unicode strings.
-
- errors - How to handle errors in Unicode encoding. This
- also applies only to Unicode strings.
-
- linesep - The desired line-ending. This line-ending is
- applied to every line. If a line already has any
- standard line ending ('\r', '\n', '\r\n', u'\x85',
- u'\r\x85', u'\u2028'), that will be stripped off and
- this will be used instead. The default is os.linesep,
- which is platform-dependent ('\r\n' on Windows, '\n' on
- Unix, etc.) Specify None to write the lines as-is,
- like file.writelines().
-
- Use the keyword argument append=True to append lines to the
- file. The default is to overwrite the file. Warning:
- When you use this with Unicode data, if the encoding of the
- existing data in the file is different from the encoding
- you specify with the encoding= parameter, the result is
- mixed-encoding data, which can really confuse someone trying
- to read the file later.
+ Returns ``True`` if the path exists. Unlike :meth:`exists`, this also
+ returns ``True`` for broken symbolic links.
"""
- if append:
- mode = 'ab'
- else:
- mode = 'wb'
- f = self.open(mode)
- try:
- for line in lines:
- isUnicode = isinstance(line, unicode)
- if linesep is not None:
- # Strip off any existing line-end and add the
- # specified linesep string.
- if isUnicode:
- if line[-2:] in (u'\r\n', u'\x0d\x85'):
- line = line[:-2]
- elif line[-1:] in (u'\r', u'\n',
- u'\x85', u'\u2028'):
- line = line[:-1]
- else:
- if line[-2:] == '\r\n':
- line = line[:-2]
- elif line[-1:] in ('\r', '\n'):
- line = line[:-1]
- line += linesep
- if isUnicode:
- if encoding is None:
- encoding = sys.getdefaultencoding()
- line = line.encode(encoding, errors)
- f.write(line)
- finally:
- f.close()
-
- # --- Methods for querying the filesystem.
-
- exists = os.path.exists
- isdir = os.path.isdir
- isfile = os.path.isfile
- islink = os.path.islink
- ismount = os.path.ismount
-
- if hasattr(os.path, 'samefile'):
- samefile = os.path.samefile
-
- getatime = os.path.getatime
- atime = property(
- getatime, None, None,
- """ Last access time of the file. """)
-
- getmtime = os.path.getmtime
- mtime = property(
- getmtime, None, None,
- """ Last-modified time of the file. """)
-
- if hasattr(os.path, 'getctime'):
- getctime = os.path.getctime
- ctime = property(
- getctime, None, None,
- """ Creation time of the file. """)
-
- getsize = os.path.getsize
- size = property(
- getsize, None, None,
- """ Size of the file, in bytes. """)
-
- if hasattr(os, 'access'):
- def access(self, mode):
- """ Return true if current user has access to this path.
-
- mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
- """
- return os.access(self, mode)
-
- def stat(self):
- """ Perform a stat() system call on this path. """
- return os.stat(self)
-
- def lstat(self):
- """ Like path.stat(), but do not follow symbolic links. """
- return os.lstat(self)
-
- def get_owner(self):
- r""" Return the name of the owner of this file or directory.
-
- This follows symbolic links.
-
- On Windows, this returns a name of the form ur'DOMAIN\User Name'.
- On Windows, a group can own a file or directory.
- """
- if os.name == 'nt':
- if win32security is None:
- raise Exception("path.owner requires win32all to be installed")
- desc = win32security.GetFileSecurity(
- self, win32security.OWNER_SECURITY_INFORMATION)
- sid = desc.GetSecurityDescriptorOwner()
- account, domain, typecode = win32security.LookupAccountSid(None, sid)
- return domain + u'\\' + account
- else:
- if pwd is None:
- raise NotImplementedError("path.owner is not implemented on this platform.")
- st = self.stat()
- return pwd.getpwuid(st.st_uid).pw_name
-
- owner = property(
- get_owner, None, None,
- """ Name of the owner of this file or directory. """)
-
- if hasattr(os, 'statvfs'):
- def statvfs(self):
- """ Perform a statvfs() system call on this path. """
- return os.statvfs(self)
-
- if hasattr(os, 'pathconf'):
- def pathconf(self, name):
- return os.pathconf(self, name)
-
-
- # --- Modifying operations on files and directories
-
- def utime(self, times):
- """ Set the access and modified times of this file. """
- os.utime(self, times)
-
- def chmod(self, mode):
- os.chmod(self, mode)
-
- if hasattr(os, 'chown'):
- def chown(self, uid, gid):
- os.chown(self, uid, gid)
-
- def rename(self, new):
- os.rename(self, new)
-
- def renames(self, new):
- os.renames(self, new)
-
-
- # --- Create/delete operations on directories
-
- def mkdir(self, mode=0777):
- os.mkdir(self, mode)
+ return os.path.lexists(self)
def makedirs(self, mode=0777):
+ """
+ Recursively creates the directory and any missing parent directories.
+ """
os.makedirs(self, mode)
- def rmdir(self):
- os.rmdir(self)
-
- def removedirs(self):
- os.removedirs(self)
-
-
- # --- Modifying operations on files
-
- def touch(self):
- """ Set the access/modified times of this file to the current time.
- Create the file if it does not exist.
+ def joinpath(self, *args):
"""
- fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
- os.close(fd)
- os.utime(self, None)
+ Joins the path with the arguments given and returns the result.
+ """
+ return self.__class__(os.path.join(self, *map(self.__class__, args)))
- def remove(self):
- os.remove(self)
-
- def unlink(self):
- os.unlink(self)
-
-
- # --- Links
-
- if hasattr(os, 'link'):
- def link(self, newpath):
- """ Create a hard link at 'newpath', pointing to this file. """
- os.link(self, newpath)
-
- if hasattr(os, 'symlink'):
- def symlink(self, newlink):
- """ Create a symbolic link at 'newlink', pointing here. """
- os.symlink(self, newlink)
-
- if hasattr(os, 'readlink'):
- def readlink(self):
- """ Return the path to which this symbolic link points.
-
- The result may be an absolute or a relative path.
- """
- return self.__class__(os.readlink(self))
-
- def readlinkabs(self):
- """ Return the path to which this symbolic link points.
-
- The result is always an absolute path.
- """
- p = self.readlink()
- if p.isabs():
- return p
- else:
- return (self.parent / p).abspath()
-
-
- # --- High-level functions from shutil
-
- copyfile = shutil.copyfile
- copymode = shutil.copymode
- copystat = shutil.copystat
- copy = shutil.copy
- copy2 = shutil.copy2
- copytree = shutil.copytree
- if hasattr(shutil, 'move'):
- move = shutil.move
- rmtree = shutil.rmtree
-
-
- # --- Special stuff from os
-
- if hasattr(os, 'chroot'):
- def chroot(self):
- os.chroot(self)
-
- if hasattr(os, 'startfile'):
- def startfile(self):
- os.startfile(self)
+ __div__ = __truediv__ = joinpath
+ def __repr__(self):
+ return '%s(%s)' % (self.__class__.__name__, str.__repr__(self))
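
A minimal usage sketch of the slimmed-down helper above, assuming the class is
named ``path`` and importable from the test suite (the import line is
illustrative only):

    from path import path            # assumed import; the helper lives in the tests

    p = path('/tmp') / 'sphinx-demo'     # '/' is an alias for joinpath()
    p.makedirs()
    f = p / 'hello.txt'
    f.write_text('hello world')
    assert f.isfile() and f.text() == 'hello world'
    p.rmtree()
    assert not p.exists()
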
diff --git a/tests/root/conf.py b/tests/root/conf.py
index 2b6d6a9af..b47341899 100644
--- a/tests/root/conf.py
+++ b/tests/root/conf.py
@@ -22,7 +22,7 @@ copyright = '2010, Georg Brandl & Team'
version = '0.6'
release = '0.6alpha1'
today_fmt = '%B %d, %Y'
-#unused_docs = []
+# unused_docs = []
exclude_patterns = ['_build', '**/excluded.*']
keep_warnings = True
pygments_style = 'sphinx'
diff --git a/tests/root/doctest.txt b/tests/root/doctest.txt
index 35cdd589c..6ac0b2863 100644
--- a/tests/root/doctest.txt
+++ b/tests/root/doctest.txt
@@ -30,7 +30,7 @@ Special directives
.. testcode::
- print 1+1
+ print(1+1)
.. testoutput::
@@ -50,30 +50,30 @@ Special directives
.. testsetup:: *
- from math import floor
+ from math import factorial
.. doctest::
- >>> floor(1.2)
- 1.0
+ >>> factorial(1)
+ 1
.. testcode::
- print floor(1.2)
+ print(factorial(1))
.. testoutput::
- 1.0
+ 1
- >>> floor(1.2)
- 1.0
+ >>> factorial(1)
+ 1
* options for testcode/testoutput blocks
.. testcode::
:hide:
- print 'Output text.'
+ print('Output text.')
.. testoutput::
:hide:
@@ -85,36 +85,36 @@ Special directives
.. testsetup:: group1
- from math import ceil
+ from math import trunc
- ``ceil`` is now known in "group1", but not in others.
+ ``trunc`` is now known in "group1", but not in others.
.. doctest:: group1
- >>> ceil(0.8)
- 1.0
+ >>> trunc(1.1)
+ 1
.. doctest:: group2
- >>> ceil(0.8)
+ >>> trunc(1.1)
Traceback (most recent call last):
...
- NameError: name 'ceil' is not defined
+ NameError: name 'trunc' is not defined
Interleaving testcode/testoutput:
.. testcode:: group1
- print ceil(0.8)
+ print(factorial(3))
.. testcode:: group2
- print floor(0.8)
+ print(factorial(4))
.. testoutput:: group1
- 1.0
+ 6
.. testoutput:: group2
- 0.0
+ 24
diff --git a/tests/root/literal.inc b/tests/root/literal.inc
index d5b9890c9..694f15ed9 100644
--- a/tests/root/literal.inc
+++ b/tests/root/literal.inc
@@ -1,7 +1,7 @@
# Literally included file using Python highlighting
# -*- coding: utf-8 -*-
-foo = u"Including Unicode characters: üöä"
+foo = "Including Unicode characters: üöä"
class Foo:
pass
diff --git a/tests/root/objects.txt b/tests/root/objects.txt
index 334827de2..e62f6d962 100644
--- a/tests/root/objects.txt
+++ b/tests/root/objects.txt
@@ -41,6 +41,10 @@ Testing object descriptions
.. function:: func_without_module2() -> annotation
+.. object:: long(parameter, \
+ list)
+ another one
+
.. class:: TimeInt
:param moo: |test|
@@ -62,6 +66,8 @@ Testing object descriptions
:ivar int hour: like *hour*
:ivar minute: like *minute*
:vartype minute: int
+ :param hour: Duplicate param. Should not lead to crashes.
+ :type hour: Duplicate type.
C items
diff --git a/tests/run.py b/tests/run.py
index 0cb41442c..50567fbc5 100755
--- a/tests/run.py
+++ b/tests/run.py
@@ -11,7 +11,17 @@
"""
import sys
-from os import path
+from os import path, chdir, listdir
+
+if sys.version_info >= (3, 0):
+ print('Copying and converting sources to build/lib/tests...')
+ from distutils.util import copydir_run_2to3
+ testroot = path.dirname(__file__) or '.'
+ newroot = path.join(testroot, path.pardir, 'build')
+ newroot = path.join(newroot, listdir(newroot)[0], 'tests')
+ copydir_run_2to3(testroot, newroot)
+ # switch to the converted dir so nose tests the right tests
+ chdir(newroot)
# always test the sphinx package from this directory
sys.path.insert(0, path.join(path.dirname(__file__), path.pardir))
@@ -19,8 +29,8 @@ sys.path.insert(0, path.join(path.dirname(__file__), path.pardir))
try:
import nose
except ImportError:
- print "The nose package is needed to run the Sphinx test suite."
+ print("The nose package is needed to run the Sphinx test suite.")
sys.exit(1)
-print "Running Sphinx test suite..."
+print("Running Sphinx test suite...")
nose.main()
diff --git a/tests/test_application.py b/tests/test_application.py
index 3d287a57c..d1154863c 100644
--- a/tests/test_application.py
+++ b/tests/test_application.py
@@ -45,9 +45,11 @@ def test_output():
app = TestApp(status=status, warning=warnings)
try:
status.truncate(0) # __init__ writes to status
+ status.seek(0)
app.info("Nothing here...")
assert status.getvalue() == "Nothing here...\n"
status.truncate(0)
+ status.seek(0)
app.info("Nothing here...", True)
assert status.getvalue() == "Nothing here..."
diff --git a/tests/test_autosummary.py b/tests/test_autosummary.py
index 7e3093676..20fb06e0e 100644
--- a/tests/test_autosummary.py
+++ b/tests/test_autosummary.py
@@ -9,8 +9,6 @@
:license: BSD, see LICENSE for details.
"""
-import string
-
from util import *
from sphinx.ext.autosummary import mangle_signature
@@ -27,7 +25,7 @@ def test_mangle_signature():
(a, b, c='foobar()', d=123) :: (a, b[, c, d])
"""
- TEST = [map(string.strip, x.split("::")) for x in TEST.split("\n")
+ TEST = [map(lambda x: x.strip(), x.split("::")) for x in TEST.split("\n")
if '::' in x]
for inp, outp in TEST:
res = mangle_signature(inp).strip().replace(u"\u00a0", " ")
diff --git a/tests/test_build_html.py b/tests/test_build_html.py
index 4dee513ae..a685fcb61 100644
--- a/tests/test_build_html.py
+++ b/tests/test_build_html.py
@@ -12,6 +12,7 @@
import os
import re
import htmlentitydefs
+import sys
from StringIO import StringIO
try:
@@ -38,7 +39,7 @@ http://www.python.org/logo.png
reading included file u'wrongenc.inc' seems to be wrong, try giving an \
:encoding: option\\n?
%(root)s/includes.txt:4: WARNING: download file not readable: nonexisting.png
-%(root)s/objects.txt:84: WARNING: using old C markup; please migrate to \
+%(root)s/objects.txt:\\d*: WARNING: using old C markup; please migrate to \
new-style markup \(e.g. c:function instead of cfunction\), see \
http://sphinx.pocoo.org/domains.html
"""
@@ -50,6 +51,11 @@ HTML_WARNINGS = ENV_WARNINGS + """\
%(root)s/markup.txt:: WARNING: invalid pair index entry u'keyword; '
"""
+if sys.version_info >= (3, 0):
+ ENV_WARNINGS = remove_unicode_literals(ENV_WARNINGS)
+ HTML_WARNINGS = remove_unicode_literals(HTML_WARNINGS)
+
+
def tail_check(check):
rex = re.compile(check)
def checker(nodes):
@@ -161,6 +167,8 @@ HTML_XPATH = {
'objects.html': [
(".//dt[@id='mod.Cls.meth1']", ''),
(".//dt[@id='errmod.Error']", ''),
+ (".//dt/tt", r'long\(parameter,\s* list\)'),
+ (".//dt/tt", 'another one'),
(".//a[@href='#mod.Cls'][@class='reference internal']", ''),
(".//dl[@class='userdesc']", ''),
(".//dt[@id='userdesc-myobj']", ''),
@@ -227,7 +235,7 @@ if pygments:
(".//div[@class='inc-lines highlight-text']//pre",
r'^class Foo:\n pass\nclass Bar:\n$'),
(".//div[@class='inc-startend highlight-text']//pre",
- ur'^foo = u"Including Unicode characters: üöä"\n$'),
+ ur'^foo = "Including Unicode characters: üöä"\n$'),
(".//div[@class='inc-preappend highlight-text']//pre",
r'(?m)^START CODE$'),
(".//div[@class='inc-pyobj-dedent highlight-python']//span",
diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py
index 4405395a0..6c1ccad9b 100644
--- a/tests/test_build_latex.py
+++ b/tests/test_build_latex.py
@@ -32,6 +32,9 @@ None:None: WARNING: no matching candidate for image URI u'foo.\\*'
WARNING: invalid pair index entry u''
"""
+if sys.version_info >= (3, 0):
+ LATEX_WARNINGS = remove_unicode_literals(LATEX_WARNINGS)
+
@with_app(buildername='latex', warning=latex_warnfile, cleanenv=True)
def test_latex(app):
diff --git a/tests/test_config.py b/tests/test_config.py
index cb4e11056..7fce4495b 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -9,6 +9,7 @@
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+import sys
from util import *
@@ -84,11 +85,22 @@ def test_extension_values(app):
@with_tempdir
def test_errors_warnings(dir):
# test the error for syntax errors in the config file
- write_file(dir / 'conf.py', 'project = \n')
+ write_file(dir / 'conf.py', u'project = \n', 'ascii')
raises_msg(ConfigError, 'conf.py', Config, dir, 'conf.py', {}, None)
+ # test the automatic conversion of 2.x only code in configs
+ write_file(dir / 'conf.py', u'\n\nproject = u"Jägermeister"\n', 'utf-8')
+ cfg = Config(dir, 'conf.py', {}, None)
+ cfg.init_values()
+ assert cfg.project == u'Jägermeister'
+
# test the warning for bytestrings with non-ascii content
- write_file(dir / 'conf.py', '# -*- coding: latin-1\nproject = "foo\xe4"\n')
+ # bytestrings with non-ascii content are a syntax error in python3 so we
+ # skip the test there
+ if sys.version_info >= (3, 0):
+ return
+ write_file(dir / 'conf.py', u'# -*- coding: latin-1\nproject = "fooä"\n',
+ 'latin-1')
cfg = Config(dir, 'conf.py', {}, None)
warned = [False]
def warn(msg):
diff --git a/tests/test_coverage.py b/tests/test_coverage.py
index 1262ebf5b..cb8316358 100644
--- a/tests/test_coverage.py
+++ b/tests/test_coverage.py
@@ -33,7 +33,7 @@ def test_build(app):
assert 'api.h' in c_undoc
assert ' * Py_SphinxTest' in c_undoc
- undoc_py, undoc_c = pickle.loads((app.outdir / 'undoc.pickle').text())
+ undoc_py, undoc_c = pickle.loads((app.outdir / 'undoc.pickle').bytes())
assert len(undoc_c) == 1
# the key is the full path to the header file, which isn't testable
assert undoc_c.values()[0] == [('function', 'Py_SphinxTest')]
diff --git a/tests/test_env.py b/tests/test_env.py
index 4ecbaac49..124ed08cd 100644
--- a/tests/test_env.py
+++ b/tests/test_env.py
@@ -8,6 +8,7 @@
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+import sys
from util import *
@@ -54,8 +55,10 @@ def test_images():
app._warning.reset()
htmlbuilder = StandaloneHTMLBuilder(app)
htmlbuilder.post_process_images(tree)
- assert "no matching candidate for image URI u'foo.*'" in \
- app._warning.content[-1]
+ image_uri_message = "no matching candidate for image URI u'foo.*'"
+ if sys.version_info >= (3, 0):
+ image_uri_message = remove_unicode_literals(image_uri_message)
+ assert image_uri_message in app._warning.content[-1]
assert set(htmlbuilder.images.keys()) == \
set(['subdir/img.png', 'img.png', 'subdir/simg.png', 'svgimg.svg'])
assert set(htmlbuilder.images.values()) == \
@@ -64,8 +67,7 @@ def test_images():
app._warning.reset()
latexbuilder = LaTeXBuilder(app)
latexbuilder.post_process_images(tree)
- assert "no matching candidate for image URI u'foo.*'" in \
- app._warning.content[-1]
+ assert image_uri_message in app._warning.content[-1]
assert set(latexbuilder.images.keys()) == \
set(['subdir/img.png', 'subdir/simg.png', 'img.png', 'img.pdf',
'svgimg.pdf'])
diff --git a/tests/test_intersphinx.py b/tests/test_intersphinx.py
index 8b6547e54..990e35bd2 100644
--- a/tests/test_intersphinx.py
+++ b/tests/test_intersphinx.py
@@ -11,7 +11,10 @@
import zlib
import posixpath
-from cStringIO import StringIO
+try:
+ from io import BytesIO
+except ImportError:
+ from cStringIO import StringIO as BytesIO
from docutils import nodes
@@ -28,23 +31,23 @@ inventory_v1 = '''\
# Version: 1.0
module mod foo.html
module.cls class foo.html
-'''
+'''.encode('utf-8')
inventory_v2 = '''\
# Sphinx inventory version 2
# Project: foo
# Version: 2.0
# The remainder of this file is compressed with zlib.
-''' + zlib.compress('''\
+'''.encode('utf-8') + zlib.compress('''\
module1 py:module 0 foo.html#module-module1 Long Module desc
module2 py:module 0 foo.html#module-$ -
module1.func py:function 1 sub/foo.html#$ -
CFunc c:function 2 cfunc.html#CFunc -
-''')
+'''.encode('utf-8'))
def test_read_inventory_v1():
- f = StringIO(inventory_v1)
+ f = BytesIO(inventory_v1)
f.readline()
invdata = read_inventory_v1(f, '/util', posixpath.join)
assert invdata['py:module']['module'] == \
@@ -54,12 +57,12 @@ def test_read_inventory_v1():
def test_read_inventory_v2():
- f = StringIO(inventory_v2)
+ f = BytesIO(inventory_v2)
f.readline()
invdata1 = read_inventory_v2(f, '/util', posixpath.join)
# try again with a small buffer size to test the chunking algorithm
- f = StringIO(inventory_v2)
+ f = BytesIO(inventory_v2)
f.readline()
invdata2 = read_inventory_v2(f, '/util', posixpath.join, bufsize=5)
@@ -94,46 +97,58 @@ def test_missing_reference(tempdir, app):
('foo', '2.0', 'http://docs.python.org/foo.html#module-module2', '-')
# create fake nodes and check referencing
- contnode = nodes.emphasis('foo', 'foo')
- refnode = addnodes.pending_xref('')
- refnode['reftarget'] = 'module1.func'
- refnode['reftype'] = 'func'
- refnode['refdomain'] = 'py'
- rn = missing_reference(app, app.env, refnode, contnode)
+ def fake_node(domain, type, target, content, **attrs):
+ contnode = nodes.emphasis(content, content)
+ node = addnodes.pending_xref('')
+ node['reftarget'] = target
+ node['reftype'] = type
+ node['refdomain'] = domain
+ node.attributes.update(attrs)
+ node += contnode
+ return node, contnode
+
+ def reference_check(*args, **kwds):
+ node, contnode = fake_node(*args, **kwds)
+ return missing_reference(app, app.env, node, contnode)
+
+ # check resolution when a target is found
+ rn = reference_check('py', 'func', 'module1.func', 'foo')
assert isinstance(rn, nodes.reference)
assert rn['refuri'] == 'http://docs.python.org/sub/foo.html#module1.func'
assert rn['reftitle'] == '(in foo v2.0)'
- assert rn[0].astext() == 'module1.func'
+ assert rn[0].astext() == 'foo'
# create unresolvable nodes and check None return value
- refnode['reftype'] = 'foo'
- assert missing_reference(app, app.env, refnode, contnode) is None
-
- refnode['reftype'] = 'function'
- refnode['reftarget'] = 'foo.func'
- assert missing_reference(app, app.env, refnode, contnode) is None
+ assert reference_check('py', 'foo', 'module1.func', 'foo') is None
+ assert reference_check('py', 'func', 'foo', 'foo') is None
+ assert reference_check('py', 'func', 'foo', 'foo') is None
# check handling of prefixes
# prefix given, target found: prefix is stripped
- refnode['reftype'] = 'mod'
- refnode['reftarget'] = 'py3k:module2'
- rn = missing_reference(app, app.env, refnode, contnode)
+ rn = reference_check('py', 'mod', 'py3k:module2', 'py3k:module2')
assert rn[0].astext() == 'module2'
+ # prefix given, but not in title: nothing stripped
+ rn = reference_check('py', 'mod', 'py3k:module2', 'module2')
+ assert rn[0].astext() == 'module2'
+
+ # prefix given, but explicit: nothing stripped
+ rn = reference_check('py', 'mod', 'py3k:module2', 'py3k:module2',
+ refexplicit=True)
+ assert rn[0].astext() == 'py3k:module2'
+
# prefix given, target not found and nonexplicit title: prefix is stripped
- refnode['reftarget'] = 'py3k:unknown'
- refnode['refexplicit'] = False
- contnode[0] = nodes.Text('py3k:unknown')
- rn = missing_reference(app, app.env, refnode, contnode)
+ node, contnode = fake_node('py', 'mod', 'py3k:unknown', 'py3k:unknown',
+ refexplicit=False)
+ rn = missing_reference(app, app.env, node, contnode)
assert rn is None
assert contnode[0].astext() == 'unknown'
# prefix given, target not found and explicit title: nothing is changed
- refnode['reftarget'] = 'py3k:unknown'
- refnode['refexplicit'] = True
- contnode[0] = nodes.Text('py3k:unknown')
- rn = missing_reference(app, app.env, refnode, contnode)
+ node, contnode = fake_node('py', 'mod', 'py3k:unknown', 'py3k:unknown',
+ refexplicit=True)
+ rn = missing_reference(app, app.env, node, contnode)
assert rn is None
assert contnode[0].astext() == 'py3k:unknown'
diff --git a/tests/test_markup.py b/tests/test_markup.py
index 31817df62..092113bbe 100644
--- a/tests/test_markup.py
+++ b/tests/test_markup.py
@@ -17,6 +17,7 @@ from docutils import frontend, utils, nodes
from docutils.parsers import rst
from sphinx.util import texescape
+from sphinx.util.pycompat import b
from sphinx.writers.html import HTMLWriter, SmartyPantsHTMLTranslator
from sphinx.writers.latex import LaTeXWriter, LaTeXTranslator
@@ -50,7 +51,7 @@ class ForgivingLaTeXTranslator(LaTeXTranslator, ForgivingTranslator):
def verify_re(rst, html_expected, latex_expected):
- document = utils.new_document('test data', settings)
+ document = utils.new_document(b('test data'), settings)
document['file'] = 'dummy'
parser.parse(rst, document)
for msg in document.traverse(nodes.system_message):
diff --git a/tests/test_quickstart.py b/tests/test_quickstart.py
index cb40d27cf..541959bd3 100644
--- a/tests/test_quickstart.py
+++ b/tests/test_quickstart.py
@@ -36,8 +36,13 @@ def mock_raw_input(answers, needanswer=False):
return ''
return raw_input
+try:
+ real_raw_input = raw_input
+except NameError:
+ real_raw_input = input
+
def teardown_module():
- qs.raw_input = __builtin__.raw_input
+ qs.term_input = real_raw_input
qs.TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
coloron()
@@ -51,7 +56,7 @@ def test_do_prompt():
'Q5': 'no',
'Q6': 'foo',
}
- qs.raw_input = mock_raw_input(answers)
+ qs.term_input = mock_raw_input(answers)
try:
qs.do_prompt(d, 'k1', 'Q1')
except AssertionError:
@@ -79,13 +84,18 @@ def test_quickstart_defaults(tempdir):
'Author name': 'Georg Brandl',
'Project version': '0.1',
}
- qs.raw_input = mock_raw_input(answers)
+ qs.term_input = mock_raw_input(answers)
qs.inner_main([])
conffile = tempdir / 'conf.py'
assert conffile.isfile()
ns = {}
- execfile(conffile, ns)
+ f = open(conffile, 'U')
+ try:
+ code = compile(f.read(), conffile, 'exec')
+ finally:
+ f.close()
+ exec code in ns
assert ns['extensions'] == []
assert ns['templates_path'] == ['_templates']
assert ns['source_suffix'] == '.rst'
@@ -112,8 +122,8 @@ def test_quickstart_all_answers(tempdir):
'Root path': tempdir,
'Separate source and build': 'y',
'Name prefix for templates': '.',
- 'Project name': 'STASI\xe2\x84\xa2',
- 'Author name': 'Wolfgang Sch\xc3\xa4uble & G\'Beckstein',
+ 'Project name': u'STASI™'.encode('utf-8'),
+ 'Author name': u'Wolfgang Schäuble & G\'Beckstein'.encode('utf-8'),
'Project version': '2.0',
'Project release': '2.0.1',
'Source file suffix': '.txt',
@@ -131,14 +141,19 @@ def test_quickstart_all_answers(tempdir):
'Create Windows command file': 'no',
'Do you want to use the epub builder': 'yes',
}
- qs.raw_input = mock_raw_input(answers, needanswer=True)
+ qs.term_input = mock_raw_input(answers, needanswer=True)
qs.TERM_ENCODING = 'utf-8'
qs.inner_main([])
conffile = tempdir / 'source' / 'conf.py'
assert conffile.isfile()
ns = {}
- execfile(conffile, ns)
+ f = open(conffile, 'U')
+ try:
+ code = compile(f.read(), conffile, 'exec')
+ finally:
+ f.close()
+ exec code in ns
assert ns['extensions'] == ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
assert ns['templates_path'] == ['.templates']
assert ns['source_suffix'] == '.txt'
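
Since ``execfile()`` is gone in Python 3, conf.py is now read with the
compile-then-exec idiom shown in the two hunks above. A portable spelling of
the same idiom (the patch itself uses the 2.x ``exec code in ns`` statement and
lets 2to3 rewrite it; the file name here is illustrative):

    ns = {}
    f = open('conf.py', 'U')
    try:
        code = compile(f.read(), 'conf.py', 'exec')
    finally:
        f.close()
    exec(code, ns)   # on 2.x the 2-tuple is special-cased to mean "exec code in ns"
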
diff --git a/tests/test_search.py b/tests/test_search.py
index 0b5b158b8..c0750366c 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -13,6 +13,7 @@ from docutils import frontend, utils
from docutils.parsers import rst
from sphinx.search import IndexBuilder
+from sphinx.util.pycompat import b
settings = parser = None
@@ -31,7 +32,7 @@ test that non-comments are indexed: fermion
'''
def test_wordcollector():
- doc = utils.new_document('test data', settings)
+ doc = utils.new_document(b('test data'), settings)
doc['file'] = 'dummy'
parser.parse(FILE_CONTENTS, doc)
diff --git a/tests/util.py b/tests/util.py
index 1b24af0e2..b81e15b6e 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -11,6 +11,8 @@ import sys
import StringIO
import tempfile
import shutil
+import re
+from codecs import open
try:
from functools import wraps
@@ -31,7 +33,7 @@ __all__ = [
'raises', 'raises_msg', 'Struct',
'ListOutput', 'TestApp', 'with_app', 'gen_with_app',
'path', 'with_tempdir', 'write_file',
- 'sprint',
+ 'sprint', 'remove_unicode_literals',
]
@@ -191,11 +193,21 @@ def with_tempdir(func):
return new_func
-def write_file(name, contents):
- f = open(str(name), 'wb')
+def write_file(name, contents, encoding=None):
+ if encoding is None:
+ mode = 'wb'
+ if isinstance(contents, unicode):
+ contents = contents.encode('ascii')
+ else:
+ mode = 'w'
+ f = open(str(name), mode, encoding=encoding)
f.write(contents)
f.close()
def sprint(*args):
sys.stderr.write(' '.join(map(str, args)) + '\n')
+
+_unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')
+def remove_unicode_literals(s):
+ return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)
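
``remove_unicode_literals`` strips ``u`` string prefixes from expected-warning
fixtures so the same text can be compared on Python 3, where ``repr()`` no
longer emits them. A quick standalone check of the regular expression above:

    import re

    _unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')

    def remove_unicode_literals(s):
        return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)

    assert (remove_unicode_literals("no matching candidate for image URI u'foo.*'")
            == "no matching candidate for image URI 'foo.*'")
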
diff --git a/utils/check_sources.py b/utils/check_sources.py
index 0571ab1e8..c412742b7 100755
--- a/utils/check_sources.py
+++ b/utils/check_sources.py
@@ -12,10 +12,16 @@
"""
import sys, os, re
-import getopt
import cStringIO
+from optparse import OptionParser
from os.path import join, splitext, abspath
+if sys.version_info >= (3, 0):
+ def b(s):
+ return s.encode('utf-8')
+else:
+ b = str
+
checkers = {}
@@ -30,26 +36,26 @@ def checker(*suffixes, **kwds):
name_mail_re = r'[\w ]+(<.*?>)?'
-copyright_re = re.compile(r'^ :copyright: Copyright 200\d(-20\d\d)? '
- r'by %s(, %s)*[,.]$' %
- (name_mail_re, name_mail_re))
-license_re = re.compile(r" :license: (.*?).\n")
-copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
- (name_mail_re, name_mail_re))
-coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
-not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
-is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
+copyright_re = re.compile(b(r'^ :copyright: Copyright 200\d(-20\d\d)? '
+ r'by %s(, %s)*[,.]$' %
+ (name_mail_re, name_mail_re)))
+license_re = re.compile(b(r" :license: (.*?).\n"))
+copyright_2_re = re.compile(b(r'^ %s(, %s)*[,.]$' %
+ (name_mail_re, name_mail_re)))
+coding_re = re.compile(b(r'coding[:=]\s*([-\w.]+)'))
+not_ix_re = re.compile(b(r'\bnot\s+\S+?\s+i[sn]\s\S+'))
+is_const_re = re.compile(b(r'if.*?==\s+(None|False|True)\b'))
-misspellings = ["developement", "adress", "verificate", # ALLOW-MISSPELLING
- "informations"] # ALLOW-MISSPELLING
+misspellings = [b("developement"), b("adress"), # ALLOW-MISSPELLING
+ b("verificate"), b("informations")] # ALLOW-MISSPELLING
-
-@checker('.py')
-def check_syntax(fn, lines):
- try:
- compile(''.join(lines), fn, "exec")
- except SyntaxError, err:
- yield 0, "not compilable: %s" % err
+if sys.version_info < (3, 0):
+ @checker('.py')
+ def check_syntax(fn, lines):
+ try:
+ compile(b('').join(lines), fn, "exec")
+ except SyntaxError, err:
+ yield 0, "not compilable: %s" % err
@checker('.py')
@@ -61,8 +67,8 @@ def check_style_and_encoding(fn, lines):
if lno < 2:
co = coding_re.search(line)
if co:
- encoding = co.group(1)
- if line.strip().startswith('#'):
+ encoding = co.group(1).decode('ascii')
+ if line.strip().startswith(b('#')):
continue
#m = not_ix_re.search(line)
#if m:
@@ -82,7 +88,7 @@ def check_style_and_encoding(fn, lines):
def check_fileheader(fn, lines):
# line number correction
c = 1
- if lines[0:1] == ['#!/usr/bin/env python\n']:
+ if lines[0:1] == [b('#!/usr/bin/env python\n')]:
lines = lines[1:]
c = 2
@@ -91,38 +97,38 @@ def check_fileheader(fn, lines):
for lno, l in enumerate(lines):
llist.append(l)
if lno == 0:
- if l == '# -*- coding: rot13 -*-\n':
+ if l == b('# -*- coding: rot13 -*-\n'):
# special-case pony package
return
- elif l != '# -*- coding: utf-8 -*-\n':
+ elif l != b('# -*- coding: utf-8 -*-\n'):
yield 1, "missing coding declaration"
elif lno == 1:
- if l != '"""\n' and l != 'r"""\n':
+ if l != b('"""\n') and l != b('r"""\n'):
yield 2, 'missing docstring begin (""")'
else:
docopen = True
elif docopen:
- if l == '"""\n':
+ if l == b('"""\n'):
# end of docstring
if lno <= 4:
yield lno+c, "missing module name in docstring"
break
- if l != "\n" and l[:4] != ' ' and docopen:
+ if l != b("\n") and l[:4] != b(' ') and docopen:
yield lno+c, "missing correct docstring indentation"
if lno == 2:
# if not in package, don't check the module name
modname = fn[:-3].replace('/', '.').replace('.__init__', '')
while modname:
- if l.lower()[4:-1] == modname:
+ if l.lower()[4:-1] == b(modname):
break
modname = '.'.join(modname.split('.')[1:])
else:
yield 3, "wrong module name in docstring heading"
modnamelen = len(l.strip())
elif lno == 3:
- if l.strip() != modnamelen * "~":
+ if l.strip() != modnamelen * b("~"):
yield 4, "wrong module name underline, should be ~~~...~"
else:
@@ -145,16 +151,16 @@ def check_fileheader(fn, lines):
@checker('.py', '.html', '.rst')
def check_whitespace_and_spelling(fn, lines):
for lno, line in enumerate(lines):
- if "\t" in line:
+ if b("\t") in line:
yield lno+1, "OMG TABS!!!1 "
- if line[:-1].rstrip(' \t') != line[:-1]:
+ if line[:-1].rstrip(b(' \t')) != line[:-1]:
yield lno+1, "trailing whitespace"
for word in misspellings:
- if word in line and 'ALLOW-MISSPELLING' not in line:
+ if word in line and b('ALLOW-MISSPELLING') not in line:
yield lno+1, '"%s" used' % word
+if sys.version_info >= (3, 0):
+ def tokens(readline, tokeneater):
+ for token in tokenize.tokenize(readline):
+ yield tokeneater(*token)
+else:
+ tokens = tokenize.tokenize
+
+verbose = 0
+recurse = 0
+dryrun = 0
+makebackup = True
def usage(msg=None):
if msg is not None:
@@ -61,12 +71,10 @@ def errprint(*args):
def main():
import getopt
- global verbose, recurse, dryrun, no_backup
-
+ global verbose, recurse, dryrun, makebackup
try:
- opts, args = getopt.getopt(sys.argv[1:], "drvhB",
- ["dryrun", "recurse", "verbose", "help",
- "no-backup"])
+ opts, args = getopt.getopt(sys.argv[1:], "drnvh",
+ ["dryrun", "recurse", "nobackup", "verbose", "help"])
except getopt.error, msg:
usage(msg)
return
@@ -75,10 +83,10 @@ def main():
dryrun += 1
elif o in ('-r', '--recurse'):
recurse += 1
+ elif o in ('-n', '--nobackup'):
+ makebackup = False
elif o in ('-v', '--verbose'):
verbose += 1
- elif o in ('-B', '--no-backup'):
- no_backup += 1
elif o in ('-h', '--help'):
usage()
return
@@ -98,7 +106,8 @@ def check(file):
for name in names:
fullname = os.path.join(file, name)
if ((recurse and os.path.isdir(fullname) and
- not os.path.islink(fullname))
+ not os.path.islink(fullname) and
+ not os.path.split(fullname)[1].startswith("."))
or name.lower().endswith(".py")):
check(fullname)
return
@@ -118,26 +127,35 @@ def check(file):
print "changed."
if dryrun:
print "But this is a dry run, so leaving it alone."
- else:
- print "reindented", file, \
- (dryrun and "(dry run => not really)" or "")
if not dryrun:
- if not no_backup:
- bak = file + ".bak"
- if os.path.exists(bak):
- os.remove(bak)
- os.rename(file, bak)
+ bak = file + ".bak"
+ if makebackup:
+ shutil.copyfile(file, bak)
if verbose:
- print "renamed", file, "to", bak
+ print "backed up", file, "to", bak
f = open(file, "w")
r.write(f)
f.close()
if verbose:
print "wrote new", file
+ return True
else:
if verbose:
print "unchanged."
+ return False
+def _rstrip(line, JUNK='\n \t'):
+ """Return line stripped of trailing spaces, tabs, newlines.
+
+ Note that line.rstrip() instead also strips sundry control characters,
+ but at least one known Emacs user expects to keep junk like that, not
+ mentioning Barry by name or anything <wink>.
+ """
+
+ i = len(line)
+ while i > 0 and line[i-1] in JUNK:
+ i -= 1
+ return line[:i]
class Reindenter:
@@ -151,7 +169,7 @@ class Reindenter:
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it's "\n".
- self.lines = [line.rstrip('\n \t').expandtabs() + "\n"
+ self.lines = [_rstrip(line).expandtabs() + "\n"
for line in self.raw]
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
@@ -163,7 +181,7 @@ class Reindenter:
self.stats = []
def run(self):
- tokenize.tokenize(self.getline, self.tokeneater)
+ tokens(self.getline, self.tokeneater)
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == "\n":