mirror of
https://github.com/sphinx-doc/sphinx.git
synced 2025-02-25 18:55:22 -06:00
Split websupport into the sphinxcontrib-websupport package
This commit is contained in:
parent
45887c7d62
commit
fef9f870d4
5
setup.py
5
setup.py
@ -51,6 +51,7 @@ requires = [
|
||||
'alabaster>=0.7,<0.8',
|
||||
'imagesize',
|
||||
'requests>=2.0.0',
|
||||
'sphinxcontrib-websupport',
|
||||
'typing',
|
||||
'setuptools',
|
||||
]
|
||||
@ -59,10 +60,6 @@ extras_require = {
|
||||
':sys_platform=="win32"': [
|
||||
'colorama>=0.3.5',
|
||||
],
|
||||
'websupport': [
|
||||
'sqlalchemy>=0.9',
|
||||
'whoosh>=2.0',
|
||||
],
|
||||
'test': [
|
||||
'pytest',
|
||||
'mock', # it would be better for 'test:python_version in 2.7'
|
||||
|
@ -9,182 +9,14 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
from os import path
|
||||
import posixpath
|
||||
import shutil
|
||||
|
||||
from docutils.io import StringOutput
|
||||
|
||||
from sphinx.jinja2glue import BuiltinTemplateLoader
|
||||
from sphinx.util.osutil import os_path, relative_uri, ensuredir, copyfile
|
||||
from sphinx.builders.html import PickleHTMLBuilder
|
||||
from sphinx.writers.websupport import WebSupportTranslator
|
||||
from sphinxcontrib.websupport.builder import WebSupportBuilder
|
||||
|
||||
if False:
|
||||
# For type annotation
|
||||
from typing import Any, Dict, Iterable, Tuple # NOQA
|
||||
from docutils import nodes # NOQA
|
||||
from typing import Any, Dict # NOQA
|
||||
from sphinx.application import Sphinx # NOQA
|
||||
|
||||
|
||||
class WebSupportBuilder(PickleHTMLBuilder):
    """
    Builds documents for the web support package.

    Instead of writing rendered HTML pages, this builder pickles a context
    dict per document (body, title, css, script, ...) so a web application
    can render them later, and moves static assets into a separate
    directory served by the web app.
    """
    name = 'websupport'
    versioning_method = 'commentable'
    versioning_compare = True  # for commentable node's uuid stability.

    def init(self):
        # type: () -> None
        """Initialize the builder and require the builtin template loader."""
        PickleHTMLBuilder.init(self)
        # templates are needed for this builder, but the serializing
        # builder does not initialize them
        self.init_templates()
        if not isinstance(self.templates, BuiltinTemplateLoader):
            raise RuntimeError('websupport builder must be used with '
                               'the builtin templates')
        # add our custom JS
        self.script_files.append('_static/websupport.js')

    def set_webinfo(self, staticdir, virtual_staticdir, search, storage):
        # type: (unicode, unicode, Any, unicode) -> None
        """Record web-app specific paths and backends.

        Called by WebSupport.build() before the Sphinx build starts;
        `search` and `storage` are the search/storage backend objects.
        """
        self.staticdir = staticdir
        self.virtual_staticdir = virtual_staticdir
        self.search = search
        self.storage = storage

    def init_translator_class(self):
        # type: () -> None
        """Default the translator to WebSupportTranslator unless overridden."""
        if self.translator_class is None:
            self.translator_class = WebSupportTranslator

    def prepare_writing(self, docnames):
        # type: (Iterable[unicode]) -> None
        """Prepare writing; the web app serves URLs without a search suffix."""
        PickleHTMLBuilder.prepare_writing(self, docnames)
        self.globalcontext['no_search_suffix'] = True

    def write_doc(self, docname, doctree):
        # type: (unicode, nodes.Node) -> None
        """Render one doctree to an HTML fragment and hand it to handle_page."""
        destination = StringOutput(encoding='utf-8')
        doctree.settings = self.docsettings

        self.secnumbers = self.env.toc_secnumbers.get(docname, {})
        self.fignumbers = self.env.toc_fignumbers.get(docname, {})
        # image/download URIs are rooted at the virtual static dir of the web app
        self.imgpath = '/' + posixpath.join(self.virtual_staticdir, self.imagedir)
        self.dlpath = '/' + posixpath.join(self.virtual_staticdir, '_downloads')
        self.current_docname = docname
        self.docwriter.write(doctree, destination)
        self.docwriter.assemble_parts()
        body = self.docwriter.parts['fragment']
        metatags = self.docwriter.clean_meta

        ctx = self.get_doc_context(docname, body, metatags)
        self.handle_page(docname, ctx, event_arg=doctree)

    def write_doc_serialized(self, docname, doctree):
        # type: (unicode, nodes.Node) -> None
        """Post-process images and feed the document to the search indexer."""
        self.imgpath = '/' + posixpath.join(self.virtual_staticdir, self.imagedir)
        self.post_process_images(doctree)
        title = self.env.longtitles.get(docname)
        title = title and self.render_partial(title)['title'] or ''
        self.index_page(docname, doctree, title)

    def load_indexer(self, docnames):
        # type: (Iterable[unicode]) -> None
        """Use the web support search backend as the indexer."""
        # NOTE(review): self.search is the backend set via set_webinfo, not a
        # Sphinx IndexBuilder, hence the type: ignore comments.
        self.indexer = self.search  # type: ignore
        self.indexer.init_indexing(changed=docnames)  # type: ignore

    def _render_page(self, pagename, addctx, templatename, event_arg=None):
        # type: (unicode, Dict, unicode, unicode) -> Tuple[Dict, Dict]
        """Build the template context and the pickled per-page dict.

        Returns ``(ctx, doc_ctx)``: the full template context and the
        smaller dict that will be pickled for the web application.
        """
        # This is mostly copied from StandaloneHTMLBuilder. However, instead
        # of rendering the template and saving the html, create a context
        # dict and pickle it.
        ctx = self.globalcontext.copy()
        ctx['pagename'] = pagename

        def pathto(otheruri, resource=False,
                   baseuri=self.get_target_uri(pagename)):
            # type: (unicode, bool, unicode) -> unicode
            # Resources with a scheme (http://...) pass through untouched;
            # documents get relative URIs; local resources are rooted at the
            # virtual static dir.
            if resource and '://' in otheruri:
                return otheruri
            elif not resource:
                otheruri = self.get_target_uri(otheruri)
                return relative_uri(baseuri, otheruri) or '#'
            else:
                return '/' + posixpath.join(self.virtual_staticdir, otheruri)
        ctx['pathto'] = pathto
        ctx['hasdoc'] = lambda name: name in self.env.all_docs
        ctx['encoding'] = self.config.html_output_encoding
        ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw)
        self.add_sidebars(pagename, ctx)
        ctx.update(addctx)

        # let extensions override the template via the html-page-context event
        newtmpl = self.app.emit_firstresult('html-page-context', pagename,
                                            templatename, ctx, event_arg)
        if newtmpl:
            templatename = newtmpl

        # create a dict that will be pickled and used by webapps
        doc_ctx = {
            'body': ctx.get('body', ''),
            'title': ctx.get('title', ''),
            'css': ctx.get('css', ''),
            'script': ctx.get('script', ''),
        }
        # partially render the html template to get at interesting macros
        template = self.templates.environment.get_template(templatename)
        template_module = template.make_module(ctx)
        for item in ['sidebar', 'relbar', 'script', 'css']:
            if hasattr(template_module, item):
                doc_ctx[item] = getattr(template_module, item)()

        return ctx, doc_ctx

    def handle_page(self, pagename, addctx, templatename='page.html',
                    outfilename=None, event_arg=None):
        # type: (unicode, Dict, unicode, unicode, unicode) -> None
        """Pickle the page context and copy the source for 'show source'."""
        ctx, doc_ctx = self._render_page(pagename, addctx,
                                         templatename, event_arg)

        if not outfilename:
            outfilename = path.join(self.outdir, 'pickles',
                                    os_path(pagename) + self.out_suffix)
        ensuredir(path.dirname(outfilename))
        self.dump_context(doc_ctx, outfilename)

        # if there is a source file, copy the source file for the
        # "show source" link
        if ctx.get('sourcename'):
            source_name = path.join(self.staticdir,
                                    '_sources', os_path(ctx['sourcename']))
            ensuredir(path.dirname(source_name))
            copyfile(self.env.doc2path(pagename), source_name)

    def handle_finish(self):
        # type: () -> None
        """Store global css/script values and relocate static assets."""
        # get global values for css and script files
        _, doc_ctx = self._render_page('tmp', {}, 'page.html')
        self.globalcontext['css'] = doc_ctx['css']
        self.globalcontext['script'] = doc_ctx['script']

        PickleHTMLBuilder.handle_finish(self)

        # move static stuff over to separate directory
        directories = [self.imagedir, '_static']
        for directory in directories:
            src = path.join(self.outdir, directory)
            dst = path.join(self.staticdir, directory)
            if path.isdir(src):
                if path.isdir(dst):
                    # replace any previously moved copy wholesale
                    shutil.rmtree(dst)
                shutil.move(src, dst)

    def dump_search_index(self):
        # type: () -> None
        """Finalize the search backend's index instead of dumping JS index."""
        self.indexer.finish_indexing()  # type: ignore
|
||||
|
||||
|
||||
def setup(app):
|
||||
# type: (Sphinx) -> Dict[unicode, Any]
|
||||
app.add_builder(WebSupportBuilder)
|
||||
|
@ -7,12 +7,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
if False:
|
||||
# For type annotation
|
||||
from docutils import nodes # NOQA
|
||||
|
||||
|
||||
def is_commentable(node):
    # type: (nodes.Node) -> bool
    """Return True if users may attach comments to *node*.

    Only paragraph nodes are commentable; the commented-out variant below
    also allowed literal blocks.
    """
    # return node.__class__.__name__ in ('paragraph', 'literal_block')
    return type(node).__name__ == 'paragraph'
|
||||
from sphinxcontrib.websupport.utils import is_commentable # NOQA
|
||||
|
@ -9,447 +9,7 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import posixpath
|
||||
from os import path
|
||||
|
||||
from six.moves import cPickle as pickle
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
from docutils.core import publish_parts
|
||||
|
||||
from sphinx.application import Sphinx
|
||||
from sphinx.locale import _
|
||||
from sphinx.util.osutil import ensuredir
|
||||
from sphinx.util.jsonimpl import dumps as dump_json
|
||||
from sphinx.util.pycompat import htmlescape
|
||||
from sphinx.websupport import errors
|
||||
from sphinx.websupport.search import BaseSearch, SEARCH_ADAPTERS
|
||||
from sphinx.websupport.storage import StorageBackend
|
||||
|
||||
if False:
|
||||
# For type annotation
|
||||
from typing import Dict # NOQA
|
||||
|
||||
|
||||
class WebSupport(object):
    """The main API class for the web support package. All interactions
    with the web support package should occur through this class.
    """
    def __init__(self,
                 srcdir=None,      # only required for building
                 builddir='',      # the dir with data/static/doctrees subdirs
                 datadir=None,     # defaults to builddir/data
                 staticdir=None,   # defaults to builddir/static
                 doctreedir=None,  # defaults to builddir/doctrees
                 search=None,      # defaults to no search
                 storage=None,     # defaults to SQLite in datadir
                 status=sys.stdout,
                 warning=sys.stderr,
                 moderation_callback=None,
                 allow_anonymous_comments=True,
                 docroot='',
                 staticroot='static',
                 ):
        # directories
        self.srcdir = srcdir
        self.builddir = builddir
        self.outdir = path.join(builddir, 'data')
        self.datadir = datadir or self.outdir
        self.staticdir = staticdir or path.join(self.builddir, 'static')
        # BUGFIX: this previously read ``staticdir or ...``, silently ignoring
        # an explicit ``doctreedir`` argument (and misusing ``staticdir`` as
        # the doctree directory when given).
        self.doctreedir = doctreedir or path.join(self.builddir, 'doctrees')
        # web server virtual paths (stored without surrounding slashes)
        self.staticroot = staticroot.strip('/')
        self.docroot = docroot.strip('/')

        self.status = status
        self.warning = warning
        self.moderation_callback = moderation_callback
        self.allow_anonymous_comments = allow_anonymous_comments

        self._init_templating()
        self._init_search(search)
        self._init_storage(storage)

        self._globalcontext = None  # type: ignore

        self._make_base_comment_options()

    def _init_storage(self, storage):
        """Set up the storage backend; defaults to SQLite via SQLAlchemy."""
        if isinstance(storage, StorageBackend):
            self.storage = storage
        else:
            # If a StorageBackend isn't provided, use the default
            # SQLAlchemy backend.
            from sphinx.websupport.storage.sqlalchemystorage \
                import SQLAlchemyStorage
            if not storage:
                # no explicit DB path given; create default sqlite database
                db_path = path.join(self.datadir, 'db', 'websupport.db')
                ensuredir(path.dirname(db_path))
                storage = 'sqlite:///' + db_path
            self.storage = SQLAlchemyStorage(storage)

    def _init_templating(self):
        """Create the Jinja2 environment loading Sphinx's basic theme."""
        import sphinx
        template_path = path.join(sphinx.package_dir,
                                  'themes', 'basic')
        loader = FileSystemLoader(template_path)
        self.template_env = Environment(loader=loader)

    def _init_search(self, search):
        """Set up the search backend; `search` is a BaseSearch instance or
        the name of a built-in adapter ('null' when falsy)."""
        if isinstance(search, BaseSearch):
            self.search = search
        else:
            mod, cls = SEARCH_ADAPTERS[search or 'null']
            mod = 'sphinx.websupport.search.' + mod
            SearchClass = getattr(__import__(mod, None, None, [cls]), cls)
            search_path = path.join(self.datadir, 'search')
            self.search = SearchClass(search_path)
        self.results_template = \
            self.template_env.get_template('searchresults.html')

    def build(self):
        """Build the documentation. Places the data into the `outdir`
        directory. Use it like this::

            support = WebSupport(srcdir, builddir, search='xapian')
            support.build()

        This will read reStructured text files from `srcdir`. Then it will
        build the pickles and search index, placing them into `builddir`.
        It will also save node data to the database.

        :raises RuntimeError: if no `srcdir` was given to the constructor.
        """
        if not self.srcdir:
            raise RuntimeError('No srcdir associated with WebSupport object')
        app = Sphinx(self.srcdir, self.srcdir, self.outdir, self.doctreedir,
                     'websupport', status=self.status, warning=self.warning)
        app.builder.set_webinfo(self.staticdir, self.staticroot,  # type: ignore
                                self.search, self.storage)

        self.storage.pre_build()
        app.build()
        self.storage.post_build()

    def get_globalcontext(self):
        """Load and return the "global context" pickle (cached)."""
        if not self._globalcontext:
            infilename = path.join(self.datadir, 'globalcontext.pickle')
            with open(infilename, 'rb') as f:
                self._globalcontext = pickle.load(f)
        return self._globalcontext

    def get_document(self, docname, username='', moderator=False):
        """Load and return a document from a pickle. The document will
        be a dict object which can be used to render a template::

            support = WebSupport(datadir=datadir)
            support.get_document('index', username, moderator)

        In most cases `docname` will be taken from the request path and
        passed directly to this function. In Flask, that would be something
        like this::

            @app.route('/<path:docname>')
            def index(docname):
                username = g.user.name if g.user else ''
                moderator = g.user.moderator if g.user else False
                try:
                    document = support.get_document(docname, username,
                                                    moderator)
                except DocumentNotFoundError:
                    abort(404)
                render_template('doc.html', document=document)

        The document dict that is returned contains the following items
        to be used during template rendering.

        * **body**: The main body of the document as HTML
        * **sidebar**: The sidebar of the document as HTML
        * **relbar**: A div containing links to related documents
        * **title**: The title of the document
        * **css**: Links to css files used by Sphinx
        * **script**: Javascript containing comment options

        This raises :class:`~sphinx.websupport.errors.DocumentNotFoundError`
        if a document matching `docname` is not found.

        :param docname: the name of the document to load.
        """
        docpath = path.join(self.datadir, 'pickles', docname)
        if path.isdir(docpath):
            # directory request: serve the contained index document
            infilename = docpath + '/index.fpickle'
            if not docname:
                docname = 'index'
            else:
                docname += '/index'
        else:
            infilename = docpath + '.fpickle'

        try:
            with open(infilename, 'rb') as f:
                document = pickle.load(f)
        except IOError:
            raise errors.DocumentNotFoundError(
                'The document "%s" could not be found' % docname)

        # prepend the per-request comment options and node metadata scripts
        comment_opts = self._make_comment_options(username, moderator)
        comment_meta = self._make_metadata(
            self.storage.get_metadata(docname, moderator))

        document['script'] = comment_opts + comment_meta + document['script']
        return document

    def get_search_results(self, q):
        """Perform a search for the query `q`, and create a set
        of search results. Then render the search results as html and
        return a context dict like the one created by
        :meth:`get_document`::

            document = support.get_search_results(q)

        :param q: the search query
        """
        results = self.search.query(q)
        ctx = {
            'q': q,
            'search_performed': True,
            'search_results': results,
            'docroot': '../',  # XXX
            '_': _,
        }
        document = {
            'body': self.results_template.render(ctx),
            'title': 'Search Results',
            'sidebar': '',
            'relbar': ''
        }
        return document

    def get_data(self, node_id, username=None, moderator=False):
        """Get the comments and source associated with `node_id`. If
        `username` is given vote information will be included with the
        returned comments. The default CommentBackend returns a dict with
        two keys, *source*, and *comments*. *source* is raw source of the
        node and is used as the starting point for proposals a user can
        add. *comments* is a list of dicts that represent a comment, each
        having the following items:

        ============= ======================================================
        Key           Contents
        ============= ======================================================
        text          The comment text.
        username      The username that was stored with the comment.
        id            The comment's unique identifier.
        rating        The comment's current rating.
        age           The time in seconds since the comment was added.
        time          A dict containing time information. It contains the
                      following keys: year, month, day, hour, minute, second,
                      iso, and delta. `iso` is the time formatted in ISO
                      8601 format. `delta` is a printable form of how old
                      the comment is (e.g. "3 hours ago").
        vote          If `user_id` was given, this will be an integer
                      representing the vote. 1 for an upvote, -1 for a
                      downvote, or 0 if unvoted.
        node          The id of the node that the comment is attached to.
                      If the comment's parent is another comment rather than
                      a node, this will be null.
        parent        The id of the comment that this comment is attached
                      to if it is not attached to a node.
        children      A list of all children, in this format.
        proposal_diff An HTML representation of the differences between the
                      the current source and the user's proposed source.
        ============= ======================================================

        :param node_id: the id of the node to get comments for.
        :param username: the username of the user viewing the comments.
        :param moderator: whether the user is a moderator.
        """
        return self.storage.get_data(node_id, username, moderator)

    def delete_comment(self, comment_id, username='', moderator=False):
        """Delete a comment.

        If `moderator` is True, the comment and all descendants will be deleted
        from the database, and the function returns ``True``.

        If `moderator` is False, the comment will be marked as deleted (but not
        removed from the database so as not to leave any comments orphaned), but
        only if the `username` matches the `username` on the comment. The
        username and text fields are replaced with "[deleted]" . In this case,
        the function returns ``False``.

        This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
        if moderator is False and `username` doesn't match username on the
        comment.

        :param comment_id: the id of the comment to delete.
        :param username: the username requesting the deletion.
        :param moderator: whether the requestor is a moderator.
        """
        return self.storage.delete_comment(comment_id, username, moderator)

    def add_comment(self, text, node_id='', parent_id='', displayed=True,
                    username=None, time=None, proposal=None,
                    moderator=False):
        """Add a comment to a node or another comment. Returns the comment
        in the same format as :meth:`get_comments`. If the comment is being
        attached to a node, pass in the node's id (as a string) with the
        node keyword argument::

            comment = support.add_comment(text, node_id=node_id)

        If the comment is the child of another comment, provide the parent's
        id (as a string) with the parent keyword argument::

            comment = support.add_comment(text, parent_id=parent_id)

        If you would like to store a username with the comment, pass
        in the optional `username` keyword argument::

            comment = support.add_comment(text, node=node_id,
                                          username=username)

        :param parent_id: the prefixed id of the comment's parent.
        :param text: the text of the comment.
        :param displayed: for moderation purposes
        :param username: the username of the user making the comment.
        :param time: the time the comment was created, defaults to now.
        :raises UserNotAuthorizedError: if no username is given and anonymous
            comments are disallowed.
        """
        if username is None:
            if self.allow_anonymous_comments:
                username = 'Anonymous'
            else:
                raise errors.UserNotAuthorizedError()
        parsed = self._parse_comment_text(text)
        comment = self.storage.add_comment(parsed, displayed, username,
                                           time, proposal, node_id,
                                           parent_id, moderator)
        comment['original_text'] = text
        # notify the application of comments awaiting moderation
        if not displayed and self.moderation_callback:
            self.moderation_callback(comment)
        return comment

    def process_vote(self, comment_id, username, value):
        """Process a user's vote. The web support package relies
        on the API user to perform authentication. The API user will
        typically receive a comment_id and value from a form, and then
        make sure the user is authenticated. A unique username must be
        passed in, which will also be used to retrieve the user's past
        voting data. An example, once again in Flask::

            @app.route('/docs/process_vote', methods=['POST'])
            def process_vote():
                if g.user is None:
                    abort(401)
                comment_id = request.form.get('comment_id')
                value = request.form.get('value')
                if value is None or comment_id is None:
                    abort(400)
                support.process_vote(comment_id, g.user.name, value)
                return "success"

        :param comment_id: the comment being voted on
        :param username: the unique username of the user voting
        :param value: 1 for an upvote, -1 for a downvote, 0 for an unvote.
        :raises ValueError: if `value` is outside [-1, 1].
        """
        value = int(value)
        if not -1 <= value <= 1:
            raise ValueError('vote value %s out of range (-1, 1)' % value)
        self.storage.process_vote(comment_id, username, value)

    def update_username(self, old_username, new_username):
        """To remain decoupled from a webapp's authentication system, the
        web support package stores a user's username with each of their
        comments and votes. If the authentication system allows a user to
        change their username, this can lead to stagnate data in the web
        support system. To avoid this, each time a username is changed, this
        method should be called.

        :param old_username: The original username.
        :param new_username: The new username.
        """
        self.storage.update_username(old_username, new_username)

    def accept_comment(self, comment_id, moderator=False):
        """Accept a comment that is pending moderation.

        This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
        if moderator is False.

        :param comment_id: The id of the comment that was accepted.
        :param moderator: Whether the user making the request is a moderator.
        """
        if not moderator:
            raise errors.UserNotAuthorizedError()
        self.storage.accept_comment(comment_id)

    def _make_base_comment_options(self):
        """Helper method to create the part of the COMMENT_OPTIONS javascript
        that remains the same throughout the lifetime of the
        :class:`~sphinx.websupport.WebSupport` object.
        """
        self.base_comment_opts = {}  # type: Dict[unicode, unicode]

        # only emit URL overrides when they differ from the JS defaults
        if self.docroot != '':
            comment_urls = [
                ('addCommentURL', '_add_comment'),
                ('getCommentsURL', '_get_comments'),
                ('processVoteURL', '_process_vote'),
                ('acceptCommentURL', '_accept_comment'),
                ('deleteCommentURL', '_delete_comment')
            ]
            for key, value in comment_urls:
                self.base_comment_opts[key] = \
                    '/' + posixpath.join(self.docroot, value)
        if self.staticroot != 'static':
            static_urls = [
                ('commentImage', 'comment.png'),
                ('closeCommentImage', 'comment-close.png'),
                ('loadingImage', 'ajax-loader.gif'),
                ('commentBrightImage', 'comment-bright.png'),
                ('upArrow', 'up.png'),
                ('upArrowPressed', 'up-pressed.png'),
                ('downArrow', 'down.png'),
                ('downArrowPressed', 'down-pressed.png')
            ]
            for key, value in static_urls:
                self.base_comment_opts[key] = \
                    '/' + posixpath.join(self.staticroot, '_static', value)

    def _make_comment_options(self, username, moderator):
        """Helper method to create the parts of the COMMENT_OPTIONS
        javascript that are unique to each request.

        :param username: The username of the user making the request.
        :param moderator: Whether the user making the request is a moderator.
        """
        rv = self.base_comment_opts.copy()
        if username:
            rv.update({
                'voting': True,
                'username': username,
                'moderator': moderator,
            })
        return '''\
<script type="text/javascript">
var COMMENT_OPTIONS = %s;
</script>
''' % dump_json(rv)

    def _make_metadata(self, data):
        """Render per-document node metadata as a COMMENT_METADATA script."""
        return '''\
<script type="text/javascript">
var COMMENT_METADATA = %s;
</script>
''' % dump_json(data)

    def _parse_comment_text(self, text):
        """Render comment `text` (reST) to an HTML fragment; fall back to
        escaped plain text if parsing fails."""
        settings = {'file_insertion_enabled': False,
                    'raw_enabled': False,
                    'output_encoding': 'unicode'}
        try:
            ret = publish_parts(text, writer_name='html',
                                settings_overrides=settings)['fragment']
        except Exception:
            # best-effort: never let a malformed comment break the request
            ret = htmlescape(text)
        return ret
|
||||
from sphinxcontrib.websupport import WebSupport # NOQA
|
||||
from sphinxcontrib.websupport import errors # NOQA
|
||||
from sphinxcontrib.websupport.search import BaseSearch, SEARCH_ADAPTERS # NOQA
|
||||
from sphinxcontrib.websupport.storage import StorageBackend # NOQA
|
||||
|
@ -9,18 +9,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
|
||||
class DocumentNotFoundError(Exception):
    """Raised when a requested document does not exist in the data dir."""
|
||||
|
||||
|
||||
class UserNotAuthorizedError(Exception):
    """Raised when a user attempts an operation they lack permission for."""
|
||||
|
||||
|
||||
class CommentNotAllowedError(Exception):
    """Raised when a comment is attached to a node that disallows comments."""
|
||||
|
||||
|
||||
class NullSearchException(Exception):
    """Raised by the null search adapter, which cannot perform queries."""
|
||||
from sphinxcontrib.websupport.errors import * # NOQA
|
||||
|
@ -9,126 +9,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
from six import text_type
|
||||
|
||||
|
||||
class BaseSearch(object):
    """Abstract base class for web support search adapters.

    Subclasses implement :meth:`add_document` and :meth:`handle_query`;
    the remaining methods provide the indexing/query plumbing shared by
    all adapters.
    """

    def __init__(self, path):
        # `path` is where an adapter may persist its index; the base class
        # keeps no state.
        pass

    def init_indexing(self, changed=()):
        """Called by the builder to initialize the search indexer. `changed`
        is a list of pagenames that will be reindexed. You may want to remove
        these from the search index before indexing begins.

        :param changed: a list of pagenames that will be re-indexed
        """
        # NOTE: default changed from the mutable ``[]`` to an immutable
        # tuple; both are falsy/iterable, so callers are unaffected.
        pass

    def finish_indexing(self):
        """Called by the builder when writing has been completed. Use this
        to perform any finalization or cleanup actions after indexing is
        complete.
        """
        pass

    def feed(self, pagename, filename, title, doctree):
        """Called by the builder to add a doctree to the index. Converts the
        `doctree` to text and passes it to :meth:`add_document`. You probably
        won't want to override this unless you need access to the `doctree`.
        Override :meth:`add_document` instead.

        :param pagename: the name of the page to be indexed
        :param filename: the name of the original source file
        :param title: the title of the page to be indexed
        :param doctree: is the docutils doctree representation of the page
        """
        self.add_document(pagename, filename, title, doctree.astext())

    def add_document(self, pagename, filename, title, text):
        """Called by :meth:`feed` to add a document to the search index.
        This method should do everything necessary to add a single
        document to the search index.

        `pagename` is name of the page being indexed. It is the combination
        of the source files relative path and filename,
        minus the extension. For example, if the source file is
        "ext/builders.rst", the `pagename` would be "ext/builders". This
        will need to be returned with search results when processing a
        query.

        :param pagename: the name of the page being indexed
        :param filename: the name of the original source file
        :param title: the page's title
        :param text: the full text of the page
        :raises NotImplementedError: subclasses must override this.
        """
        raise NotImplementedError()

    def query(self, q):
        """Called by the web support api to get search results. This method
        compiles the regular expression to be used when :meth:`extracting
        context <extract_context>`, then calls :meth:`handle_query`. You
        won't want to override this unless you don't want to use the included
        :meth:`extract_context` method. Override :meth:`handle_query` instead.

        :param q: the search query string.
        """
        # BUGFIX: escape each query term before building the alternation —
        # previously a query containing regex metacharacters (e.g. "foo(")
        # raised a regex compilation error.
        terms = (re.escape(term) for term in q.split())
        self.context_re = re.compile('|'.join(terms), re.I)
        return self.handle_query(q)

    def handle_query(self, q):
        """Called by :meth:`query` to retrieve search results for a search
        query `q`. This should return an iterable containing tuples of the
        following format::

            (<path>, <title>, <context>)

        `path` and `title` are the same values that were passed to
        :meth:`add_document`, and `context` should be a short text snippet
        of the text surrounding the search query in the document.

        The :meth:`extract_context` method is provided as a simple way
        to create the `context`.

        :param q: the search query
        :raises NotImplementedError: subclasses must override this.
        """
        raise NotImplementedError()

    def extract_context(self, text, length=240):
        """Extract the context for the search query from the document's
        full `text`.

        :param text: the full text of the document to create the context for
        :param length: the length of the context snippet to return.
        """
        res = self.context_re.search(text)
        if res is None:
            return ''
        # center the snippet on the first match, clamped to the text start
        context_start = max(res.start() - int(length / 2), 0)
        context_end = context_start + length
        context = ''.join([context_start > 0 and '...' or '',
                           text[context_start:context_end],
                           context_end < len(text) and '...' or ''])

        try:
            # py2: decode byte strings, skipping undecodable characters
            # (text_type comes from the module-level six import)
            return text_type(context, errors='ignore')
        except TypeError:
            return context

    def context_for_searchtool(self):
        """Required by the HTML builder."""
        return {}

    def get_js_stemmer_rawcode(self):
        """Required by the HTML builder."""
        return None
|
||||
|
||||
|
||||
#: The built-in search adapters: maps an adapter name to the
#: (module basename, class name) pair used for lazy import.
SEARCH_ADAPTERS = {
    'null': ('nullsearch', 'NullSearch'),
    'whoosh': ('whooshsearch', 'WhooshSearch'),
    'xapian': ('xapiansearch', 'XapianSearch'),
}
|
||||
from sphinxcontrib.websupport.search import BaseSearch, SEARCH_ADAPTERS # NOQA
|
||||
|
@ -9,16 +9,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
from sphinx.websupport.search import BaseSearch
|
||||
from sphinx.websupport.errors import NullSearchException
|
||||
|
||||
|
||||
class NullSearch(BaseSearch):
    """A search adapter that does nothing. Used when no search adapter
    is specified.
    """

    def feed(self, pagename, filename, title, doctree):
        # Nothing to index for the null adapter.
        pass

    def query(self, q):
        # Searching is not supported without a real adapter.
        raise NullSearchException('No search adapter specified.')
|
||||
from sphinxcontrib.websupport.search.nullsearch import NullSearch # NOQA
|
||||
|
@ -9,53 +9,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
from whoosh import index
|
||||
from whoosh.fields import Schema, ID, TEXT
|
||||
from whoosh.qparser import QueryParser
|
||||
from whoosh.analysis import StemmingAnalyzer
|
||||
|
||||
from six import text_type
|
||||
|
||||
from sphinx.util.osutil import ensuredir
|
||||
from sphinx.websupport.search import BaseSearch
|
||||
|
||||
|
||||
class WhooshSearch(BaseSearch):
    """The whoosh search adapter for sphinx web support."""

    # Define the Whoosh Schema for the search index.  ``path`` uniquely
    # identifies a document, ``title`` is boosted so title matches rank
    # higher, and ``text`` is stemmed and stored so context snippets can
    # be extracted from search results.
    schema = Schema(path=ID(stored=True, unique=True),
                    title=TEXT(field_boost=2.0, stored=True),
                    text=TEXT(analyzer=StemmingAnalyzer(), stored=True))

    def __init__(self, db_path):
        """Open the Whoosh index at *db_path*, creating it if missing."""
        ensuredir(db_path)
        if index.exists_in(db_path):
            self.index = index.open_dir(db_path)
        else:
            self.index = index.create_in(db_path, schema=self.schema)
        self.qparser = QueryParser('text', self.schema)

    def init_indexing(self, changed=[]):
        # NOTE(review): mutable default argument; harmless here because
        # ``changed`` is only iterated, never mutated.
        # Drop stale entries for changed documents before re-indexing them.
        for changed_path in changed:
            self.index.delete_by_term('path', changed_path)
        self.index_writer = self.index.writer()

    def finish_indexing(self):
        # Commit the writer opened in init_indexing, persisting all adds.
        self.index_writer.commit()

    def add_document(self, pagename, filename, title, text):
        # ``path`` must be unicode for the ID field; ``filename`` is unused
        # by this adapter.
        self.index_writer.add_document(path=text_type(pagename),
                                       title=title,
                                       text=text)

    def handle_query(self, q):
        """Run query *q* against the index and return (path, title,
        context) tuples as required by :class:`BaseSearch`.
        """
        searcher = self.index.searcher()
        whoosh_results = searcher.search(self.qparser.parse(q))
        results = []
        for result in whoosh_results:
            context = self.extract_context(result['text'])
            results.append((result['path'],
                            result.get('title', ''),
                            context))
        return results
|
||||
from sphinxcontrib.websupport.search.whooshsearch import WhooshSearch # NOQA
|
||||
|
@ -9,78 +9,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
import xapian
|
||||
|
||||
from six import string_types
|
||||
|
||||
from sphinx.util.osutil import ensuredir
|
||||
from sphinx.websupport.search import BaseSearch
|
||||
|
||||
|
||||
class XapianSearch(BaseSearch):
    """Search adapter backed by a Xapian database."""
    # Adapted from the GSOC 2009 webapp project.

    # Xapian metadata constants
    # Slot numbers used with doc.add_value/get_value to stash metadata.
    DOC_PATH = 0
    DOC_TITLE = 1

    def __init__(self, db_path):
        # The database itself is opened lazily in init_indexing /
        # handle_query; only remember where it lives.
        self.db_path = db_path

    def init_indexing(self, changed=[]):
        # NOTE(review): mutable default argument; ``changed`` is unused in
        # this adapter (old documents are replaced in add_document instead).
        ensuredir(self.db_path)
        self.database = xapian.WritableDatabase(self.db_path,
                                                xapian.DB_CREATE_OR_OPEN)
        self.indexer = xapian.TermGenerator()
        stemmer = xapian.Stem("english")
        self.indexer.set_stemmer(stemmer)

    def finish_indexing(self):
        # Ensure the db lock is removed.
        del self.database

    def add_document(self, pagename, filename, title, text):
        """Index one document; replaces any previous entry for the page.
        ``filename`` is unused by this adapter.
        """
        self.database.begin_transaction()
        # sphinx_page_path is used to easily retrieve documents by path.
        sphinx_page_path = '"sphinxpagepath%s"' % pagename.replace('/', '_')
        # Delete the old document if it exists.
        self.database.delete_document(sphinx_page_path)

        doc = xapian.Document()
        doc.set_data(text)
        doc.add_value(self.DOC_PATH, pagename)
        doc.add_value(self.DOC_TITLE, title)
        self.indexer.set_document(doc)
        self.indexer.index_text(text)
        doc.add_term(sphinx_page_path)
        for word in text.split():
            doc.add_posting(word, 1)
        self.database.add_document(doc)
        self.database.commit_transaction()

    def handle_query(self, q):
        """Run query *q*; returns (path, title, context) tuples."""
        # Open a fresh read-only handle so queries see committed data and
        # don't contend with the writable database used for indexing.
        database = xapian.Database(self.db_path)
        enquire = xapian.Enquire(database)
        qp = xapian.QueryParser()
        stemmer = xapian.Stem("english")
        qp.set_stemmer(stemmer)
        qp.set_database(database)
        qp.set_stemming_strategy(xapian.QueryParser.STEM_SOME)
        query = qp.parse_query(q)

        # Find the top 100 results for the query.
        enquire.set_query(query)
        matches = enquire.get_mset(0, 100)

        results = []

        for m in matches:
            data = m.document.get_data()
            # Xapian returns bytes on Python 3; decode before snippet
            # extraction.
            if not isinstance(data, string_types):
                data = data.decode("utf-8")
            context = self.extract_context(data)
            results.append((m.document.get_value(self.DOC_PATH),
                            m.document.get_value(self.DOC_TITLE),
                            ''.join(context)))

        return results
|
||||
from sphinxcontrib.websupport.search.xapiansearch import XapianSearch # NOQA
|
||||
|
@ -9,108 +9,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
|
||||
class StorageBackend(object):
    """Abstract interface that web-support storage backends implement.

    Build hooks (:meth:`pre_build`, :meth:`post_build`) are optional no-ops;
    every data-access method must be overridden by a concrete backend.
    """

    def pre_build(self):
        """Hook run immediately before a build starts.

        Override to prepare the backend for the addition of nodes.
        """
        pass

    def has_node(self, id):
        """Return whether a node with the given id already exists.

        :param id: the id to check for.
        """
        raise NotImplementedError

    def add_node(self, id, document, source):
        """Store a new node.

        :param id: a unique id for the comment.
        :param document: the name of the document the node belongs to.
        :param source: the source files name.
        """
        raise NotImplementedError

    def post_build(self):
        """Hook run after a build completes.

        Override to finalize the addition of nodes if needed.
        """
        pass

    def add_comment(self, text, displayed, username, time,
                    proposal, node_id, parent_id, moderator):
        """Store a newly added comment.

        :param text: the text of the comment
        :param displayed: whether the comment should be displayed
        :param username: the name of the user adding the comment
        :param time: a date object with the time the comment was added
        :param proposal: the text of the proposal the user made
        :param node_id: the id of the node that the comment is being added to
        :param parent_id: the id of the comment's parent comment.
        :param moderator: whether the user adding the comment is a moderator
        """
        raise NotImplementedError

    def delete_comment(self, comment_id, username, moderator):
        """Delete a comment.

        Raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
        when `moderator` is False and `username` does not match the
        comment's author.

        :param comment_id: The id of the comment being deleted.
        :param username: The username of the user requesting the deletion.
        :param moderator: Whether the user is a moderator.
        """
        raise NotImplementedError

    def get_metadata(self, docname, moderator):
        """Return document metadata: currently a dict mapping node_id to
        its comment count.

        :param docname: the name of the document to get metadata for.
        :param moderator: whether the requester is a moderator.
        """
        raise NotImplementedError

    def get_data(self, node_id, username, moderator):
        """Return all data for a node as a dict with two keys, *source*
        and *comments*, as described by
        :class:`~sphinx.websupport.WebSupport`'s
        :meth:`~sphinx.websupport.WebSupport.get_data` method.

        :param node_id: The id of the node to get data for.
        :param username: The name of the user requesting the data.
        :param moderator: Whether the requestor is a moderator.
        """
        raise NotImplementedError

    def process_vote(self, comment_id, username, value):
        """Record a vote; `value` is -1, 0, or 1.

        :param comment_id: The id of the comment being voted on.
        :param username: The username of the user casting the vote.
        :param value: The value of the vote being cast.
        """
        raise NotImplementedError

    def update_username(self, old_username, new_username):
        """Rename a user everywhere so no stale usernames remain in the
        storage system.

        :param old_username: The username being changed.
        :param new_username: What the username is being changed to.
        """
        raise NotImplementedError

    def accept_comment(self, comment_id):
        """Mark a comment accepted by a moderator; afterwards it is shown
        to all users.

        :param comment_id: The id of the comment being accepted.
        """
        raise NotImplementedError
|
||||
from sphinxcontrib.websupport.storage import StorageBackend # NOQA
|
||||
|
@ -9,78 +9,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
import re
|
||||
from difflib import Differ
|
||||
|
||||
from sphinx.util.pycompat import htmlescape
|
||||
|
||||
|
||||
class CombinedHtmlDiff(object):
    """Create an HTML representation of the differences between two pieces
    of text.
    """
    # Matches the marker runs ('+', '-', '^') on difflib's '?' hint lines,
    # which point at the changed characters of the preceding line.
    highlight_regex = re.compile(r'([\+\-\^]+)')

    def __init__(self, source, proposal):
        # Escape the proposal so it is safe to embed in HTML output.
        proposal = htmlescape(proposal)

        # splitlines(1) keeps line endings so the diff round-trips text.
        differ = Differ()
        self.diff = list(differ.compare(source.splitlines(1),
                                        proposal.splitlines(1)))

    def make_text(self):
        # Plain-text rendering: the raw difflib lines joined together.
        return '\n'.join(self.diff)

    def make_html(self):
        """Return the HTML representation of the differences between
        `source` and `proposal`.

        :param source: the original text
        :param proposal: the proposed text
        """
        html = []
        # Walk the diff with a one-line lookahead: a following '?' hint
        # line tells _handle_line which characters changed.
        diff = self.diff[:]
        line = diff.pop(0)
        next = diff.pop(0)
        while True:
            html.append(self._handle_line(line, next))
            line = next
            try:
                next = diff.pop(0)
            except IndexError:
                # Last line has no lookahead.
                html.append(self._handle_line(line))
                break
        return ''.join(html).rstrip()

    def _handle_line(self, line, next=None):
        """Handle an individual line in a diff."""
        # NOTE(review): parameter name ``next`` shadows the builtin; kept
        # as-is to preserve the public signature.
        prefix = line[0]
        text = line[2:]

        # Unchanged lines pass through; '?' hint lines produce no output
        # themselves (they only annotate the previous line).
        if prefix == ' ':
            return text
        elif prefix == '?':
            return ''

        # If the following line is a '?' hint, mark the changed characters
        # with <ins>/<del> inside this line.
        if next is not None and next[0] == '?':
            tag = prefix == '+' and 'ins' or 'del'
            text = self._highlight_text(text, next, tag)
        css_class = prefix == '+' and 'prop-added' or 'prop-removed'

        return '<span class="%s">%s</span>\n' % (css_class, text.rstrip())

    def _highlight_text(self, text, next, tag):
        """Highlight the specific changes made to a line by adding
        <ins> and <del> tags.
        """
        next = next[2:]
        new_text = []
        start = 0
        # Each marker run in the hint line maps to the same character span
        # in ``text``; wrap those spans in the requested tag.
        for match in self.highlight_regex.finditer(next):
            new_text.append(text[start:match.start()])
            new_text.append('<%s>' % tag)
            new_text.append(text[match.start():match.end()])
            new_text.append('</%s>' % tag)
            start = match.end()
        new_text.append(text[start:])
        return ''.join(new_text)
|
||||
from sphinxcontrib.websupport.storage.differ import CombinedHtmlDiff # NOQA
|
||||
|
@ -10,217 +10,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
from sqlalchemy import Column, Integer, Text, String, Boolean, \
|
||||
ForeignKey, DateTime
|
||||
from sqlalchemy.orm import relation, sessionmaker, aliased
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
|
||||
if False:
|
||||
# For type annotation
|
||||
from typing import List # NOQA
|
||||
|
||||
Base = declarative_base()
|
||||
Session = sessionmaker()
|
||||
|
||||
db_prefix = 'sphinx_'
|
||||
|
||||
|
||||
class Node(Base):  # type: ignore
    """Data about a Node in a doctree."""
    __tablename__ = db_prefix + 'nodes'

    # 32-char hex uuid assigned by the builder.
    id = Column(String(32), primary_key=True)
    # Name of the document this node belongs to.
    document = Column(String(256), nullable=False)
    # Source text of the node, used to build proposal diffs.
    source = Column(Text, nullable=False)

    def nested_comments(self, username, moderator):
        """Create a tree of comments. First get all comments that are
        descendants of this node, then convert them to a tree form.

        :param username: the name of the user to get comments for.
        :param moderator: whether the user is moderator.
        """
        session = Session()

        if username:
            # If a username is provided, create a subquery to retrieve all
            # votes by this user. We will outerjoin with the comment query
            # with this subquery so we have a user's voting information.
            sq = session.query(CommentVote).\
                filter(CommentVote.username == username).subquery()
            cvalias = aliased(CommentVote, sq)
            q = session.query(Comment, cvalias.value).outerjoin(cvalias)
        else:
            # If a username is not provided, we don't need to join with
            # CommentVote.
            q = session.query(Comment)

        # Filter out all comments not descending from this node.
        # Comment.path is a materialized path rooted at the node id.
        q = q.filter(Comment.path.like(str(self.id) + '.%'))

        # Filter out all comments that are not moderated yet.
        if not moderator:
            q = q.filter(Comment.displayed == True)  # noqa

        # Retrieve all results. Results must be ordered by Comment.path
        # so that we can easily transform them from a flat list to a tree.
        results = q.order_by(Comment.path).all()
        session.close()

        return self._nest_comments(results, username)

    def _nest_comments(self, results, username):
        """Given the flat list of results, convert the list into a
        tree.

        :param results: the flat list of comments
        :param username: the name of the user requesting the comments.
        """
        # ``list_stack`` tracks the 'children' list we are currently
        # appending to; the path depth tells us when to push or pop.
        comments = []  # type: List
        list_stack = [comments]
        for r in results:
            # With a username the query returned (Comment, vote) pairs;
            # without one it returned bare Comments (treat vote as 0).
            if username:
                comment, vote = r
            else:
                comment, vote = (r, 0)

            # Path components after the node id are ancestor comment ids.
            inheritance_chain = comment.path.split('.')[1:]

            if len(inheritance_chain) == len(list_stack) + 1:
                # One level deeper: descend into the last comment's children.
                parent = list_stack[-1][-1]
                list_stack.append(parent['children'])
            elif len(inheritance_chain) < len(list_stack):
                # Shallower: climb back up to the matching depth.
                while len(inheritance_chain) < len(list_stack):
                    list_stack.pop()

            list_stack[-1].append(comment.serializable(vote=vote))

        return comments

    def __init__(self, id, document, source):
        self.id = id
        self.document = document
        self.source = source
|
||||
|
||||
|
||||
class CommentVote(Base):  # type: ignore
    """A vote a user has made on a Comment."""
    __tablename__ = db_prefix + 'commentvote'

    # A user may cast at most one vote per comment, hence the composite
    # (username, comment_id) primary key.
    username = Column(String(64), primary_key=True)
    comment_id = Column(Integer, ForeignKey(db_prefix + 'comments.id'),
                        primary_key=True)
    # -1 if downvoted, +1 if upvoted, 0 if voted then unvoted.
    value = Column(Integer, nullable=False)

    def __init__(self, comment_id, username, value):
        self.comment_id = comment_id
        self.username = username
        self.value = value
|
||||
|
||||
|
||||
class Comment(Base):  # type: ignore
    """An individual Comment being stored."""
    __tablename__ = db_prefix + 'comments'

    id = Column(Integer, primary_key=True)
    rating = Column(Integer, nullable=False)
    time = Column(DateTime, nullable=False)
    text = Column(Text, nullable=False)
    # Hidden until a moderator accepts the comment.
    displayed = Column(Boolean, index=True, default=False)
    username = Column(String(64))
    proposal = Column(Text)
    proposal_diff = Column(Text)
    # Materialized path '<node_id>.<ancestor ids>...<self.id>' used for
    # efficient subtree queries; see set_path.
    path = Column(String(256), index=True)

    node_id = Column(String(32), ForeignKey(db_prefix + 'nodes.id'))
    node = relation(Node, backref="comments")

    votes = relation(CommentVote, backref="comment",
                     cascade="all")

    def __init__(self, text, displayed, username, rating, time,
                 proposal, proposal_diff):
        self.text = text
        self.displayed = displayed
        self.username = username
        self.rating = rating
        self.time = time
        self.proposal = proposal
        self.proposal_diff = proposal_diff

    def set_path(self, node_id, parent_id):
        """Set the materialized path for this comment."""
        # This exists because the path can't be set until the session has
        # been flushed and this Comment has an id.
        if node_id:
            # Top-level comment attached directly to a node.
            self.node_id = node_id
            self.path = '%s.%s' % (node_id, self.id)
        else:
            # Reply: extend the parent's path and inherit its node.
            session = Session()
            parent_path = session.query(Comment.path).\
                filter(Comment.id == parent_id).one().path
            session.close()
            self.node_id = parent_path.split('.')[0]
            self.path = '%s.%s' % (parent_path, self.id)

    def serializable(self, vote=0):
        """Creates a serializable representation of the comment. This is
        converted to JSON, and used on the client side.

        :param vote: the requesting user's vote on this comment (-1/0/+1).
        """
        delta = datetime.now() - self.time

        time = {'year': self.time.year,
                'month': self.time.month,
                'day': self.time.day,
                'hour': self.time.hour,
                'minute': self.time.minute,
                'second': self.time.second,
                'iso': self.time.isoformat(),
                'delta': self.pretty_delta(delta)}

        # path is '<node>.<ancestors...>.<self>'; with more than two
        # components the second-to-last one is the parent comment id.
        path = self.path.split('.')
        node = path[0]
        if len(path) > 2:
            parent = path[-2]
        else:
            parent = None

        return {'text': self.text,
                'username': self.username or 'Anonymous',
                'id': self.id,
                'node': node,
                'parent': parent,
                'rating': self.rating,
                'displayed': self.displayed,
                # NOTE(review): delta.seconds ignores whole days; presumably
                # intentional for recent comments -- confirm.
                'age': delta.seconds,
                'time': time,
                'vote': vote or 0,
                'proposal_diff': self.proposal_diff,
                'children': []}

    def pretty_delta(self, delta):
        """Create a pretty representation of the Comment's age.
        (e.g. 2 minutes).
        """
        days = delta.days
        seconds = delta.seconds
        # Use floor division: on Python 3 the former true division produced
        # floats (e.g. '2.5 hours ago') and broke the singular check below.
        hours = seconds // 3600
        minutes = seconds // 60

        if days == 0:
            if hours == 0:
                dt = (minutes, 'minute')
            else:
                dt = (hours, 'hour')
        else:
            dt = (days, 'day')

        if dt[0] == 1:
            ret = '%s %s ago' % dt
        else:
            ret = '%s %ss ago' % dt

        return ret
|
||||
from sphinxcontrib.websupport.storage.sqlalchemy_db import Node, Comment, CommentVote # NOQA
|
||||
|
@ -9,169 +9,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
import sqlalchemy
|
||||
from sqlalchemy.orm import aliased
|
||||
from sqlalchemy.sql import func
|
||||
|
||||
from sphinx.websupport.errors import CommentNotAllowedError, \
|
||||
UserNotAuthorizedError
|
||||
from sphinx.websupport.storage import StorageBackend
|
||||
from sphinx.websupport.storage.sqlalchemy_db import Base, Node, \
|
||||
Comment, CommentVote, Session
|
||||
from sphinx.websupport.storage.differ import CombinedHtmlDiff
|
||||
|
||||
if sqlalchemy.__version__[:3] < '0.5': # type: ignore
|
||||
raise ImportError('SQLAlchemy version 0.5 or greater is required for this '
|
||||
'storage backend; you have version %s' % sqlalchemy.__version__)
|
||||
|
||||
|
||||
class SQLAlchemyStorage(StorageBackend):
    """
    A :class:`.StorageBackend` using SQLAlchemy.
    """

    def __init__(self, uri):
        # Bind the declarative metadata and session factory to a fresh
        # engine, creating any missing tables.
        self.engine = sqlalchemy.create_engine(uri)
        Base.metadata.bind = self.engine
        Base.metadata.create_all()
        Session.configure(bind=self.engine)

    def pre_build(self):
        # One session spans the whole build; committed in post_build.
        self.build_session = Session()

    def has_node(self, id):
        """Return whether a Node row with this id exists."""
        session = Session()
        node = session.query(Node).filter(Node.id == id).first()
        session.close()
        return bool(node)

    def add_node(self, id, document, source):
        # Flush (not commit) so the node is queryable within the build
        # session before post_build commits everything.
        node = Node(id, document, source)
        self.build_session.add(node)
        self.build_session.flush()

    def post_build(self):
        self.build_session.commit()
        self.build_session.close()

    def add_comment(self, text, displayed, username, time,
                    proposal, node_id, parent_id, moderator):
        """Store a comment and return its serializable dict form.

        Raises CommentNotAllowedError when replying to a hidden parent.
        """
        session = Session()
        proposal_diff = None
        proposal_diff_text = None

        if node_id and proposal:
            # Top-level comment with a proposal: diff it against the
            # node's source for display.
            node = session.query(Node).filter(Node.id == node_id).one()
            differ = CombinedHtmlDiff(node.source, proposal)
            proposal_diff = differ.make_html()
            proposal_diff_text = differ.make_text()
        elif parent_id:
            parent = session.query(Comment.displayed).\
                filter(Comment.id == parent_id).one()
            if not parent.displayed:
                raise CommentNotAllowedError(
                    "Can't add child to a parent that is not displayed")

        comment = Comment(text, displayed, username, 0,
                          time or datetime.now(), proposal, proposal_diff)
        session.add(comment)
        session.flush()
        # We have to flush the session before setting the path so the
        # Comment has an id.
        comment.set_path(node_id, parent_id)
        session.commit()
        d = comment.serializable()
        d['document'] = comment.node.document
        d['proposal_diff_text'] = proposal_diff_text
        session.close()
        return d

    def delete_comment(self, comment_id, username, moderator):
        """Delete or redact a comment; returns True when really deleted.

        Raises UserNotAuthorizedError for non-moderators deleting someone
        else's comment.
        """
        session = Session()
        comment = session.query(Comment).\
            filter(Comment.id == comment_id).one()
        if moderator:
            # moderator mode: delete the comment and all descendants
            # find descendants via path
            session.query(Comment).filter(
                Comment.path.like(comment.path + '.%')).delete(False)
            session.delete(comment)
            session.commit()
            session.close()
            return True
        elif comment.username == username:
            # user mode: do not really delete, but remove text and proposal
            comment.username = '[deleted]'
            comment.text = '[deleted]'
            comment.proposal = ''
            session.commit()
            session.close()
            return False
        else:
            session.close()
            raise UserNotAuthorizedError()

    def get_metadata(self, docname, moderator):
        """Return {node_id: comment_count} for every node of *docname*."""
        session = Session()
        # Count comments per node, then left-join so nodes without
        # comments still appear (with a NULL count, mapped to 0 below).
        subquery = session.query(
            Comment.node_id,
            func.count('*').label('comment_count')).group_by(
            Comment.node_id).subquery()
        nodes = session.query(Node.id, subquery.c.comment_count).outerjoin(
            (subquery, Node.id == subquery.c.node_id)).filter(
            Node.document == docname)
        # NOTE(review): close() before commit(), and the query is iterated
        # after close -- relies on session reuse after close; looks
        # inverted, confirm before touching.
        session.close()
        session.commit()
        return dict([(k, v or 0) for k, v in nodes])

    def get_data(self, node_id, username, moderator):
        """Return {'source': ..., 'comments': ...} for one node."""
        session = Session()
        node = session.query(Node).filter(Node.id == node_id).one()
        session.close()
        comments = node.nested_comments(username, moderator)
        return {'source': node.source,
                'comments': comments}

    def process_vote(self, comment_id, username, value):
        """Apply a vote (-1/0/+1), adjusting the comment's rating by the
        delta against the user's previous vote, if any.
        """
        session = Session()

        # Left-join the user's existing vote onto the comment so one query
        # tells us both the comment and whether a prior vote exists.
        subquery = session.query(CommentVote).filter(
            CommentVote.username == username).subquery()
        vote_alias = aliased(CommentVote, subquery)
        q = session.query(Comment, vote_alias).outerjoin(vote_alias).filter(
            Comment.id == comment_id)
        comment, vote = q.one()

        if vote is None:
            vote = CommentVote(comment_id, username, value)
            comment.rating += value
        else:
            # Adjust by the difference so re-votes don't double count.
            comment.rating += value - vote.value
            vote.value = value

        session.add(vote)
        session.commit()
        session.close()

    def update_username(self, old_username, new_username):
        """Rename a user on all their comments and votes."""
        session = Session()

        session.query(Comment).filter(Comment.username == old_username).\
            update({Comment.username: new_username})
        session.query(CommentVote).\
            filter(CommentVote.username == old_username).\
            update({CommentVote.username: new_username})

        session.commit()
        session.close()

    def accept_comment(self, comment_id):
        """Mark a comment as displayed (moderator acceptance)."""
        session = Session()
        session.query(Comment).filter(Comment.id == comment_id).update(
            {Comment.displayed: True}
        )

        session.commit()
        session.close()
|
||||
from sphinxcontrib.websupport.storage.sqlalchemystorage import SQLAlchemyStorage # NOQA
|
||||
|
@ -9,38 +9,4 @@
|
||||
:license: BSD, see LICENSE for details.
|
||||
"""
|
||||
|
||||
from sphinx.writers.html import HTMLTranslator
|
||||
from sphinx.util.websupport import is_commentable
|
||||
|
||||
|
||||
class WebSupportTranslator(HTMLTranslator):
    """
    Our custom HTML translator.

    Tags commentable nodes with a stable id and CSS class, and records
    them in the builder's storage backend as they are visited.
    """

    def __init__(self, builder, *args, **kwargs):
        HTMLTranslator.__init__(self, builder, *args, **kwargs)
        # CSS class the client-side script uses to find commentable nodes.
        self.comment_class = 'sphinx-has-comment'

    def dispatch_visit(self, node):
        # Only nodes the versioning step stamped with a uid can carry
        # comments; tag them before normal HTML translation.
        if is_commentable(node) and hasattr(node, 'uid'):
            self.handle_visit_commentable(node)
        HTMLTranslator.dispatch_visit(self, node)

    def handle_visit_commentable(self, node):
        # We will place the node in the HTML id attribute. If the node
        # already has an id (for indexing purposes) put an empty
        # span with the existing id directly before this node's HTML.
        self.add_db_node(node)
        if node.attributes['ids']:
            self.body.append('<span id="%s"></span>'
                             % node.attributes['ids'][0])
        node.attributes['ids'] = ['s%s' % node.uid]
        node.attributes['classes'].append(self.comment_class)

    def add_db_node(self, node):
        # Persist the node once; subsequent builds find it by uid.
        storage = self.builder.storage
        if not storage.has_node(node.uid):
            storage.add_node(id=node.uid,
                             document=self.builder.current_docname,
                             source=node.rawsource or node.astext())
|
||||
from sphinxcontrib.websupport.writer import WebSupportTranslator # NOQA
|
||||
|
Loading…
Reference in New Issue
Block a user