Mirror of https://github.com/sphinx-doc/sphinx.git
Synced 2025-02-25 18:55:22 -06:00

Merge pull request #2454 from mgeier/html-sourcelink-txt

Add option html_sourcelink_suffix

This commit is contained in commit 31c6beb578.
doc/config.rst

@@ -870,6 +870,13 @@ that use Sphinx's HTMLWriter class.
 
    .. versionadded:: 0.6
 
+.. confval:: html_sourcelink_suffix
+
+   Suffix to be appended to source links (see :confval:`html_show_sourcelink`),
+   unless they have this suffix already.  Default is ``'.txt'``.
+
+   .. versionadded:: 1.5
+
 .. confval:: html_use_opensearch
 
    If nonempty, an `OpenSearch <http://www.opensearch.org/Home>`_ description file will be
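A minimal conf.py sketch (illustrative values, not part of this commit) showing
how the new option combines with the existing source-link settings:

    # conf.py -- illustrative configuration, values assumed
    html_copy_source = True       # copy the reST sources into _sources/
    html_show_sourcelink = True   # render "Show Source" links in the sidebar
    html_sourcelink_suffix = ''   # serve e.g. _sources/index.rst unchanged;
                                  # the default '.txt' yields _sources/index.rst.txt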
sphinx/builders/html.py

@@ -339,6 +339,7 @@ class StandaloneHTMLBuilder(Builder):
             show_sphinx = self.config.html_show_sphinx,
             has_source = self.config.html_copy_source,
             show_source = self.config.html_show_sourcelink,
+            sourcelink_suffix = self.config.html_sourcelink_suffix,
             file_suffix = self.out_suffix,
             script_files = self.script_files,
             language = self.config.language,
@@ -402,15 +403,21 @@ class StandaloneHTMLBuilder(Builder):
         # title rendered as HTML
         title = self.env.longtitles.get(docname)
         title = title and self.render_partial(title)['title'] or ''
 
+        # Suffix for the document
+        source_suffix = path.splitext(self.env.doc2path(docname))[1]
+
         # the name for the copied source
-        sourcename = self.config.html_copy_source and docname + '.txt' or ''
+        if self.config.html_copy_source:
+            sourcename = docname + source_suffix
+            if source_suffix != self.config.html_sourcelink_suffix:
+                sourcename += self.config.html_sourcelink_suffix
+        else:
+            sourcename = ''
 
         # metadata for the document
         meta = self.env.metadata.get(docname)
 
-        # Suffix for the document
-        source_suffix = '.' + self.env.doc2path(docname).split('.')[-1]
-
         # local TOC and global TOC tree
         self_toc = self.env.get_toc_for(docname, self)
         toc = self.render_partial(self_toc)['fragment']
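The naming rule above never doubles a suffix. A standalone sketch of the same
logic (plain function, assumed inputs, not Sphinx code):

    from os import path

    def sourcename(docname, source_path, sourcelink_suffix='.txt'):
        # suffix of the original document, e.g. '.rst' or '.txt'
        source_suffix = path.splitext(source_path)[1]
        name = docname + source_suffix
        if source_suffix != sourcelink_suffix:
            name += sourcelink_suffix
        return name

    print(sourcename('index', 'index.rst'))      # -> 'index.rst.txt'
    print(sourcename('notes', 'notes.txt'))      # -> 'notes.txt' (already suffixed)
    print(sourcename('index', 'index.rst', ''))  # -> 'index.rst'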
@@ -712,6 +719,11 @@ class StandaloneHTMLBuilder(Builder):
     def index_page(self, pagename, doctree, title):
         # only index pages with title
         if self.indexer is not None and title:
-            self.indexer.feed(pagename, title, doctree)
+            filename = self.env.doc2path(pagename, base=None)
+            try:
+                self.indexer.feed(pagename, filename, title, doctree)
+            except TypeError:
+                # fallback for old search-adapters
+                self.indexer.feed(pagename, title, doctree)
 
     def _get_local_toctree(self, docname, collapse=True, **kwds):
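The try/except keeps third-party indexers working: an adapter written against
the old three-argument feed() raises TypeError when called with four arguments,
and the builder retries with the old signature. A toy illustration (classes
assumed, not Sphinx code):

    class OldIndexer:
        def feed(self, pagename, title, doctree):
            print('old-style feed:', pagename)

    class NewIndexer:
        def feed(self, pagename, filename, title, doctree):
            print('new-style feed:', pagename, filename)

    def index_page(indexer, pagename, filename, title, doctree):
        try:
            indexer.feed(pagename, filename, title, doctree)
        except TypeError:  # old signature: retry without filename
            indexer.feed(pagename, title, doctree)

    index_page(NewIndexer(), 'index', 'index.rst', 'Index', None)  # new-style feed
    index_page(OldIndexer(), 'index', 'index.rst', 'Index', None)  # old-style feed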
sphinx/config.py

@@ -121,6 +121,7 @@ class Config(object):
         html_split_index = (False, 'html'),
         html_copy_source = (True, 'html'),
         html_show_sourcelink = (True, 'html'),
+        html_sourcelink_suffix = ('.txt', 'html'),
         html_use_opensearch = ('', 'html'),
         html_file_suffix = (None, 'html', string_classes),
         html_link_suffix = (None, 'html', string_classes),
sphinx/search/__init__.py

@@ -226,11 +226,13 @@ class IndexBuilder(object):
 
     def __init__(self, env, lang, options, scoring):
         self.env = env
-        # filename -> title
+        # docname -> title
         self._titles = {}
-        # stemmed word -> set(filenames)
+        # docname -> filename
+        self._filenames = {}
+        # stemmed word -> set(docname)
         self._mapping = {}
-        # stemmed words in titles -> set(filenames)
+        # stemmed words in titles -> set(docname)
         self._title_mapping = {}
         # word -> stemmed word
         self._stem_cache = {}
@@ -338,15 +340,16 @@ class IndexBuilder(object):
 
     def freeze(self):
        """Create a usable data structure for serializing."""
-        filenames, titles = zip(*sorted(self._titles.items()))
-        fn2index = dict((f, i) for (i, f) in enumerate(filenames))
+        docnames, titles = zip(*sorted(self._titles.items()))
+        filenames = [self._filenames.get(docname) for docname in docnames]
+        fn2index = dict((f, i) for (i, f) in enumerate(docnames))
         terms, title_terms = self.get_terms(fn2index)
 
         objects = self.get_objects(fn2index)  # populates _objtypes
         objtypes = dict((v, k[0] + ':' + k[1])
                         for (k, v) in iteritems(self._objtypes))
         objnames = self._objnames
-        return dict(filenames=filenames, titles=titles, terms=terms,
+        return dict(docnames=docnames, filenames=filenames, titles=titles, terms=terms,
                     objects=objects, objtypes=objtypes, objnames=objnames,
                     titleterms=title_terms, envversion=self.env.version)
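After this change the frozen index carries parallel docnames/filenames lists, so
search results can link back to the original source file even when it is not a
plain .txt copy. Roughly the shape it serializes to (values assumed for
illustration):

    frozen = {
        'docnames':  ['images', 'otherext'],           # new: document names
        'filenames': ['images.txt', 'otherext.foo'],   # parallel list of source files
        'titles':    ['Images', 'Generated section'],
        'terms':     {'fermion': 0},                   # stemmed word -> index into docnames
        # ... plus objects, objtypes, objnames, titleterms, envversion as before
    }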
@@ -365,9 +368,11 @@ class IndexBuilder(object):
         for wordnames in itervalues(self._title_mapping):
             wordnames.intersection_update(filenames)
 
-    def feed(self, filename, title, doctree):
+    def feed(self, docname, filename, title, doctree):
         """Feed a doctree to the index."""
-        self._titles[filename] = title
+        self._titles[docname] = title
+        self._filenames[docname] = filename
 
         visitor = WordCollector(doctree, self.lang)
         doctree.walk(visitor)
@@ -383,12 +388,12 @@ class IndexBuilder(object):
         for word in visitor.found_title_words:
             word = stem(word)
             if _filter(word):
-                self._title_mapping.setdefault(word, set()).add(filename)
+                self._title_mapping.setdefault(word, set()).add(docname)
 
         for word in visitor.found_words:
             word = stem(word)
             if word not in self._title_mapping and _filter(word):
-                self._mapping.setdefault(word, set()).add(filename)
+                self._mapping.setdefault(word, set()).add(docname)
 
     def context_for_searchtool(self):
         return dict(
sphinx/themes/basic/layout.html

@@ -91,7 +91,8 @@
         VERSION:     '{{ release|e }}',
         COLLAPSE_INDEX: false,
         FILE_SUFFIX: '{{ '' if no_search_suffix else file_suffix }}',
-        HAS_SOURCE:  {{ has_source|lower }}
+        HAS_SOURCE:  {{ has_source|lower }},
+        SOURCELINK_SUFFIX: '{{ sourcelink_suffix }}'
       };
     </script>
     {%- for scriptfile in script_files %}
sphinx/themes/basic/static/searchtools.js_t

@@ -256,7 +256,8 @@ var Search = {
           displayNextItem();
         });
       } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
-        $.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' + item[0] + '.txt',
+        var suffix = DOCUMENTATION_OPTIONS.SOURCELINK_SUFFIX;
+        $.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' + item[5] + (item[5].endsWith(suffix) ? '' : suffix),
                 dataType: "text",
                 complete: function(jqxhr, textstatus) {
                   var data = jqxhr.responseText;
@@ -295,6 +296,7 @@ var Search = {
    */
   performObjectSearch : function(object, otherterms) {
     var filenames = this._index.filenames;
+    var docnames = this._index.docnames;
     var objects = this._index.objects;
     var objnames = this._index.objnames;
     var titles = this._index.titles;
@@ -348,7 +350,7 @@ var Search = {
         } else {
           score += Scorer.objPrioDefault;
         }
-        results.push([filenames[match[0]], fullname, '#'+anchor, descr, score]);
+        results.push([docnames[match[0]], fullname, '#'+anchor, descr, score, filenames[match[0]]]);
       }
     }
   }
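Result rows grow from five to six elements: [docname, title or fullname, anchor,
description, score, filename]. In Python terms, the snippet loader above
consumes a row roughly like this (sketch, names assumed):

    docname, fullname, anchor, descr, score, filename = result_row
    suffix = '.txt'  # mirrors DOCUMENTATION_OPTIONS.SOURCELINK_SUFFIX
    url = '_sources/' + filename + ('' if filename.endswith(suffix) else suffix)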
@@ -360,6 +362,7 @@ var Search = {
    * search for full-text terms in the index
    */
   performTermsSearch : function(searchterms, excluded, terms, titleterms) {
+    var docnames = this._index.docnames;
    var filenames = this._index.filenames;
    var titles = this._index.titles;
@@ -434,7 +437,7 @@ var Search = {
       // select one (max) score for the file.
       // for better ranking, we should calculate ranking by using words statistics like basic tf-idf...
       var score = $u.max($u.map(fileMap[file], function(w){return scoreMap[file][w]}));
-      results.push([filenames[file], titles[file], '', null, score]);
+      results.push([docnames[file], titles[file], '', null, score, filenames[file]]);
     }
   }
   return results;
sphinx/websupport/search/__init__.py

@@ -34,19 +34,20 @@ class BaseSearch(object):
         """
         pass
 
-    def feed(self, pagename, title, doctree):
+    def feed(self, pagename, filename, title, doctree):
         """Called by the builder to add a doctree to the index. Converts the
         `doctree` to text and passes it to :meth:`add_document`. You probably
         won't want to override this unless you need access to the `doctree`.
         Override :meth:`add_document` instead.
 
         :param pagename: the name of the page to be indexed
+        :param filename: the name of the original source file
         :param title: the title of the page to be indexed
         :param doctree: the docutils doctree representation of the page
         """
-        self.add_document(pagename, title, doctree.astext())
+        self.add_document(pagename, filename, title, doctree.astext())
 
-    def add_document(self, pagename, title, text):
+    def add_document(self, pagename, filename, title, text):
         """Called by :meth:`feed` to add a document to the search index.
         This method should do everything necessary to add a single
         document to the search index.
@@ -59,6 +60,7 @@ class BaseSearch(object):
         query.
 
         :param pagename: the name of the page being indexed
+        :param filename: the name of the original source file
         :param title: the page's title
         :param text: the full text of the page
         """
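A custom adapter only needs to accept the widened signature; it may ignore the
new filename argument entirely. A minimal sketch (hypothetical adapter, not part
of this commit):

    from sphinx.websupport.search import BaseSearch

    class PrintSearch(BaseSearch):
        """Toy adapter that just records what it is asked to index."""

        def add_document(self, pagename, filename, title, text):
            # filename is new in this commit; store or ignore it as needed
            print('indexing %s (source: %s): %s' % (pagename, filename, title))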
sphinx/websupport/search/nullsearch.py

@@ -17,7 +17,7 @@ class NullSearch(BaseSearch):
     """A search adapter that does nothing. Used when no search adapter
     is specified.
     """
-    def feed(self, pagename, title, doctree):
+    def feed(self, pagename, filename, title, doctree):
         pass
 
     def query(self, q):
sphinx/websupport/search/whooshsearch.py

@@ -44,7 +44,7 @@ class WhooshSearch(BaseSearch):
     def finish_indexing(self):
         self.index_writer.commit()
 
-    def add_document(self, pagename, title, text):
+    def add_document(self, pagename, filename, title, text):
         self.index_writer.add_document(path=text_type(pagename),
                                        title=title,
                                        text=text)
tests/test_build_html.py

@@ -68,6 +68,7 @@ HTML_XPATH = {
         (".//img[@src='_images/img1.png']", ''),
         (".//img[@src='_images/simg.png']", ''),
         (".//img[@src='_images/svgimg.svg']", ''),
+        (".//a[@href='_sources/images.txt']", ''),
     ],
     'subdir/images.html': [
         (".//img[@src='../_images/img1.png']", ''),
@@ -318,6 +319,7 @@ HTML_XPATH = {
     ],
     'otherext.html': [
         (".//h1", "Generated section"),
+        (".//a[@href='_sources/otherext.foo.txt']", ''),
     ]
 }
@@ -985,3 +987,15 @@ def test_html_extra_path(app, status, warning):
     assert (app.outdir / 'rimg.png').exists()
     assert not (app.outdir / '_build/index.html').exists()
     assert (app.outdir / 'background.png').exists()
+
+
+@with_app(buildername='html', confoverrides={'html_sourcelink_suffix': ''})
+def test_html_sourcelink_suffix(app, status, warning):
+    app.builder.build_all()
+    content_otherext = (app.outdir / 'otherext.html').text()
+    content_images = (app.outdir / 'images.html').text()
+
+    assert '<a href="_sources/otherext.foo"' in content_otherext
+    assert '<a href="_sources/images.txt"' in content_images
+    assert (app.outdir / '_sources' / 'otherext.foo').exists()
+    assert (app.outdir / '_sources' / 'images.txt').exists()
tests/test_search.py

@@ -53,7 +53,7 @@ def test_wordcollector():
     parser.parse(FILE_CONTENTS, doc)
 
     ix = IndexBuilder(None, 'en', {}, None)
-    ix.feed('filename', 'title', doc)
+    ix.feed('docname', 'filename', 'title', doc)
     assert 'boson' not in ix._mapping
     assert 'fermion' in ix._mapping
tests/test_websupport.py

@@ -41,7 +41,7 @@ def search_adapter_helper(adapter):
 
     # Make sure documents are properly updated by the search adapter.
     s.init_indexing(changed=['markup'])
-    s.add_document(u'markup', u'title', u'SomeLongRandomWord')
+    s.add_document(u'markup', u'filename', u'title', u'SomeLongRandomWord')
     s.finish_indexing()
     # Now a search for "Epigraph" should return zero results.
     results = s.query(u'Epigraph')